Merge pull request #13230 from LabNConsulting/micronet-is-munet

Micronet is munet
This commit is contained in:
Donald Sharp 2023-04-17 13:35:29 -04:00 committed by GitHub
commit eda79af4a4
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
32 changed files with 12521 additions and 1448 deletions

View file

@@ -7,21 +7,23 @@ import glob
import os
import pdb
import re
import resource
import subprocess
import sys
import time
import resource
import pytest
import lib.fixtures
from lib import topolog
from lib.micronet import Commander, proc_error
from lib.micronet_cli import cli
from lib.micronet_compat import Mininet, cleanup_current, cleanup_previous
from lib.micronet_compat import Mininet
from lib.topogen import diagnose_env, get_topogen
from lib.topolog import logger
from lib.topotest import g_extra_config as topotest_extra_config
from lib.topotest import json_cmp_result
from munet.base import Commander, proc_error
from munet.cleanup import cleanup_current, cleanup_previous
from munet import cli
def pytest_addoption(parser):
@ -501,7 +503,7 @@ def pytest_runtest_makereport(item, call):
# Really would like something better than using this global here.
# Not all tests use topogen though so get_topogen() won't work.
if Mininet.g_mnet_inst:
cli(Mininet.g_mnet_inst, title=title, background=False)
cli.cli(Mininet.g_mnet_inst, title=title, background=False)
else:
logger.error("Could not launch CLI b/c no mininet exists yet")
@ -515,7 +517,7 @@ def pytest_runtest_makereport(item, call):
user = user.strip()
if user == "cli":
cli(Mininet.g_mnet_inst)
cli.cli(Mininet.g_mnet_inst)
elif user == "pdb":
pdb.set_trace() # pylint: disable=forgotten-debug-statement
elif user:

View file

@ -21,7 +21,8 @@ try:
import grpc
import grpc_tools
from micronet import commander
sys.path.append(os.path.dirname(CWD))
from munet.base import commander
commander.cmd_raises(f"cp {CWD}/../../../grpc/frr-northbound.proto .")
commander.cmd_raises(

File diff suppressed because it is too large Load diff

View file

@@ -1,306 +0,0 @@
# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: GPL-2.0-or-later
#
# July 24 2021, Christian Hopps <chopps@labn.net>
#
# Copyright (c) 2021, LabN Consulting, L.L.C.
#
import argparse
import logging
import os
import pty
import re
import readline
import select
import socket
import subprocess
import sys
import tempfile
import termios
import tty
ENDMARKER = b"\x00END\x00"
def lineiter(sock):
    """Yield one decoded text line per successful recv() on *sock*.

    Stops on EOF (empty recv).  Note: at most one line is emitted per
    recv() call; a chunk holding several newlines leaves the extras
    buffered until more data arrives (legacy behavior, preserved).
    """
    buf = ""
    while True:
        chunk = sock.recv(256)
        if not chunk:
            return
        buf += chunk.decode("utf-8")
        nl = buf.find("\n")
        if nl == -1:
            continue
        yield buf[:nl]
        buf = buf[nl + 1 :]
def spawn(unet, host, cmd):
    """Run *cmd* on *host* interactively, proxying the controlling tty.

    Bridges the user's stdin/stdout to a pty attached to the spawned
    process until it exits; tty modes are restored afterwards.
    """
    stdin_isatty = sys.stdin.isatty()
    if stdin_isatty:
        saved_attrs = termios.tcgetattr(sys.stdin)
        tty.setraw(sys.stdin.fileno())
    try:
        master_fd, slave_fd = pty.openpty()

        # os.setsid() gives the child its own process group so that bash
        # job control works inside the spawned shell.
        proc = unet.hosts[host].popen(
            cmd,
            preexec_fn=os.setsid,
            stdin=slave_fd,
            stdout=slave_fd,
            stderr=slave_fd,
            universal_newlines=True,
        )

        # Shuttle bytes both ways until the child exits.
        while proc.poll() is None:
            readable, _, _ = select.select([sys.stdin, master_fd], [], [], 0.25)
            if sys.stdin in readable:
                data = os.read(sys.stdin.fileno(), 10240)
                os.write(master_fd, data)
            elif master_fd in readable:
                out = os.read(master_fd, 10240)
                if out:
                    os.write(sys.stdout.fileno(), out)
    finally:
        # restore tty settings back
        if stdin_isatty:
            termios.tcsetattr(sys.stdin, termios.TCSADRAIN, saved_attrs)
def doline(unet, line, writef):
    """Parse and execute one CLI *line*; return False to exit the CLI.

    Output is produced by calling *writef* with strings.
    """

    def split_hosts(net, text):
        # Leading words naming hosts select the targets; the remainder is
        # the command.  No leading host names means "all hosts".
        words = text.split()
        for idx, word in enumerate(words):
            if word not in net.hosts:
                break
        chosen = words[:idx]
        if not chosen:
            chosen = sorted(net.hosts.keys())
        return chosen, " ".join(words[idx:])

    line = line.strip()
    m = re.match(r"^(\S+)(?:\s+(.*))?$", line)
    if not m:
        return True

    cmd = m.group(1)
    oargs = m.group(2) if m.group(2) else ""

    if cmd in ("q", "quit"):
        return False

    if cmd == "hosts":
        writef("%% hosts: %s\n" % " ".join(sorted(unet.hosts.keys())))
    elif cmd in ["term", "vtysh", "xterm"]:
        names = oargs.split()
        if not names or (len(names) == 1 and names[0] == "*"):
            names = sorted(unet.hosts.keys())
        for host in (unet.hosts[x] for x in names if x in unet.hosts):
            if cmd in ("t", "term"):
                host.run_in_window("bash", title="sh-%s" % host)
            elif cmd in ("v", "vtysh"):
                host.run_in_window("vtysh", title="vt-%s" % host)
            elif cmd in ("x", "xterm"):
                host.run_in_window("bash", title="sh-%s" % host, forcex=True)
    elif cmd == "sh":
        hosts, shcmd = split_hosts(unet, oargs)
        for host in hosts:
            if sys.stdin.isatty():
                # Interactive: attach the user's tty to the command.
                spawn(unet, host, shcmd)
            else:
                if len(hosts) > 1:
                    writef("------ Host: %s ------\n" % host)
                writef(unet.hosts[host].cmd_legacy(shcmd))
                if len(hosts) > 1:
                    writef("------- End: %s ------\n" % host)
        writef("\n")
    elif cmd in ("h", "help"):
        writef(
            """
Commands:
help :: this help
sh [hosts] <shell-command> :: execute <shell-command> on <host>
term [hosts] :: open shell terminals for hosts
vtysh [hosts] :: open vtysh terminals for hosts
[hosts] <vtysh-command> :: execute vtysh-command on hosts\n\n"""
        )
    else:
        # Default: treat the whole line as "[hosts] vtysh-command".
        hosts, vcmd = split_hosts(unet, line)
        for host in hosts:
            if len(hosts) > 1:
                writef("------ Host: %s ------\n" % host)
            writef(unet.hosts[host].cmd_legacy('vtysh -c "{}"'.format(vcmd)))
            if len(hosts) > 1:
                writef("------- End: %s ------\n" % host)
        writef("\n")
    return True
def cli_server_setup(unet):
    """Create a listening unix-domain socket for the CLI server.

    Returns (socket, tmpdir, socketpath).  On failure the temporary
    directory is removed (via *unet*) before re-raising.
    """
    tmpdir = tempfile.mkdtemp("-sockdir", "pyt")
    path = os.path.join(tmpdir, "cli-server.sock")
    try:
        lsock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        lsock.settimeout(10)
        lsock.bind(path)
        lsock.listen(1)
    except Exception:
        unet.cmd_status("rm -rf " + tmpdir)
        raise
    return lsock, tmpdir, path
def cli_server(unet, server_sock):
    """Accept one CLI client and execute its lines until quit or EOF.

    Each response is terminated with the ENDMARKER sentinel so the client
    knows when the command's output is complete.
    """
    conn, _ = server_sock.accept()

    # Go into full blocking mode now that we are connected.
    conn.settimeout(None)

    for raw in lineiter(conn):
        text = raw.strip()

        def writef(data):
            conn.send(data.encode("utf-8"))

        if not doline(unet, text, writef):
            return
        conn.send(ENDMARKER)
def cli_client(sockpath, prompt="unet> "):
    """Interactive client loop: read user lines, send them to the CLI
    server at *sockpath*, and print each ENDMARKER-terminated response."""
    conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    conn.settimeout(10)
    conn.connect(sockpath)

    # Go into full blocking mode now that we are connected.
    conn.settimeout(None)

    print("\n--- Micronet CLI Starting ---\n\n")
    while True:
        if sys.version_info[0] == 2:
            line = raw_input(prompt)  # pylint: disable=E0602
        else:
            line = input(prompt)
        if line is None:
            return

        # Send the CLI command (restoring the newline input() stripped).
        conn.send((line + "\n").encode("utf-8"))

        # Collect the response until the end marker arrives.
        marker_len = len(ENDMARKER)
        reply = b""
        while not (len(reply) >= marker_len and reply[-marker_len:] == ENDMARKER):
            part = conn.recv(4096)
            if not part:
                return
            reply += part

        # Strip the marker and emit the output.
        sys.stdout.write(reply[:-marker_len].decode("utf-8"))
def local_cli(unet, outf, prompt="unet> "):
    """Run the CLI loop directly in this process, writing output to *outf*."""
    print("\n--- Micronet CLI Starting ---\n\n")
    while True:
        if sys.version_info[0] == 2:
            line = raw_input(prompt)  # pylint: disable=E0602
        else:
            line = input(prompt)
        if line is None:
            return
        if not doline(unet, line, outf.write):
            return
def cli(
    unet,
    histfile=None,
    sockpath=None,
    force_window=False,
    title=None,
    prompt=None,
    background=True,
):
    """Top-level CLI entry point.

    Runs the CLI inline on the current tty, as a client attached to
    *sockpath*, or — when a window is forced or there is no tty — spawns
    this module in a new window connected over a private unix socket.
    """
    log = logging.getLogger("cli-client")
    if prompt is None:
        prompt = "unet> "

    if force_window or not sys.stdin.isatty():
        # No usable tty here: serve the CLI over a socket and point a
        # freshly-opened window's client at it.
        sock, sockdir, spath = cli_server_setup(unet)

        python_path = unet.get_exec_path(["python3", "python"])
        cmd = "{} {}".format(python_path, os.path.realpath(__file__))
        if histfile:
            cmd += " --histfile=" + histfile
        if title:
            cmd += " --prompt={}".format(title)
        cmd += " " + spath

        try:
            unet.run_in_window(cmd, new_window=True, title=title, background=background)
            return cli_server(unet, sock)
        finally:
            unet.cmd_status("rm -rf " + sockdir)

    if not unet:
        log.debug("client-cli using sockpath %s", sockpath)

    # Best-effort readline history setup; never fatal.
    try:
        if histfile is None:
            histfile = os.path.expanduser("~/.micronet-history.txt")
            if not os.path.exists(histfile):
                if unet:
                    unet.cmd("touch " + histfile)
                else:
                    subprocess.run("touch " + histfile)
        if histfile:
            readline.read_history_file(histfile)
    except Exception:
        pass

    try:
        if sockpath:
            cli_client(sockpath, prompt=prompt)
        else:
            local_cli(unet, sys.stdout, prompt=prompt)
    except EOFError:
        pass
    except Exception as ex:
        log.critical("cli: got exception: %s", ex, exc_info=True)
        raise
    finally:
        readline.write_history_file(histfile)
if __name__ == "__main__":
    # Standalone mode: run as a CLI client connecting to an existing
    # cli-server socket (see cli() windowed mode above).
    logging.basicConfig(level=logging.DEBUG, filename="/tmp/topotests/cli-client.log")
    logger = logging.getLogger("cli-client")
    logger.info("Start logging cli-client")

    parser = argparse.ArgumentParser()
    # Fix: help text said "file to user for history".
    parser.add_argument("--histfile", help="file to use for history")
    parser.add_argument("--prompt-text", help="prompt string to use")
    parser.add_argument("socket", help="path to pair of sockets to communicate over")
    args = parser.parse_args()

    prompt = "{}> ".format(args.prompt_text) if args.prompt_text else "unet> "
    cli(None, args.histfile, args.socket, prompt=prompt)

View file

@ -3,140 +3,43 @@
#
# July 11 2021, Christian Hopps <chopps@labn.net>
#
# Copyright (c) 2021, LabN Consulting, L.L.C
# Copyright (c) 2021-2023, LabN Consulting, L.L.C
#
import glob
import logging
import ipaddress
import os
import signal
import time
from lib.micronet import LinuxNamespace, Micronet
from lib.micronet_cli import cli
def get_pids_with_env(has_var, has_val=None):
    """Scan /proc and return {pid: environ-dict} for every process whose
    environment defines *has_var* (and, if given, whose value equals
    str(*has_val*))."""
    matches = {}
    for envpath in glob.iglob("/proc/*/environ"):
        pid = envpath.split("/")[2]
        try:
            with open(envpath, "rb") as f:
                pairs = [
                    x.decode("utf-8").split("=", 1) for x in f.read().split(b"\0")
                ]
            env = dict((v[0], "") if len(v) == 1 else (v[0], v[1]) for v in pairs)
            if has_var not in env:
                continue
            if has_val is None or env[has_var] == str(has_val):
                matches[pid] = env
        except Exception:
            # E.g., the process exited and its /proc files are gone.
            pass
    return matches
def _kill_piddict(pids_by_upid, sig):
for upid, pids in pids_by_upid:
logging.info(
"Sending %s to (%s) of micronet pid %s", sig, ", ".join(pids), upid
)
for pid in pids:
try:
os.kill(int(pid), sig)
except Exception:
pass
def _get_our_pids():
    """Return {ourpid: [child pids]} for micronet processes spawned by
    this process, or {} when there are none."""
    mypid = str(os.getpid())
    children = [p for p in get_pids_with_env("MICRONET_PID", mypid) if p != mypid]
    return {mypid: children} if children else {}
def _get_other_pids():
    """Return {micronet-pid: {child pids}} for abandoned micronet runs,
    i.e. where the micronet parent itself is no longer running."""
    piddict = get_pids_with_env("MICRONET_PID")
    by_upid = {}
    for pid, env in piddict.items():
        by_upid.setdefault(env["MICRONET_PID"], set()).add(pid)
    # Filter out any child pid sets whose micronet pid is still running.
    return {upid: kids for upid, kids in by_upid.items() if upid not in kids}
def _get_pids_by_upid(ours):
    """Dispatch to our-pids or other-pids collection based on *ours*."""
    return _get_our_pids() if ours else _get_other_pids()
def _cleanup_pids(ours):
    """SIGTERM the targeted micronet processes, allow up to 5 seconds for
    a clean exit, then SIGKILL whatever is left."""
    targets = _get_pids_by_upid(ours).items()
    if not targets:
        return

    _kill_piddict(targets, signal.SIGTERM)

    # Give them 5 second to exit cleanly
    logging.info("Waiting up to 5s to allow for clean exit of abandon'd pids")
    for _ in range(0, 5):
        targets = _get_pids_by_upid(ours).items()
        if not targets:
            return
        time.sleep(1)

    # Hard kill anything that survived the grace period.
    targets = _get_pids_by_upid(ours).items()
    _kill_piddict(targets, signal.SIGKILL)
def cleanup_current():
    """Attempt to clean up this run's leftover micronet processes.

    Currently this only scans for old processes.
    """
    logging.info("reaping current micronet processes")
    _cleanup_pids(True)
def cleanup_previous():
    """Attempt to clean up abandoned micronet processes from previous runs.

    Currently this only scans for old processes.
    """
    logging.info("reaping past micronet processes")
    _cleanup_pids(False)
from munet import cli
from munet.base import BaseMunet, LinuxNamespace
class Node(LinuxNamespace):
"""Node (mininet compat)."""
def __init__(self, name, **kwargs):
"""
Create a Node.
"""
self.params = kwargs
def __init__(self, name, rundir=None, **kwargs):
nkwargs = {}
if "unet" in kwargs:
nkwargs["unet"] = kwargs["unet"]
if "private_mounts" in kwargs:
private_mounts = kwargs["private_mounts"]
else:
private_mounts = kwargs.get("privateDirs", [])
nkwargs["private_mounts"] = kwargs["private_mounts"]
logger = kwargs.get("logger")
# This is expected by newer munet CLI code
self.config_dirname = ""
self.config = {"kind": "frr"}
self.mgmt_ip = None
self.mgmt_ip6 = None
super(Node, self).__init__(name, logger=logger, private_mounts=private_mounts)
super().__init__(name, **nkwargs)
self.rundir = self.unet.rundir.joinpath(self.name)
def cmd(self, cmd, **kwargs):
"""Execute a command, joins stdout, stderr, ignores exit status."""
return super(Node, self).cmd_legacy(cmd, **kwargs)
def config(self, lo="up", **params):
def config_host(self, lo="up", **params):
"""Called by Micronet when topology is built (but not started)."""
# mininet brings up loopback here.
del params
@ -148,20 +51,76 @@ class Node(LinuxNamespace):
def terminate(self):
return
def add_vlan(self, vlanname, linkiface, vlanid):
self.logger.debug("Adding VLAN interface: %s (%s)", vlanname, vlanid)
ip_path = self.get_exec_path("ip")
assert ip_path, "XXX missing ip command!"
self.cmd_raises(
[
ip_path,
"link",
"add",
"link",
linkiface,
"name",
vlanname,
"type",
"vlan",
"id",
vlanid,
]
)
self.cmd_raises([ip_path, "link", "set", "dev", vlanname, "up"])
def add_loop(self, loopname):
self.logger.debug("Adding Linux iface: %s", loopname)
ip_path = self.get_exec_path("ip")
assert ip_path, "XXX missing ip command!"
self.cmd_raises([ip_path, "link", "add", loopname, "type", "dummy"])
self.cmd_raises([ip_path, "link", "set", "dev", loopname, "up"])
def add_l3vrf(self, vrfname, tableid):
self.logger.debug("Adding Linux VRF: %s", vrfname)
ip_path = self.get_exec_path("ip")
assert ip_path, "XXX missing ip command!"
self.cmd_raises(
[ip_path, "link", "add", vrfname, "type", "vrf", "table", tableid]
)
self.cmd_raises([ip_path, "link", "set", "dev", vrfname, "up"])
def del_iface(self, iface):
self.logger.debug("Removing Linux Iface: %s", iface)
ip_path = self.get_exec_path("ip")
assert ip_path, "XXX missing ip command!"
self.cmd_raises([ip_path, "link", "del", iface])
def attach_iface_to_l3vrf(self, ifacename, vrfname):
self.logger.debug("Attaching Iface %s to Linux VRF %s", ifacename, vrfname)
ip_path = self.get_exec_path("ip")
assert ip_path, "XXX missing ip command!"
if vrfname:
self.cmd_raises(
[ip_path, "link", "set", "dev", ifacename, "master", vrfname]
)
else:
self.cmd_raises([ip_path, "link", "set", "dev", ifacename, "nomaster"])
set_cwd = LinuxNamespace.set_ns_cwd
class Topo(object): # pylint: disable=R0205
def __init__(self, *args, **kwargs):
raise Exception("Remove Me")
class Mininet(Micronet):
class Mininet(BaseMunet):
"""
Mininet using Micronet.
"""
g_mnet_inst = None
def __init__(self):
def __init__(self, rundir=None):
"""
Create a Micronet.
"""
@ -179,7 +138,146 @@ class Mininet(Micronet):
# to set permissions to root:frr 770 to make this unneeded in that case
# os.umask(0)
super(Mininet, self).__init__()
super(Mininet, self).__init__(pid=False, rundir=rundir)
# From munet/munet/native.py
with open(os.path.join(self.rundir, "nspid"), "w", encoding="ascii") as f:
f.write(f"{self.pid}\n")
with open(os.path.join(self.rundir, "nspids"), "w", encoding="ascii") as f:
f.write(f'{" ".join([str(x) for x in self.pids])}\n')
hosts_file = os.path.join(self.rundir, "hosts.txt")
with open(hosts_file, "w", encoding="ascii") as hf:
hf.write(
f"""127.0.0.1\tlocalhost {self.name}
::1\tip6-localhost ip6-loopback
fe00::0\tip6-localnet
ff00::0\tip6-mcastprefix
ff02::1\tip6-allnodes
ff02::2\tip6-allrouters
"""
)
self.bind_mount(hosts_file, "/etc/hosts")
# Common CLI commands for any topology
cdict = {
"commands": [
#
# Window commands.
#
{
"name": "pcap",
"format": "pcap NETWORK",
"help": (
"capture packets from NETWORK into file capture-NETWORK.pcap"
" the command is run within a new window which also shows"
" packet summaries. NETWORK can also be an interface specified"
" as HOST:INTF. To capture inside the host namespace."
),
"exec": "tshark -s 9200 -i {0} -P -w capture-{0}.pcap",
"top-level": True,
"new-window": {"background": True},
},
{
"name": "term",
"format": "term HOST [HOST ...]",
"help": "open terminal[s] (TMUX or XTerm) on HOST[S], * for all",
"exec": "bash",
"new-window": True,
},
{
"name": "vtysh",
"exec": "/usr/bin/vtysh",
"format": "vtysh ROUTER [ROUTER ...]",
"new-window": True,
"kinds": ["frr"],
},
{
"name": "xterm",
"format": "xterm HOST [HOST ...]",
"help": "open XTerm[s] on HOST[S], * for all",
"exec": "bash",
"new-window": {
"forcex": True,
},
},
{
"name": "logd",
"exec": "tail -F %RUNDIR%/{}.log",
"format": "logd HOST [HOST ...] DAEMON",
"help": (
"tail -f on the logfile of the given "
"DAEMON for the given HOST[S]"
),
"new-window": True,
},
{
"name": "stdlog",
"exec": (
"[ -e %RUNDIR%/frr.log ] && tail -F %RUNDIR%/frr.log "
"|| tail -F /var/log/frr.log"
),
"format": "stdlog HOST [HOST ...]",
"help": "tail -f on the `frr.log` for the given HOST[S]",
"new-window": True,
},
{
"name": "stdout",
"exec": "tail -F %RUNDIR%/{0}.err",
"format": "stdout HOST [HOST ...] DAEMON",
"help": (
"tail -f on the stdout of the given DAEMON for the given HOST[S]"
),
"new-window": True,
},
{
"name": "stderr",
"exec": "tail -F %RUNDIR%/{0}.out",
"format": "stderr HOST [HOST ...] DAEMON",
"help": (
"tail -f on the stderr of the given DAEMON for the given HOST[S]"
),
"new-window": True,
},
#
# Non-window commands.
#
{
"name": "",
"exec": "vtysh -c '{}'",
"format": "[ROUTER ...] COMMAND",
"help": "execute vtysh COMMAND on the router[s]",
"kinds": ["frr"],
},
{
"name": "sh",
"format": "[HOST ...] sh <SHELL-COMMAND>",
"help": "execute <SHELL-COMMAND> on hosts",
"exec": "{}",
},
{
"name": "shi",
"format": "[HOST ...] shi <INTERACTIVE-COMMAND>",
"help": "execute <INTERACTIVE-COMMAND> on HOST[s]",
"exec": "{}",
"interactive": True,
},
]
}
cli.add_cli_config(self, cdict)
# shellopt = (
# self.pytest_config.getoption("--shell") if self.pytest_config else None
# )
# shellopt = shellopt if shellopt is not None else ""
# if shellopt == "all" or "." in shellopt.split(","):
# self.run_in_window("bash")
# This is expected by newer munet CLI code
self.config_dirname = ""
self.config = {}
self.logger.debug("%s: Creating", self)
@ -217,12 +315,15 @@ class Mininet(Micronet):
host.cmd_raises("ip addr add {}/{} dev {}".format(ip, plen, first_intf))
# can be used by munet cli
host.mgmt_ip = ipaddress.ip_address(ip)
if "defaultRoute" in params:
host.cmd_raises(
"ip route add default {}".format(params["defaultRoute"])
)
host.config()
host.config_host()
self.configured_hosts.add(name)
@ -248,4 +349,4 @@ class Mininet(Micronet):
Mininet.g_mnet_inst = None
def cli(self):
cli(self)
cli.cli(self)

View file

@ -213,7 +213,7 @@ class Topogen(object):
# Mininet(Micronet) to build the actual topology.
assert not inspect.isclass(topodef)
self.net = Mininet()
self.net = Mininet(rundir=self.logdir)
# Adjust the parent namespace
topotest.fix_netns_limits(self.net)
@ -753,8 +753,8 @@ class TopoRouter(TopoGear):
"""
super(TopoRouter, self).__init__(tgen, name, **params)
self.routertype = params.get("routertype", "frr")
if "privateDirs" not in params:
params["privateDirs"] = self.PRIVATE_DIRS
if "private_mounts" not in params:
params["private_mounts"] = self.PRIVATE_DIRS
# Propagate the router log directory
logfile = self._setup_tmpdir()
@ -1103,7 +1103,7 @@ class TopoHost(TopoGear):
* `ip`: the IP address (string) for the host interface
* `defaultRoute`: the default route that will be installed
(e.g. 'via 10.0.0.1')
* `privateDirs`: directories that will be mounted on a different domain
* `private_mounts`: directories that will be mounted on a different domain
(e.g. '/etc/important_dir').
"""
super(TopoHost, self).__init__(tgen, name, **params)
@ -1123,10 +1123,10 @@ class TopoHost(TopoGear):
def __str__(self):
gear = super(TopoHost, self).__str__()
gear += ' TopoHost<ip="{}",defaultRoute="{}",privateDirs="{}">'.format(
gear += ' TopoHost<ip="{}",defaultRoute="{}",private_mounts="{}">'.format(
self.params["ip"],
self.params["defaultRoute"],
str(self.params["privateDirs"]),
str(self.params["private_mounts"]),
)
return gear
@ -1149,10 +1149,10 @@ class TopoExaBGP(TopoHost):
(e.g. 'via 10.0.0.1')
Note: the different between a host and a ExaBGP peer is that this class
has a privateDirs already defined and contains functions to handle ExaBGP
things.
has a private_mounts already defined and contains functions to handle
ExaBGP things.
"""
params["privateDirs"] = self.PRIVATE_DIRS
params["private_mounts"] = self.PRIVATE_DIRS
super(TopoExaBGP, self).__init__(tgen, name, **params)
def __str__(self):

View file

@ -1318,7 +1318,7 @@ def setup_node_tmpdir(logdir, name):
class Router(Node):
"A Node with IPv4/IPv6 forwarding enabled"
def __init__(self, name, **params):
def __init__(self, name, *posargs, **params):
# Backward compatibility:
# Load configuration defaults like topogen.
@ -1347,7 +1347,7 @@ class Router(Node):
l = topolog.get_logger(name, log_level="debug", target=logfile)
params["logger"] = l
super(Router, self).__init__(name, **params)
super(Router, self).__init__(name, *posargs, **params)
self.daemondir = None
self.hasmpls = False
@ -1407,8 +1407,8 @@ class Router(Node):
# pylint: disable=W0221
# Some params are only meaningful for the parent class.
def config(self, **params):
super(Router, self).config(**params)
def config_host(self, **params):
super(Router, self).config_host(**params)
# User did not specify the daemons directory, try to autodetect it.
self.daemondir = params.get("daemondir")

View file

@ -0,0 +1,38 @@
# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: GPL-2.0-or-later
#
# September 30 2021, Christian Hopps <chopps@labn.net>
#
# Copyright 2021, LabN Consulting, L.L.C.
#
"""A module to import various objects to root namespace."""
from .base import BaseMunet
from .base import Bridge
from .base import Commander
from .base import LinuxNamespace
from .base import SharedNamespace
from .base import cmd_error
from .base import comm_error
from .base import get_exec_path
from .base import proc_error
from .native import L3Bridge
from .native import L3NamespaceNode
from .native import Munet
from .native import to_thread
__all__ = [
"BaseMunet",
"Bridge",
"Commander",
"L3Bridge",
"L3NamespaceNode",
"LinuxNamespace",
"Munet",
"SharedNamespace",
"cmd_error",
"comm_error",
"get_exec_path",
"proc_error",
"to_thread",
]

View file

@@ -0,0 +1,236 @@
# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: GPL-2.0-or-later
#
# September 2 2021, Christian Hopps <chopps@labn.net>
#
# Copyright 2021, LabN Consulting, L.L.C.
#
"""The main function for standalone operation."""
import argparse
import asyncio
import logging
import logging.config
import os
import subprocess
import sys
from . import cli
from . import parser
from .base import get_event_loop
from .cleanup import cleanup_previous
from .compat import PytestConfig
logger = None
async def forever():
    """Sleep indefinitely; awaited to keep the event loop alive until a
    signal/interrupt ends the run."""
    one_hour = 3600
    while True:
        await asyncio.sleep(one_hour)
async def run_and_wait(args, unet):
    """Run the topology's node commands and wait for an exit condition.

    With an interactive tty (and no --no-cli) we wait on the CLI;
    otherwise we wait for all node commands (--no-wait) or sleep forever
    until signaled.
    """
    waits = []
    if not args.topology_only:
        # add the cmd.wait()s returned from unet.run()
        waits += await unet.run()

    if sys.stdin.isatty() and not args.no_cli:
        # Run an interactive CLI
        task = cli.async_cli(unet)
    elif args.no_wait:
        logger.info("Waiting for all node cmd to complete")
        task = asyncio.gather(*waits, return_exceptions=True)
    else:
        logger.info("Waiting on signal to exit")
        keepalive = asyncio.create_task(forever())
        task = asyncio.gather(keepalive, *waits, return_exceptions=True)

    try:
        await task
    finally:
        # We stop *waiting* on the node commands here (the waits are just
        # async cmd_p.wait() calls); the underlying processes are not
        # actually canceled/killed by this.
        for w in waits:
            w.cancel()
async def async_main(args, config):
    """Build the topology, run it, then tear it down; return exit status."""
    status = 3

    # Setup the namespaces and network addressing.
    unet = await parser.async_build_topology(
        config, rundir=args.rundir, args=args, pytestconfig=PytestConfig(args)
    )
    logger.info("Topology up: rundir: %s", unet.rundir)

    try:
        status = await run_and_wait(args, unet)
    except KeyboardInterrupt:
        logger.info("Exiting, received KeyboardInterrupt in async_main")
    except asyncio.CancelledError as ex:
        logger.info("task canceled error: %s cleaning up", ex)
    except Exception as error:
        logger.info("Exiting, unexpected exception %s", error, exc_info=True)
    else:
        logger.info("Exiting normally")

    # Always attempt teardown, downgrading status on cleanup failures.
    logger.debug("main: async deleting")
    try:
        await unet.async_delete()
    except KeyboardInterrupt:
        status = 2
        logger.warning("Received KeyboardInterrupt while cleaning up.")
    except Exception as error:
        status = 2
        logger.info("Deleting, unexpected exception %s", error, exc_info=True)

    return status
def main(*args):
    """Entry point for standalone munet.

    Parses command line options, optionally cleans the rundir, builds and
    runs the topology on an event loop, and returns a process exit status
    (0 success; 1 config error; 2 cleanup error; 3/4 runtime error).
    """
    ap = argparse.ArgumentParser(args)

    cap = ap.add_argument_group(title="Config", description="config related options")
    cap.add_argument("-c", "--config", help="config file (yaml, toml, json, ...)")
    cap.add_argument(
        "-d", "--rundir", help="runtime directory for tempfiles, logs, etc"
    )
    cap.add_argument(
        "--kinds-config",
        help="kinds config file, overrides default search (yaml, toml, json, ...)",
    )
    cap.add_argument(
        "--project-root", help="directory to stop searching for kinds config at"
    )

    rap = ap.add_argument_group(title="Runtime", description="runtime related options")
    rap.add_argument(
        "-C",
        "--cleanup",
        action="store_true",
        help="Remove the entire rundir (not just node subdirs) prior to running.",
    )
    rap.add_argument(
        "--gdb", metavar="NODE-LIST", help="comma-sep list of hosts to run gdb on"
    )
    rap.add_argument(
        "--gdb-breakpoints",
        metavar="BREAKPOINT-LIST",
        help="comma-sep list of breakpoints to set",
    )
    rap.add_argument(
        "--host",
        action="store_true",
        help="no isolation for top namespace, bridges exposed to default namespace",
    )
    rap.add_argument(
        "--pcap",
        metavar="TARGET-LIST",
        help="comma-sep list of capture targets (NETWORK or NODE:IFNAME)",
    )
    rap.add_argument(
        "--shell", metavar="NODE-LIST", help="comma-sep list of nodes to open shells on"
    )
    rap.add_argument(
        "--stderr",
        metavar="NODE-LIST",
        help="comma-sep list of nodes to open windows viewing stderr",
    )
    rap.add_argument(
        "--stdout",
        metavar="NODE-LIST",
        help="comma-sep list of nodes to open windows viewing stdout",
    )
    rap.add_argument(
        "--topology-only",
        action="store_true",
        help="Do not run any node commands",
    )
    rap.add_argument("--unshare-inline", action="store_true", help=argparse.SUPPRESS)
    rap.add_argument(
        "--validate-only",
        action="store_true",
        help="Validate the config against the schema definition",
    )
    rap.add_argument("-v", "--verbose", action="store_true", help="be verbose")
    rap.add_argument(
        # Fix: help text said "verison".
        "-V", "--version", action="store_true", help="print the version number and exit"
    )

    eap = ap.add_argument_group(title="Uncommon", description="uncommonly used options")
    eap.add_argument("--log-config", help="logging config file (yaml, toml, json, ...)")
    eap.add_argument(
        "--no-kill",
        action="store_true",
        help="Do not kill previous running processes",
    )
    eap.add_argument(
        "--no-cli", action="store_true", help="Do not run the interactive CLI"
    )
    eap.add_argument("--no-wait", action="store_true", help="Exit after commands")

    args = ap.parse_args()

    if args.version:
        from importlib import metadata  # pylint: disable=C0415

        print(metadata.version("munet"))
        sys.exit(0)

    rundir = args.rundir if args.rundir else "/tmp/munet"
    args.rundir = rundir

    if args.cleanup:
        if os.path.exists(rundir):
            # Refuse to remove a directory that does not look like one of
            # our previous rundirs.
            if not os.path.exists(f"{rundir}/config.json"):
                logging.critical(
                    'unsafe: won\'t clean up rundir "%s" as '
                    "previous config.json not present",
                    rundir,
                )
                sys.exit(1)
            else:
                subprocess.run(["/usr/bin/rm", "-rf", rundir], check=True)

    subprocess.run(f"mkdir -p {rundir} && chmod 755 {rundir}", check=True, shell=True)
    os.environ["MUNET_RUNDIR"] = rundir

    parser.setup_logging(args)

    global logger  # pylint: disable=W0603
    logger = logging.getLogger("munet")

    config = parser.get_config(args.config)
    logger.info("Loaded config from %s", config["config_pathname"])
    if not config["topology"]["nodes"]:
        logger.critical("No nodes defined in config file")
        return 1

    if not args.no_kill:
        cleanup_previous()

    loop = None
    status = 4
    try:
        parser.validate_config(config, logger, args)
        if args.validate_only:
            return 0
        # Executes the cmd for each node.
        loop = get_event_loop()
        status = loop.run_until_complete(async_main(args, config))
    except KeyboardInterrupt:
        logger.info("Exiting, received KeyboardInterrupt in main")
    except Exception as error:
        logger.info("Exiting, unexpected exception %s", error, exc_info=True)
    finally:
        if loop:
            loop.close()

    return status
if __name__ == "__main__":
    # Propagate main()'s status as the process exit code.
    sys.exit(main())

File diff suppressed because it is too large Load diff

View file

@@ -0,0 +1,114 @@
# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: GPL-2.0-or-later
#
# September 30 2021, Christian Hopps <chopps@labn.net>
#
# Copyright 2021, LabN Consulting, L.L.C.
#
"""Provides functionality to cleanup processes on posix systems."""
import glob
import logging
import os
import signal
def get_pids_with_env(has_var, has_val=None):
    """Return {pid: environ-dict} for every /proc process whose environment
    defines *has_var* (and, if given, whose value equals str(*has_val*))."""
    found = {}
    for envfile in glob.iglob("/proc/*/environ"):
        pid = envfile.split("/")[2]
        try:
            with open(envfile, "rb") as rfb:
                entries = [
                    x.decode("utf-8").split("=", 1) for x in rfb.read().split(b"\0")
                ]
            environ = dict((e[0], "") if len(e) == 1 else (e[0], e[1]) for e in entries)
            if has_var not in environ:
                continue
            if has_val is None or environ[has_var] == str(has_val):
                found[pid] = environ
        except Exception:
            # E.g., the process exited and its /proc files are gone.
            pass
    return found
def _kill_piddict(pids_by_upid, sig):
ourpid = str(os.getpid())
for upid, pids in pids_by_upid:
logging.info("Sending %s to (%s) of munet pid %s", sig, ", ".join(pids), upid)
for pid in pids:
try:
if pid != ourpid:
cmdline = open(f"/proc/{pid}/cmdline", "r", encoding="ascii").read()
cmdline = cmdline.replace("\x00", " ")
logging.info("killing proc %s (%s)", pid, cmdline)
os.kill(int(pid), sig)
except Exception:
pass
def _get_our_pids():
    """Return {ourpid: [child pids]} for munet processes spawned by this
    process, or {} when there are none."""
    mypid = str(os.getpid())
    children = [p for p in get_pids_with_env("MUNET_PID", mypid) if p != mypid]
    return {mypid: children} if children else {}
def _get_other_pids():
    """Return {munet-pid: {child pids}} for abandoned munet runs, i.e.
    where the munet parent itself is no longer running."""
    piddict = get_pids_with_env("MUNET_PID")
    by_upid = {}
    for pid, env in piddict.items():
        by_upid.setdefault(env["MUNET_PID"], set()).add(pid)
    # Filter out any child pid sets whose munet pid is still running.
    return {upid: kids for upid, kids in by_upid.items() if upid not in kids}
def _get_pids_by_upid(ours):
    """Dispatch to our-pids or other-pids collection based on *ours*."""
    return _get_our_pids() if ours else _get_other_pids()
def _cleanup_pids(ours):
    """Forcibly (SIGKILL) reap leftover munet processes.

    NOTE: the graceful SIGTERM-then-wait-5s phase present in earlier
    versions is currently disabled; we go straight to SIGKILL.
    """
    if not _get_pids_by_upid(ours):
        return

    which = "current" if ours else "previous"
    logging.info("Reaping %s munet processes", which)

    # Re-collect immediately before killing to minimize staleness.
    _kill_piddict(_get_pids_by_upid(ours).items(), signal.SIGKILL)
def cleanup_current():
    """Attempt to clean up this run's leftover munet processes.

    Currently this only scans for old processes.
    """
    _cleanup_pids(True)
def cleanup_previous():
    """Attempt to clean up abandoned munet processes from previous runs.

    Currently this only scans for old processes.
    """
    _cleanup_pids(False)

View file

@@ -0,0 +1,939 @@
# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: GPL-2.0-or-later
#
# July 24 2021, Christian Hopps <chopps@labn.net>
#
# Copyright 2021, LabN Consulting, L.L.C.
#
"""A module that implements a CLI."""
import argparse
import asyncio
import functools
import logging
import multiprocessing
import os
import pty
import re
import readline
import select
import shlex
import socket
import subprocess
import sys
import tempfile
import termios
import tty
from . import linux
from .config import list_to_dict_with_key
ENDMARKER = b"\x00END\x00"
logger = logging.getLogger(__name__)
def lineiter(sock):
    """Yield one decoded text line per successful recv() on *sock*.

    Stops on EOF (empty recv).  At most one line is emitted per recv()
    call; extra buffered lines wait for the next chunk (legacy behavior).
    """
    pending = ""
    while True:
        chunk = sock.recv(256)
        if not chunk:
            return
        pending += chunk.decode("utf-8")
        nl = pending.find("\n")
        if nl == -1:
            continue
        yield pending[:nl]
        pending = pending[nl + 1 :]
# Would be nice to convert to async, but really not needed as used
def spawn(unet, host, cmd, iow, ns_only):
    """Interactively run `cmd` on `host` over a pty, relaying user I/O.

    Puts the controlling terminal into raw mode (if stdin is a tty),
    allocates a pty, launches the command in the target namespace, then
    shuttles bytes between the user's stdin and the pty master until the
    child exits.  Terminal settings are restored on exit.

    Args:
        unet: the Munet object (also acts as the "host" for top-level runs).
        host: key into `unet.hosts`, or `unet` itself for the top level.
        cmd: command string to execute.
        iow: writable text stream that receives the child's output.
        ns_only: use popen_nsonly (namespace only) instead of popen.
    """
    if sys.stdin.isatty():
        old_tty = termios.tcgetattr(sys.stdin)
        tty.setraw(sys.stdin.fileno())
    try:
        master_fd, slave_fd = pty.openpty()

        ns = unet.hosts[host] if host and host != unet else unet
        popenf = ns.popen_nsonly if ns_only else ns.popen

        # use os.setsid() make it run in a new process group, or bash job
        # control will not be enabled
        p = popenf(
            cmd,
            # _common_prologue, later in call chain, only does this for use_pty == False
            preexec_fn=os.setsid,
            stdin=slave_fd,
            stdout=slave_fd,
            stderr=slave_fd,
            universal_newlines=True,
            use_pty=True,
            # XXX this is actually implementing "run on host" for real
            # skip_pre_cmd=ns_only,
        )
        # Nudge the terminal so the child's prompt renders immediately.
        iow.write("\r")
        iow.flush()

        # Relay loop: poll child liveness every 0.25s while multiplexing
        # user keystrokes to the pty and pty output back to `iow`.
        while p.poll() is None:
            r, _, _ = select.select([sys.stdin, master_fd], [], [], 0.25)
            if sys.stdin in r:
                d = os.read(sys.stdin.fileno(), 10240)
                os.write(master_fd, d)
            elif master_fd in r:
                o = os.read(master_fd, 10240)
                if o:
                    iow.write(o.decode("utf-8"))
                    iow.flush()
    finally:
        # restore tty settings back
        if sys.stdin.isatty():
            termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_tty)
def is_host_regex(restr):
    """Return True if `restr` is a slash-delimited host regex like '/rtr.*/'."""
    return len(restr) > 2 and restr.startswith("/") and restr.endswith("/")
def get_host_regex(restr):
    """Compile and return a slash-delimited host regex, or None if not one."""
    if len(restr) >= 3 and restr.startswith("/") and restr.endswith("/"):
        return re.compile(restr[1:-1])
    return None
def host_in(restr, names):
    """Determine if `restr` (a plain name or /regex/) matches one of `names`."""
    regexp = get_host_regex(restr)
    if regexp is None:
        return restr in names
    return any(regexp.fullmatch(name) for name in names)
def expand_host(restr, names):
    """Expand a name or /regex/ into a sorted list of matching hosts."""
    regexp = get_host_regex(restr)
    if regexp is None:
        assert restr in names
        return sorted([restr])
    return sorted(name for name in names if regexp.fullmatch(name))
def expand_hosts(restrs, names):
    """Expand a list of host names or regexps into a sorted list of hosts."""
    result = []
    for matcher in restrs:
        result.extend(expand_host(matcher, names))
    return sorted(result)
def host_cmd_split(unet, line, toplevel):
    """Split a CLI line into target hosts, command, and remaining input.

    Leading tokens that name hosts (or /regex/ matchers, or the wildcards
    '*' and '.') select the targets; the first non-host token is the command.

    Args:
        unet: the Munet object; `unet.hosts` holds known host names.
        line: the raw input line.
        toplevel: when no hosts are named, target `unet` itself instead of
            all hosts.

    Returns:
        (hosts, cmd, remaining-line, banner) where banner indicates whether
        per-host output banners should be printed.
    """
    all_hosts = set(unet.hosts)
    csplit = line.split()
    i = 0
    banner = False
    for i, e in enumerate(csplit):
        if is_re := is_host_regex(e):
            banner = True
        if not host_in(e, all_hosts):
            if not is_re:
                break
    else:
        # Loop completed without break: every token was a host matcher.
        i += 1

    if i == 0 and csplit and csplit[0] == "*":
        hosts = sorted(all_hosts)
        csplit = csplit[1:]
        banner = True
    elif i == 0 and csplit and csplit[0] == ".":
        hosts = [unet]
        csplit = csplit[1:]
    else:
        hosts = expand_hosts(csplit[:i], all_hosts)
        csplit = csplit[i:]

    if not hosts and not csplit[:i]:
        # No hosts were named at all: default to unet (top-level) or to all
        # hosts with banners.
        if toplevel:
            hosts = [unet]
        else:
            hosts = sorted(all_hosts)
            banner = True

    if not csplit:
        return hosts, "", "", True

    # Recover the untokenized remainder of the line after the command word.
    i = line.index(csplit[0])
    i += len(csplit[0])
    return hosts, csplit[0], line[i:].strip(), banner
def win_cmd_host_split(unet, cmd, kinds, defall):
    """Split a new-window command line into target hosts and the command.

    Args:
        unet: the Munet object.
        cmd: user input following the window-command name.
        kinds: if non-empty, restrict candidate hosts to these kinds.
        defall: when no hosts are named, default to all candidate hosts.

    Returns:
        (hosts, cmd) where cmd is the remaining command string.
    """
    if kinds:
        all_hosts = {
            x for x in unet.hosts if unet.hosts[x].config.get("kind", "") in kinds
        }
    else:
        all_hosts = set(unet.hosts)

    csplit = cmd.split()
    i = 0
    for i, e in enumerate(csplit):
        if not host_in(e, all_hosts):
            if not is_host_regex(e):
                break
    else:
        # All tokens were host matchers.
        i += 1

    if i == 0 and csplit and csplit[0] == "*":
        hosts = sorted(all_hosts)
        csplit = csplit[1:]
    elif i == 0 and csplit and csplit[0] == ".":
        hosts = [unet]
        csplit = csplit[1:]
    else:
        hosts = expand_hosts(csplit[:i], all_hosts)

    if not hosts and defall and not csplit[:i]:
        hosts = sorted(all_hosts)

    # Filter hosts based on cmd
    cmd = " ".join(csplit[i:])
    return hosts, cmd
def proc_readline(fd, prompt, histfile):
    """Read a line of input from user while running in a sub-process.

    The outcome is reported to the parent over pipe `fd` using a one-byte
    prefix protocol: ":<line>\\n" on success, bare "\\n" on EOF, "I\\n" on
    interrupt, and "E<error>\\n" on any other exception.
    """
    # How do we change the command though, that's what's displayed in ps normally
    linux.set_process_name("Munet CLI")
    try:
        # For some reason sys.stdin is fileno == 16 and useless
        sys.stdin = os.fdopen(0)
        histfile = init_history(None, histfile)
        line = input(prompt)
        readline.write_history_file(histfile)
        # NOTE(review): input() never returns None (it raises EOFError), so
        # this branch looks dead; if it could fire, execution would fall
        # through and also write ":None" — confirm intent.
        if line is None:
            os.write(fd, b"\n")
        os.write(fd, bytes(f":{str(line)}\n", encoding="utf-8"))
    except EOFError:
        os.write(fd, b"\n")
    except KeyboardInterrupt:
        os.write(fd, b"I\n")
    except Exception as error:
        os.write(fd, bytes(f"E{str(error)}\n", encoding="utf-8"))
async def async_input_reader(rfd):
    """Read a line of input from the user input sub-process pipe.

    Decodes the prefix protocol written by `proc_readline`: ":" carries a
    user line, "I" re-raises KeyboardInterrupt, "E" re-raises a generic
    exception, and an empty line means EOF (returns None).
    """
    rpipe = os.fdopen(rfd, mode="r")
    reader = asyncio.StreamReader()

    def protocol_factory():
        return asyncio.StreamReaderProtocol(reader)

    loop = asyncio.get_event_loop()
    transport, _ = await loop.connect_read_pipe(protocol_factory, rpipe)
    o = await reader.readline()
    transport.close()

    o = o.decode("utf-8").strip()
    if not o:
        return None
    if o[0] == "I":
        raise KeyboardInterrupt()
    if o[0] == "E":
        raise Exception(o[1:])
    assert o[0] == ":"
    return o[1:]
#
# A lot of work to add async `input` handling without creating a thread. We cannot use
# threads when unshare_inline is used with pid namespace per kernel clone(2)
# restriction.
#
#
# A lot of work to add async `input` handling without creating a thread. We cannot use
# threads when unshare_inline is used with pid namespace per kernel clone(2)
# restriction.
#
async def async_input(prompt, histfile):
    """Asynchronously read a line from the user.

    Spawns a sub-process to run the blocking readline `input()` and awaits
    its result over a pipe (see `proc_readline`/`async_input_reader`).
    A process is used instead of a thread because threads are unusable
    after unshare of a pid namespace (per the comment above).
    """
    rfd, wfd = os.pipe()
    p = multiprocessing.Process(target=proc_readline, args=(wfd, prompt, histfile))
    p.start()
    logging.debug("started async_input input process: %s", p)
    try:
        return await async_input_reader(rfd)
    finally:
        logging.debug("joining async_input input process")
        p.join()
def make_help_str(unet):
    """Build the CLI help text from the builtins and registered commands."""
    win_cmds = unet.cli_in_window_cmds
    run_cmds = unet.cli_run_cmds
    # None keys (the default command) sort as "" so ordering is stable.
    win_names = sorted(name if name else "" for name in win_cmds)
    run_names = sorted(name if name else "" for name in run_cmds)

    win_lines = "\n".join(f"  {win_cmds[n][0]}\t:: {win_cmds[n][1]}" for n in win_names)
    run_lines = "\n".join(f"  {run_cmds[n][0]}\t:: {run_cmds[n][1]}" for n in run_names)

    return (
        """
Basic Commands:
  cli   :: open a secondary CLI window
  help  :: this help
  hosts :: list hosts
  quit  :: quit the cli

  HOST can be a host or one of the following:
    - '*' for all hosts
    - '.' for the parent munet
    - a regex specified between '/' (e.g., '/rtr.*/')

New Window Commands:\n"""
        + win_lines
        + """\nInline Commands:\n"""
        + run_lines
        + "\n"
    )
def get_shcmd(unet, host, kinds, execfmt, ucmd):
    """Build the concrete shell command for a host from an exec format.

    Substitution order: positional/single `{}` via str.format, else Python
    f-string style interpolation (via eval) with `host`, `unet` and
    `user_input` in scope, else the format is used verbatim.  Then munet
    variables (%CONFIGDIR%, %RUNDIR%, %NAME%, %IPADDR%, %IP6ADDR%) are
    replaced.

    Returns:
        The command string, or "" when the host's kind is filtered out or
        no exec format applies.
    """
    if host is None:
        h = None
        kind = None
    elif host is unet or host == "":
        h = unet
        kind = ""
    else:
        h = unet.hosts[host]
        kind = h.config.get("kind", "")
        if kinds and kind not in kinds:
            return ""
    if not isinstance(execfmt, str):
        # Per-kind exec formats: pick the entry for this host's kind.
        execfmt = execfmt.get(kind, {}).get("exec", "")
    if not execfmt:
        return ""

    # Do substitutions for {} in string
    numfmt = len(re.findall(r"{\d*}", execfmt))
    if numfmt > 1:
        ucmd = execfmt.format(*shlex.split(ucmd))
    elif numfmt:
        ucmd = execfmt.format(ucmd)
    elif len(re.findall(r"{[a-zA-Z_][0-9a-zA-Z_\.]*}", execfmt)):
        # f-string style interpolation; choose quoting that will not clash
        # with a trailing double quote in the format itself.
        if execfmt.endswith('"'):
            fstring = "f'''" + execfmt + "'''"
        else:
            fstring = 'f"""' + execfmt + '"""'
        ucmd = eval(  # pylint: disable=W0123
            fstring,
            globals(),
            {"host": h, "unet": unet, "user_input": ucmd},
        )
    else:
        # No variable or usercmd substitution at all.
        ucmd = execfmt

    # Do substitution for munet variables
    ucmd = ucmd.replace("%CONFIGDIR%", str(unet.config_dirname))
    if host is None or host is unet:
        ucmd = ucmd.replace("%RUNDIR%", str(unet.rundir))
        return ucmd.replace("%NAME%", ".")
    ucmd = ucmd.replace("%RUNDIR%", str(os.path.join(unet.rundir, host)))
    if h.mgmt_ip:
        ucmd = ucmd.replace("%IPADDR%", str(h.mgmt_ip))
    elif h.mgmt_ip6:
        ucmd = ucmd.replace("%IPADDR%", str(h.mgmt_ip6))
    if h.mgmt_ip6:
        ucmd = ucmd.replace("%IP6ADDR%", str(h.mgmt_ip6))
    return ucmd.replace("%NAME%", str(host))
async def run_command(
    unet,
    outf,
    line,
    execfmt,
    banner,
    hosts,
    toplevel,
    kinds,
    ns_only=False,
    interactive=False,
):
    """Runs a command on a set of hosts.

    Runs `execfmt`. Prior to executing the string the following transformations are
    performed on it.

    `execfmt` may also be a dictionary of dictionaries keyed on kind with `exec`
    holding the kind's execfmt string.

    - if `{}` is present then `str.format` is called to replace `{}` with any extra
      input values after the command and hosts are removed from the input.
    - else if any `{digits}` are present then `str.format` is called to replace
      `{digits}` with positional args obtained from the additional user input
      first passed to `shlex.split`.
    - else f-string style interpolation is performed on the string with
      the local variables `host` (the current node object or None),
      `unet` (the Munet object), and `user_input` (the additional command input)
      defined.

    The output is sent to `outf`. If `ns_only` is True then the `execfmt` is
    run using `Commander.cmd_status_nsonly` otherwise it is run with
    `Commander.cmd_status`.
    """
    if kinds:
        logging.info("Filtering hosts to kinds: %s", kinds)
        hosts = [x for x in hosts if unet.hosts[x].config.get("kind", "") in kinds]
        logging.info("Filtered hosts: %s", hosts)

    if not hosts:
        if not toplevel:
            return
        hosts = [unet]

    # if unknowns := [x for x in hosts if x not in unet.hosts]:
    #     outf.write("%% Unknown host[s]: %s\n" % ", ".join(unknowns))
    #     return

    # if sys.stdin.isatty() and interactive:
    if interactive:
        # Interactive commands run sequentially, each on its own pty.
        for host in hosts:
            shcmd = get_shcmd(unet, host, kinds, execfmt, line)
            if not shcmd:
                continue
            if len(hosts) > 1 or banner:
                outf.write(f"------ Host: {host} ------\n")
            spawn(unet, host if not toplevel else unet, shcmd, outf, ns_only)
            if len(hosts) > 1 or banner:
                outf.write(f"------- End: {host} ------\n")
        outf.write("\n")
        return

    # Non-interactive: launch on all hosts concurrently, then gather.
    aws = []
    for host in hosts:
        shcmd = get_shcmd(unet, host, kinds, execfmt, line)
        if not shcmd:
            continue
        if toplevel:
            ns = unet
        else:
            ns = unet.hosts[host] if host and host != unet else unet
        if ns_only:
            cmdf = ns.async_cmd_status_nsonly
        else:
            cmdf = ns.async_cmd_status
        aws.append(cmdf(shcmd, warn=False, stderr=subprocess.STDOUT))

    results = await asyncio.gather(*aws, return_exceptions=True)
    for host, result in zip(hosts, results):
        if isinstance(result, Exception):
            o = str(result) + "\n"
            rc = -1
        else:
            rc, o, _ = result
        if len(hosts) > 1 or banner:
            outf.write(f"------ Host: {host} ------\n")
        if rc:
            outf.write(f"*** non-zero exit status: {rc}\n")
        outf.write(o)
        if len(hosts) > 1 or banner:
            outf.write(f"------- End: {host} ------\n")
# Commands handled directly by the CLI loop itself.
cli_builtins = ["cli", "help", "hosts", "quit"]


class Completer:
    """A readline completer for the munet CLI.

    Offers builtin commands, host names, and the dynamically registered
    run/window commands, narrowing the candidate set based on what has
    already been typed on the line.
    """

    def __init__(self, unet):
        self.unet = unet

    def complete(self, text, state):
        """Return the state'th completion for `text`, or None when done."""
        line = readline.get_line_buffer()
        tokens = line.split()
        # print(f"\nXXX: tokens: {tokens} text: '{text}' state: {state}'\n")

        first_token = not tokens or (text and len(tokens) == 1)

        # If we have already have a builtin command we are done
        if tokens and tokens[0] in cli_builtins:
            return [None]

        cli_run_cmds = set(self.unet.cli_run_cmds.keys())
        top_run_cmds = {x for x in cli_run_cmds if self.unet.cli_run_cmds[x][3]}
        cli_run_cmds -= top_run_cmds
        cli_win_cmds = set(self.unet.cli_in_window_cmds.keys())
        hosts = set(self.unet.hosts.keys())
        is_window_cmd = bool(tokens) and tokens[0] in cli_win_cmds
        done_set = set()
        if bool(tokens):
            if text:
                done_set = set(tokens[:-1])
            else:
                done_set = set(tokens)

        # Determine the domain for completions.  Default to the empty set so
        # branches that offer no candidates cannot leave `all_cmds` unbound
        # (previously an UnboundLocalError when the first token was a
        # top-level run command or a run command was already on the line).
        all_cmds = set()
        if not tokens or first_token:
            all_cmds = (
                set(cli_builtins) | hosts | cli_run_cmds | cli_win_cmds | top_run_cmds
            )
        elif is_window_cmd:
            all_cmds = hosts
        elif tokens and tokens[0] in top_run_cmds:
            # nothing to complete if a top level command
            pass
        elif not bool(done_set & cli_run_cmds):
            all_cmds = hosts | cli_run_cmds

        if not text:
            completes = all_cmds
        else:
            # print(f"\nXXX: all_cmds: {all_cmds} text: '{text}'\n")
            completes = {x + " " for x in all_cmds if x.startswith(text)}

        # print(f"\nXXX: completes: {completes} text: '{text}' state: {state}'\n")
        # remove any completions already present
        completes -= done_set
        completes = sorted(completes) + [None]
        return completes[state]
async def doline(
    unet, line, outf, background=False, notty=False
):  # pylint: disable=R0911
    """Parse and execute one CLI input line.

    Handles builtins (quit/help/hosts/cli), then registered new-window
    commands, then inline run commands (dispatched via `run_command`).

    Returns:
        False when the user asked to quit, True otherwise.
    """
    line = line.strip()
    m = re.fullmatch(r"^(\S+)(?:\s+(.*))?$", line)
    if not m:
        return True

    cmd = m.group(1)
    nline = m.group(2) if m.group(2) else ""

    if cmd in ("q", "quit"):
        return False

    if cmd == "help":
        outf.write(make_help_str(unet))
        return True
    if cmd in ("h", "hosts"):
        outf.write(f"% Hosts:\t{' '.join(sorted(unet.hosts.keys()))}\n")
        return True
    if cmd == "cli":
        await remote_cli(
            unet,
            "secondary> ",
            "Secondary CLI",
            background,
        )
        return True

    #
    # In window commands
    #
    if cmd in unet.cli_in_window_cmds:
        execfmt, toplevel, kinds, kwargs = unet.cli_in_window_cmds[cmd][2:]

        # if toplevel:
        #     ucmd = " ".join(nline.split())
        # else:
        hosts, ucmd = win_cmd_host_split(unet, nline, kinds, False)
        if not hosts:
            if not toplevel:
                return True
            hosts = [unet]

        # Determine whether the exec format accepts user input ({}).
        if isinstance(execfmt, str):
            found_brace = "{}" in execfmt
        else:
            found_brace = False
            for d in execfmt.values():
                if "{}" in d["exec"]:
                    found_brace = True
                    break
        if not found_brace and ucmd and not toplevel:
            # CLI command does not expect user command so treat as hosts of which some
            # must be unknown
            unknowns = [x for x in ucmd.split() if x not in unet.hosts]
            outf.write(f"% Unknown host[s]: {' '.join(unknowns)}\n")
            return True

        try:
            if not hosts and toplevel:
                hosts = [unet]

            for host in hosts:
                shcmd = get_shcmd(unet, host, kinds, execfmt, ucmd)
                if toplevel or host == unet:
                    unet.run_in_window(shcmd, **kwargs)
                else:
                    unet.hosts[host].run_in_window(shcmd, **kwargs)
        except Exception as error:
            outf.write(f"% Error: {error}\n")
        return True

    #
    # Inline commands
    #
    toplevel = unet.cli_run_cmds[cmd][3] if cmd in unet.cli_run_cmds else False
    # if toplevel:
    #     logging.debug("top-level: cmd: '%s' nline: '%s'", cmd, nline)
    #     hosts = None
    #     banner = False
    # else:
    hosts, cmd, nline, banner = host_cmd_split(unet, line, toplevel)
    hoststr = "munet" if hosts == [unet] else f"{hosts}"
    logging.debug("hosts: '%s' cmd: '%s' nline: '%s'", hoststr, cmd, nline)

    if cmd in unet.cli_run_cmds:
        pass
    elif "" in unet.cli_run_cmds:
        # Fall back to the default (unnamed) command with the original
        # command word re-attached as user input.
        nline = f"{cmd} {nline}"
        cmd = ""
    else:
        outf.write(f"% Unknown command: {cmd} {nline}\n")
        return True

    execfmt, toplevel, kinds, ns_only, interactive = unet.cli_run_cmds[cmd][2:]
    if interactive and notty:
        outf.write("% Error: interactive command must be run from primary CLI\n")
        return True

    await run_command(
        unet, outf, nline, execfmt, banner, hosts, toplevel, kinds, ns_only, interactive
    )

    return True
async def cli_client(sockpath, prompt="munet> "):
    """Implement the user-facing CLI for a remote munet reached by a socket.

    Sends each input line to the server and prints the response, which is
    terminated by ENDMARKER.  Returns on EOF from the user or the server.
    """
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.settimeout(10)
    sock.connect(sockpath)

    # Go into full non-blocking mode now
    sock.settimeout(None)

    print("\n--- Munet CLI Starting ---\n\n")
    while True:
        line = input(prompt)
        if line is None:
            return

        # Need to put \n back
        line += "\n"

        # Send the CLI command
        sock.send(line.encode("utf-8"))

        def bendswith(b, sentinel):
            # True if buffer `b` ends with byte-string `sentinel`.
            slen = len(sentinel)
            return len(b) >= slen and b[-slen:] == sentinel

        # Collect the output
        rb = b""
        while not bendswith(rb, ENDMARKER):
            lb = sock.recv(4096)
            if not lb:
                return
            rb += lb

        # Remove the marker
        rb = rb[: -len(ENDMARKER)]

        # Write the output
        sys.stdout.write(rb.decode("utf-8"))
async def local_cli(unet, outf, prompt, histfile, background):
    """Implement the user-side CLI for local munet.

    Installs tab completion (when a unet is present) and loops reading
    lines via `async_input`, dispatching each through `doline` until the
    user quits or EOF.
    """
    if unet:
        completer = Completer(unet)
        readline.parse_and_bind("tab: complete")
        readline.set_completer(completer.complete)

    print("\n--- Munet CLI Starting ---\n\n")
    while True:
        try:
            line = await async_input(prompt, histfile)
            if line is None:
                return

            assert unet is not None

            if not await doline(unet, line, outf, background):
                return
        except KeyboardInterrupt:
            outf.write("%% Caught KeyboardInterrupt\nUse ^D or 'quit' to exit")
def init_history(unet, histfile):
    """Ensure a readline history file exists and load it.

    Args:
        unet: optional node object whose `cmd` method creates the file in
            the munet namespace; plain subprocess is used otherwise.
        histfile: path to the history file, or None to use the default
            ~/.munet-history.txt.

    Returns:
        The path of the history file in use, or None if initialization
        failed (failures are logged, not raised).
    """
    try:
        if histfile is None:
            histfile = os.path.expanduser("~/.munet-history.txt")
        if not os.path.exists(histfile):
            if unet:
                unet.cmd("touch " + histfile)
            else:
                # Use an argument list (no shell) so a path containing
                # spaces or shell metacharacters cannot break the command
                # or inject another one.
                subprocess.run(["touch", histfile], shell=False, check=True)
        if histfile:
            readline.read_history_file(histfile)
        return histfile
    except Exception as error:
        logging.warning("init_history failed: %s", error)
    return None
async def cli_client_connected(unet, background, reader, writer):
    """Handle CLI commands inside the munet process from a socket.

    Reads newline-terminated commands, executes each via `doline`, and
    terminates every response with ENDMARKER so the client can frame it.
    """
    # # Go into full non-blocking mode now
    # client.settimeout(None)
    logging.debug("cli client connected")
    while True:
        line = await reader.readline()
        if not line:
            logging.debug("client closed cli connection")
            break
        line = line.decode("utf-8").strip()

        # def writef(x):
        #     writer.write(x.encode("utf-8"))

        if not await doline(unet, line, writer, background, notty=True):
            logging.debug("server closing cli connection")
            return

        writer.write(ENDMARKER)
        await writer.drain()
async def remote_cli(unet, prompt, title, background):
    """Open a CLI in a new window.

    Lazily starts a unix-socket CLI server inside the munet process (once),
    then launches this module as a client in a new terminal window pointed
    at that socket.  Errors are logged, not raised.
    """
    try:
        if not unet.cli_sockpath:
            # First caller creates the server socket and serving task.
            sockpath = os.path.join(tempfile.mkdtemp("-sockdir", "pty-"), "cli.sock")
            ccfunc = functools.partial(cli_client_connected, unet, background)
            s = await asyncio.start_unix_server(ccfunc, path=sockpath)
            unet.cli_server = asyncio.create_task(s.serve_forever(), name="cli-task")
            unet.cli_sockpath = sockpath
            logging.info("server created on :\n%s\n", sockpath)

        # Open a new window with a new CLI
        python_path = await unet.async_get_exec_path(["python3", "python"])
        us = os.path.realpath(__file__)
        cmd = f"{python_path} {us}"
        if unet.cli_histfile:
            cmd += " --histfile=" + unet.cli_histfile
        if prompt:
            cmd += f" --prompt='{prompt}'"
        cmd += " " + unet.cli_sockpath
        unet.run_in_window(cmd, title=title, background=False)
    except Exception as error:
        logging.error("cli server: unexpected exception: %s", error)
def add_cli_in_window_cmd(
    unet, name, helpfmt, helptxt, execfmt, toplevel, kinds, **kwargs
):
    """Register a CLI command that runs in a new terminal window.

    See `base.Commander.run_in_window` for the arguments that can be passed
    via `kwargs`.

    Args:
        unet: unet object.
        name: command string (no spaces).
        helpfmt: format of command to display in help (left side).
        helptxt: help string for command (right side).
        execfmt: interpreter `cmd` to pass to `host.run_in_window()`; if {}
            is present user input is substituted in.  May also be a dict of
            dicts keyed on kind whose "exec" sub-key provides the format
            string for that kind.
        toplevel: run command in the common top-level namespace, not inside
            hosts.
        kinds: limit CLI command to nodes which match this list of kinds.
        **kwargs: keyword args passed through to `host.run_in_window()`.
    """
    entry = (helpfmt, helptxt, execfmt, toplevel, kinds, kwargs)
    unet.cli_in_window_cmds[name] = entry
def add_cli_run_cmd(
    unet,
    name,
    helpfmt,
    helptxt,
    execfmt,
    toplevel,
    kinds,
    ns_only=False,
    interactive=False,
):
    """Register an inline CLI command.

    See `run_command` (used by `doline`) for how the registered command is
    eventually executed.

    Args:
        unet: unet object.
        name: command string (no spaces).
        helpfmt: format of command to display in help (left side).
        helptxt: help string for command (right side).
        execfmt: format string that user input is substituted into for
            execution.  May also be a dict of dicts keyed on kind whose
            "exec" sub-key provides the format string for that kind.
        toplevel: run command in the common top-level namespace, not inside
            hosts.
        kinds: limit CLI command to nodes which match this list of kinds.
        ns_only: execute the command on the host rather than in the node
            namespace.
        interactive: execute the command inside an allocated pty.
    """
    entry = (helpfmt, helptxt, execfmt, toplevel, kinds, ns_only, interactive)
    unet.cli_run_cmds[name] = entry
def add_cli_config(unet, config):
    """Register CLI commands described by a config dictionary.

    All exec strings have %CONFIGDIR%, %NAME% and %RUNDIR% replaced with the
    config directory and the current node's `name` and `rundir`.  The exec
    string additionally gets f-string style interpolation with the local
    variables `host` (node object or None), `unet` (Munet object) and
    `user_input` (if provided to the CLI command).

    Example config (the first entry, having no `name`, is the default
    command):

    commands:
      - help: "run the given FRR command using vtysh"
        format: "[HOST ...] FRR-CLI-COMMAND"
        exec: "vtysh -c {}"
        ns-only: false  # the default
        interactive: false  # the default
      - name: "vtysh"
        help: "Open a FRR CLI inside new terminal[s] on the given HOST[s]"
        format: "vtysh HOST [HOST ...]"
        exec: "vtysh"
        new-window: true
      - name: "capture"
        help: "Capture packets on a given network"
        format: "pcap NETWORK"
        exec: "tshark -s 9200 -i {0} -w /tmp/capture-{0}.pcap"
        new-window: true
        top-level: true  # run in top-level container namespace, above hosts

    The `new-window` value may itself be a dictionary, in which case it is
    passed as keyword arguments to `Commander.run_in_window()`.

    Args:
        unet: unet object.
        config: dictionary of cli config.
    """
    for entry in config.get("commands", []):
        name = entry.get("name", None)
        helpfmt = entry.get("format", "")
        helptxt = entry.get("help", "")
        # Prefer per-kind exec formats; fall back to the plain exec string.
        execfmt = list_to_dict_with_key(entry.get("exec-kind"), "kind")
        if not execfmt:
            execfmt = entry.get("exec", "bash -c '{}'")
        toplevel = entry.get("top-level", False)
        kinds = entry.get("kinds", [])
        stdargs = (unet, name, helpfmt, helptxt, execfmt, toplevel, kinds)
        new_window = entry.get("new-window", None)
        if isinstance(new_window, dict):
            add_cli_in_window_cmd(*stdargs, **new_window)
        elif bool(new_window):
            add_cli_in_window_cmd(*stdargs)
        else:
            # on-host is deprecated it really implemented "ns-only"
            add_cli_run_cmd(
                *stdargs,
                entry.get("ns-only", entry.get("on-host")),
                entry.get("interactive", False),
            )
def cli(
    unet,
    histfile=None,
    sockpath=None,
    force_window=False,
    title=None,
    prompt=None,
    background=True,
):
    """Synchronous wrapper that runs the async CLI to completion."""
    coro = async_cli(unet, histfile, sockpath, force_window, title, prompt, background)
    asyncio.run(coro)
async def async_cli(
    unet,
    histfile=None,
    sockpath=None,
    force_window=False,
    title=None,
    prompt=None,
    background=True,
):
    """Run the munet CLI: remote client, local CLI, and/or a new window.

    With `sockpath` set, act as a client to a remote munet; otherwise run
    the local CLI.  With `force_window` (or no tty), first open the CLI in
    a new window.
    """
    if prompt is None:
        prompt = "munet> "

    if force_window or not sys.stdin.isatty():
        # NOTE(review): execution falls through after opening the window and
        # still enters the client/local CLI below — confirm this is intended.
        await remote_cli(unet, prompt, title, background)

    if not unet:
        logger.debug("client-cli using sockpath %s", sockpath)

    try:
        if sockpath:
            await cli_client(sockpath, prompt)
        else:
            await local_cli(unet, sys.stdout, prompt, histfile, background)
    except KeyboardInterrupt:
        print("\n...^C exiting CLI")
    except EOFError:
        pass
    except Exception as ex:
        logger.critical("cli: got exception: %s", ex, exc_info=True)
        raise
# Script entry point: run as a standalone CLI client attached to a remote
# munet's unix socket (launched by `remote_cli` in a new window).
if __name__ == "__main__":
    # logging.basicConfig(level=logging.DEBUG, filename="/tmp/topotests/cli-client.log")
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger("cli-client")
    logger.info("Start logging cli-client")

    parser = argparse.ArgumentParser()
    parser.add_argument("--histfile", help="file to user for history")
    parser.add_argument("--prompt", help="prompt string to use")
    parser.add_argument("socket", help="path to pair of sockets to communicate over")
    cli_args = parser.parse_args()

    cli_prompt = cli_args.prompt if cli_args.prompt else "munet> "
    asyncio.run(
        async_cli(
            None,
            cli_args.histfile,
            cli_args.socket,
            prompt=cli_prompt,
            background=False,
        )
    )

View file

@ -0,0 +1,24 @@
# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: GPL-2.0-or-later
#
# November 16 2022, Christian Hopps <chopps@labn.net>
#
# Copyright (c) 2022, LabN Consulting, L.L.C.
#
"""Provide compatible APIs."""
class PytestConfig:
    """Pytest config duck-type-compatible object using argparse args."""

    def __init__(self, args):
        self.args = vars(args)

    def getoption(self, name, default=None, skip=False):
        """Look up an option by name, mimicking pytest's Config.getoption."""
        assert not skip
        key = name[2:] if name.startswith("--") else name
        key = key.replace("-", "_")
        try:
            value = self.args[key]
        except KeyError:
            return default
        return default if value is None else value

View file

@ -0,0 +1,158 @@
# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: GPL-2.0-or-later
#
# June 25 2022, Christian Hopps <chopps@gmail.com>
#
# Copyright (c) 2021-2022, LabN Consulting, L.L.C.
#
"""A module that defines common configuration utility functions."""
import logging
from collections.abc import Iterable
from copy import deepcopy
from typing import overload
def find_with_kv(lst, k, v):
    """Return the first dict in `lst` whose key `k` equals `v`, else {}."""
    for entry in lst or []:
        if k in entry and entry[k] == v:
            return entry
    return {}
def find_all_with_kv(lst, k, v):
    """Return every dict in `lst` whose key `k` equals `v`."""
    if not lst:
        return []
    return [entry for entry in lst if k in entry and entry[k] == v]
def find_matching_net_config(name, cconf, oconf):
    """Find the connection in `oconf` whose "to" points back at `name`.

    If `cconf` names a specific remote connection ("remote-name"), return
    the candidate with that name; otherwise return the first candidate.
    Returns {} when there is no match.
    """
    candidates = find_all_with_kv(oconf.get("connections", {}), "to", name)
    if not candidates:
        return {}
    remote = cconf.get("remote-name", None)
    if remote:
        return find_with_kv(candidates, "name", remote)
    return candidates[0]
def merge_using_key(a, b, k):
    """Merge lists of dicts `a` and `b` keyed on `k`; entries from `b` win.

    Entries of `b` whose key value matches an entry of `a` replace it in
    place; the rest are appended.  Order of `a` is preserved.
    """
    merged = list(a)
    # Map each key value to its index in `merged` for O(1) replacement.
    index_of = {obj[k]: i for i, obj in enumerate(merged)}
    for obj in b:
        key = obj[k]
        if key in index_of:
            merged[index_of[key]] = obj
        else:
            index_of[key] = len(merged)
            merged.append(obj)
    return merged
def list_to_dict_with_key(lst, k):
    """Convert a YANG style list of objects to a dict of objects.

    Each object (dictionary) in the list is stored in the resulting dict
    under the value it holds for key `k`.  Only works for lists of objects
    keyed on a single contained value.

    Args:
        lst: a *list* of python dictionary objects (or None).
        k: the key value contained in each dictionary object in the list.

    Returns:
        A dictionary of objects (dictionaries).
    """
    result = {}
    for obj in lst or []:
        result[obj[k]] = obj
    return result
def config_to_dict_with_key(c, ck, k):
    """Convert the config item at ``c[ck]`` from a list of objects to a dict.

    Uses :py:func:`list_to_dict_with_key` with key ``k`` and stores the
    result back into ``c[ck]``.

    Args:
        c: config dictionary.
        ck: the key identifying the list of objects within ``c``.
        k: the key to pass to :py:func:`list_to_dict_with_key`.

    Returns:
        The converted dictionary of objects (also stored at ``c[ck]``).
    """
    converted = list_to_dict_with_key(c.get(ck, []), k)
    c[ck] = converted
    return converted
@overload
def config_subst(config: str, **kwargs) -> str:
    ...


@overload
def config_subst(config: Iterable, **kwargs) -> Iterable:
    ...


def config_subst(config: Iterable, **kwargs) -> Iterable:
    """Recursively substitute %VAR% placeholders throughout a config value.

    For each keyword argument ``name=value``, occurrences of ``%NAME%`` in
    strings are replaced by ``str(value)``.  Dicts and lists are processed
    recursively; any other value is returned unchanged.
    """
    if isinstance(config, str):
        if "%RUNDIR%/%NAME%" in config:
            # Legacy form: collapse to %RUNDIR% and warn.
            config = config.replace("%RUNDIR%/%NAME%", "%RUNDIR%")
            logging.warning(
                "config '%RUNDIR%/%NAME%' should be changed to '%RUNDIR%' only, "
                "converting automatically for now."
            )
        for name, value in kwargs.items():
            config = config.replace(f"%{name.upper()}%", str(value))
        return config
    if isinstance(config, Iterable):
        # Mapping-style containers index by their iteration keys; anything
        # that fails that (e.g. a list of strings) is rebuilt element-wise.
        try:
            return {k: config_subst(config[k], **kwargs) for k in config}
        except (KeyError, TypeError):
            return [config_subst(x, **kwargs) for x in config]
    return config
def value_merge_deepcopy(s1, s2):
    """Merge values using deepcopy.

    Create a deepcopy of the result of merging the values from dicts ``s1``
    and ``s2``.  If a key exists in both ``s1`` and ``s2`` the value from
    ``s2`` is used.  Only keys present in ``s1`` appear in the result.
    """
    return {k: deepcopy(s2[k] if k in s2 else v) for k, v in s1.items()}
def merge_kind_config(kconf, config):
    """Merge a node's `config` on top of its kind defaults `kconf`.

    Keys listed in ``kconf["merge"]`` are combined (lists extended, dicts
    union'd with node values winning); all other overlapping keys are simply
    replaced by the node's value.  Both inputs are deep-copied first.
    """
    mergekeys = kconf.get("merge", [])
    config = deepcopy(config)
    merged = deepcopy(kconf)
    for key, kval in merged.items():
        if key not in config:
            continue
        cval = config[key]
        if key not in mergekeys:
            merged[key] = cval
        elif isinstance(kval, list):
            kval.extend(cval)
        elif isinstance(kval, dict):
            merged[key] = {**kval, **cval}
        else:
            merged[key] = cval
    # Bring over node-only keys untouched.
    for key, cval in config.items():
        if key not in merged:
            merged[key] = cval
    return merged

View file

@ -0,0 +1,84 @@
version: 1
kinds:
- name: frr
cap-add:
# Zebra requires these
- NET_ADMIN
- NET_RAW
- SYS_ADMIN
- AUDIT_WRITE # needed for ssh pty allocation
- name: ceos
init: false
shell: false
merge: ["env"]
# Should we cap-drop some of these in privileged mode?
# ceos kind is special. munet will add args to /sbin/init for each
# environment variable of the form `systemd.setenv=ENVNAME=VALUE` for each
# environment variable named ENVNAME with a value of `VALUE`. If cmd: is
# changed to anything but `/sbin/init` munet will not do this.
cmd: /sbin/init
privileged: true
env:
- name: "EOS_PLATFORM"
value: "ceoslab"
- name: "container"
value: "docker"
- name: "ETBA"
value: "4"
- name: "SKIP_ZEROTOUCH_BARRIER_IN_SYSDBINIT"
value: "1"
- name: "INTFTYPE"
value: "eth"
- name: "MAPETH0"
value: "1"
- name: "MGMT_INTF"
value: "eth0"
- name: "CEOS"
value: "1"
# cap-add:
# # cEOS requires these, except GNMI still doesn't work
# # - NET_ADMIN
# # - NET_RAW
# # - SYS_ADMIN
# # - SYS_RESOURCE # Required for the CLI
# All Caps
# - AUDIT_CONTROL
# - AUDIT_READ
# - AUDIT_WRITE
# - BLOCK_SUSPEND
# - CHOWN
# - DAC_OVERRIDE
# - DAC_READ_SEARCH
# - FOWNER
# - FSETID
# - IPC_LOCK
# - IPC_OWNER
# - KILL
# - LEASE
# - LINUX_IMMUTABLE
# - MKNOD
# - NET_ADMIN
# - NET_BIND_SERVICE
# - NET_BROADCAST
# - NET_RAW
# - SETFCAP
# - SETGID
# - SETPCAP
# - SETUID
# - SYSLOG
# - SYS_ADMIN
# - SYS_BOOT
# - SYS_CHROOT
# - SYS_MODULE
# - SYS_NICE
# - SYS_PACCT
# - SYS_PTRACE
# - SYS_RAWIO
# - SYS_RESOURCE
# - SYS_TIME
# - SYS_TTY_CONFIG
# - WAKE_ALARM
# - MAC_ADMIN - Smack project?
# - MAC_OVERRIDE - Smack project?

View file

@ -0,0 +1,267 @@
# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: GPL-2.0-or-later
#
# June 10 2022, Christian Hopps <chopps@labn.net>
#
# Copyright (c) 2022, LabN Consulting, L.L.C.
#
"""A module that gives access to linux unshare system call."""
import ctypes # pylint: disable=C0415
import ctypes.util # pylint: disable=C0415
import errno
import functools
import os
libc = None
def raise_oserror(enum):
    """Raise an OSError for errno value `enum`, labeled with its symbolic name."""
    name = errno.errorcode.get(enum, str(enum))
    error = OSError(name)
    error.errno = enum
    error.strerror = name
    raise error
def _load_libc():
    """Load the C library via ctypes into module-global `libc` (idempotent)."""
    global libc  # pylint: disable=W0601,W0603
    if libc:
        return
    lcpath = ctypes.util.find_library("c")
    # use_errno=True so callers can retrieve errno via ctypes.get_errno().
    libc = ctypes.CDLL(lcpath, use_errno=True)
def pause():
    """Block the calling process in pause(2) until a signal is delivered."""
    if not libc:
        _load_libc()
    libc.pause()
# mount(2) flag values (see <sys/mount.h> / the mount(2) man page).
MS_RDONLY = 1
MS_NOSUID = 1 << 1
MS_NODEV = 1 << 2
MS_NOEXEC = 1 << 3
MS_SYNCHRONOUS = 1 << 4
MS_REMOUNT = 1 << 5
MS_MANDLOCK = 1 << 6
MS_DIRSYNC = 1 << 7
MS_NOSYMFOLLOW = 1 << 8
MS_NOATIME = 1 << 10
MS_NODIRATIME = 1 << 11
MS_BIND = 1 << 12
MS_MOVE = 1 << 13
MS_REC = 1 << 14
MS_SILENT = 1 << 15
MS_POSIXACL = 1 << 16
MS_UNBINDABLE = 1 << 17
MS_PRIVATE = 1 << 18
MS_SLAVE = 1 << 19
MS_SHARED = 1 << 20
MS_RELATIME = 1 << 21
MS_KERNMOUNT = 1 << 22
MS_I_VERSION = 1 << 23
MS_STRICTATIME = 1 << 24
MS_LAZYTIME = 1 << 25
def mount(source, target, fs, flags=0, options=""):
    """Invoke mount(2) via ctypes.

    Args:
        source: device or path to mount.
        target: mount point path.
        fs: filesystem type string, or None/"" for bind/move mounts.
        flags: MS_* flag bits.
        options: filesystem-specific option string.

    Raises:
        OSError: on failure, carrying errno and a descriptive message.
    """
    if not libc:
        _load_libc()
    libc.mount.argtypes = (
        ctypes.c_char_p,
        ctypes.c_char_p,
        ctypes.c_char_p,
        ctypes.c_ulong,
        ctypes.c_char_p,
    )
    fsenc = fs.encode() if fs else None
    optenc = options.encode() if options else None
    ret = libc.mount(source.encode(), target.encode(), fsenc, flags, optenc)
    if ret < 0:
        err = ctypes.get_errno()
        raise OSError(
            err,
            f"Error mounting {source} ({fs}) on {target}"
            f" with options '{options}': {os.strerror(err)}",
        )
# umount2(2) option flag values
MNT_FORCE = 0x1
MNT_DETACH = 0x2
MNT_EXPIRE = 0x4
UMOUNT_NOFOLLOW = 0x8
def umount(target, options):
    """Invoke umount2(2) via ctypes.

    Args:
        target: mount point path to unmount.
        options: MNT_*/UMOUNT_* flag bits.

    Raises:
        OSError: on failure, carrying errno and a descriptive message.
    """
    if not libc:
        _load_libc()
    libc.umount.argtypes = (ctypes.c_char_p, ctypes.c_uint)
    ret = libc.umount(target.encode(), int(options))
    if ret < 0:
        err = ctypes.get_errno()
        raise OSError(
            err,
            f"Error umounting {target} with options '{options}': {os.strerror(err)}",
        )
def pidfd_open(pid, flags=0):
    """Open a pid file descriptor for `pid` (see pidfd_open(2)).

    Prefers the native os.pidfd_open when available (Python 3.9+), then the
    libc symbol, then a raw syscall(434) fallback for older libcs.

    Raises:
        OSError: on failure (via raise_oserror).
    """
    if hasattr(os, "pidfd_open") and os.pidfd_open is not pidfd_open:
        return os.pidfd_open(pid, flags)  # pylint: disable=no-member

    if not libc:
        _load_libc()

    try:
        pfof = libc.pidfd_open
    except AttributeError:
        # libc does not export the wrapper; call the syscall directly.
        __NR_pidfd_open = 434
        _pidfd_open = libc.syscall
        _pidfd_open.restype = ctypes.c_int
        _pidfd_open.argtypes = ctypes.c_long, ctypes.c_uint, ctypes.c_uint
        pfof = functools.partial(_pidfd_open, __NR_pidfd_open)

    fd = pfof(int(pid), int(flags))
    if fd == -1:
        raise_oserror(ctypes.get_errno())
    return fd


# Backfill os.pidfd_open on Pythons that lack it.
if not hasattr(os, "pidfd_open"):
    os.pidfd_open = pidfd_open
def setns(fd, nstype):  # noqa: D402
    """See setns(2) manpage.

    Raises:
        OSError: on failure (via raise_oserror).
    """
    if not libc:
        _load_libc()
    if libc.setns(int(fd), int(nstype)) == -1:
        raise_oserror(ctypes.get_errno())
def unshare(flags):  # noqa: D402
    """See unshare(2) manpage.

    Raises:
        OSError: if the unshare call fails.
    """
    if not libc:
        _load_libc()
    if libc.unshare(int(flags)) == -1:
        raise_oserror(ctypes.get_errno())
# clone/unshare(2) CLONE_* flag values (see <linux/sched.h>).
CLONE_NEWTIME = 0x00000080
CLONE_VM = 0x00000100
CLONE_FS = 0x00000200
CLONE_FILES = 0x00000400
CLONE_SIGHAND = 0x00000800
CLONE_PIDFD = 0x00001000
CLONE_PTRACE = 0x00002000
CLONE_VFORK = 0x00004000
CLONE_PARENT = 0x00008000
CLONE_THREAD = 0x00010000
CLONE_NEWNS = 0x00020000
CLONE_SYSVSEM = 0x00040000
CLONE_SETTLS = 0x00080000
CLONE_PARENT_SETTID = 0x00100000
CLONE_CHILD_CLEARTID = 0x00200000
CLONE_DETACHED = 0x00400000
CLONE_UNTRACED = 0x00800000
CLONE_CHILD_SETTID = 0x01000000
CLONE_NEWCGROUP = 0x02000000
CLONE_NEWUTS = 0x04000000
CLONE_NEWIPC = 0x08000000
CLONE_NEWUSER = 0x10000000
CLONE_NEWPID = 0x20000000
CLONE_NEWNET = 0x40000000
CLONE_IO = 0x80000000
# Map CLONE_* flag bit -> symbolic name; used by clone_flag_string().
clone_flag_names = {
    CLONE_NEWTIME: "CLONE_NEWTIME",
    CLONE_VM: "CLONE_VM",
    CLONE_FS: "CLONE_FS",
    CLONE_FILES: "CLONE_FILES",
    CLONE_SIGHAND: "CLONE_SIGHAND",
    CLONE_PIDFD: "CLONE_PIDFD",
    CLONE_PTRACE: "CLONE_PTRACE",
    CLONE_VFORK: "CLONE_VFORK",
    CLONE_PARENT: "CLONE_PARENT",
    CLONE_THREAD: "CLONE_THREAD",
    CLONE_NEWNS: "CLONE_NEWNS",
    CLONE_SYSVSEM: "CLONE_SYSVSEM",
    CLONE_SETTLS: "CLONE_SETTLS",
    CLONE_PARENT_SETTID: "CLONE_PARENT_SETTID",
    CLONE_CHILD_CLEARTID: "CLONE_CHILD_CLEARTID",
    CLONE_DETACHED: "CLONE_DETACHED",
    CLONE_UNTRACED: "CLONE_UNTRACED",
    CLONE_CHILD_SETTID: "CLONE_CHILD_SETTID",
    CLONE_NEWCGROUP: "CLONE_NEWCGROUP",
    CLONE_NEWUTS: "CLONE_NEWUTS",
    CLONE_NEWIPC: "CLONE_NEWIPC",
    CLONE_NEWUSER: "CLONE_NEWUSER",
    CLONE_NEWPID: "CLONE_NEWPID",
    CLONE_NEWNET: "CLONE_NEWNET",
    CLONE_IO: "CLONE_IO",
}
def clone_flag_string(flags):
    """Return a "|"-separated list of the CLONE_* names set in ``flags``.

    Returns the string "None" when no known flag bit is present.
    """
    present = []
    for bit, name in clone_flag_names.items():
        if bit & flags:
            present.append(name)
    return "|".join(present) if present else "None"
# Map CLONE_* namespace flag -> path (relative to /proc/<pid>/) of the
# corresponding namespace symlink.  pid and time use the *_for_children
# variants, i.e. the namespace future children will be created in.
namespace_files = {
    CLONE_NEWUSER: "ns/user",
    CLONE_NEWCGROUP: "ns/cgroup",
    CLONE_NEWIPC: "ns/ipc",
    CLONE_NEWUTS: "ns/uts",
    CLONE_NEWNET: "ns/net",
    CLONE_NEWPID: "ns/pid_for_children",
    CLONE_NEWNS: "ns/mnt",
    CLONE_NEWTIME: "ns/time_for_children",
}
# prctl(2) option values (see <linux/prctl.h>).
PR_SET_PDEATHSIG = 1
PR_GET_PDEATHSIG = 2
PR_SET_NAME = 15
PR_GET_NAME = 16
def set_process_name(name):
    """Set the kernel process (comm) name via prctl(PR_SET_NAME)."""
    if not libc:
        _load_libc()
    # Why does uncommenting this cause failure?
    # libc.prctl.argtypes = (
    #     ctypes.c_int,
    #     ctypes.c_ulong,
    #     ctypes.c_ulong,
    #     ctypes.c_ulong,
    #     ctypes.c_ulong,
    # )
    # Pass a NUL-terminated buffer; name must be ASCII-encodable.
    s = ctypes.create_string_buffer(bytes(name, encoding="ascii"))
    sr = ctypes.byref(s)
    libc.prctl(PR_SET_NAME, sr, 0, 0, 0)
def set_parent_death_signal(signum):
    """Request ``signum`` be delivered to this process when its parent dies.

    Uses prctl(PR_SET_PDEATHSIG).
    """
    if not libc:
        _load_libc()
    # Unlike set_process_name above, declaring argtypes here works fine.
    libc.prctl.argtypes = (
        ctypes.c_int,
        ctypes.c_ulong,
        ctypes.c_ulong,
        ctypes.c_ulong,
        ctypes.c_ulong,
    )
    libc.prctl(PR_SET_PDEATHSIG, signum, 0, 0, 0)

View file

@ -0,0 +1,84 @@
version: 1
formatters:
brief:
format: '%(levelname)5s: %(message)s'
operfmt:
class: munet.mulog.ColorFormatter
format: ' ------| %(message)s'
exec:
format: '%(asctime)s %(levelname)5s: %(name)s: %(message)s'
output:
format: '%(asctime)s %(levelname)5s: OUTPUT: %(message)s'
results:
# format: '%(asctime)s %(levelname)5s: %(message)s'
format: '%(message)s'
handlers:
console:
level: WARNING
class: logging.StreamHandler
formatter: brief
stream: ext://sys.stderr
info_console:
level: INFO
class: logging.StreamHandler
formatter: brief
stream: ext://sys.stderr
oper_console:
level: DEBUG
class: logging.StreamHandler
formatter: operfmt
stream: ext://sys.stderr
exec:
level: DEBUG
class: logging.FileHandler
formatter: exec
filename: mutest-exec.log
mode: w
output:
level: DEBUG
class: munet.mulog.MultiFileHandler
root_path: "mutest.output"
formatter: output
filename: mutest-output.log
mode: w
results:
level: INFO
class: munet.mulog.MultiFileHandler
root_path: "mutest.results"
new_handler_level: DEBUG
formatter: results
filename: mutest-results.log
mode: w
root:
level: DEBUG
handlers: [ "console", "exec" ]
loggers:
# These are some loggers that get used...
# munet:
# level: DEBUG
# propagate: true
# munet.base.commander:
# level: DEBUG
# propagate: true
# mutest.error:
# level: DEBUG
# propagate: true
mutest.output:
level: DEBUG
handlers: ["output", "exec"]
propagate: false
mutest.results:
level: DEBUG
handlers: [ "info_console", "exec", "output", "results" ]
# We don't propagate this b/c we want a lower level accept on the console
# Instead we use info_console and exec to cover what root would log to.
propagate: false
# This is used to debug the operation of mutest
mutest.oper:
# Records are emitted at DEBUG so this will normally filter everything
level: INFO
handlers: [ "oper_console" ]
propagate: false

View file

@ -0,0 +1,32 @@
version: 1
formatters:
brief:
format: '%(asctime)s: %(levelname)s: %(message)s'
precise:
format: '%(asctime)s %(levelname)s: %(name)s: %(message)s'
handlers:
console:
class: logging.StreamHandler
formatter: brief
level: INFO
stream: ext://sys.stderr
file:
class: logging.FileHandler
formatter: precise
level: DEBUG
filename: munet-exec.log
mode: w
root:
level: DEBUG
handlers: [ "console", "file" ]
# these are some loggers that get used.
# loggers:
# munet:
# level: DEBUG
# propagate: true
# munet.base.commander:
# level: DEBUG
# propagate: true

View file

@ -0,0 +1,111 @@
# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: GPL-2.0-or-later
#
# December 5 2021, Christian Hopps <chopps@labn.net>
#
# Copyright 2021, LabN Consulting, L.L.C.
#
"""A command that allows external command execution inside nodes."""
import argparse
import json
import os
import subprocess
import sys
from pathlib import Path
def newest_file_in(filename, paths, has_sibling=None):
    """Find the most recently modified file named ``filename`` under ``paths``.

    Args:
        filename: base filename (glob pattern allowed) to search for.
        paths: iterable of directory paths, each searched recursively.
        has_sibling: if given, only consider files whose directory also
            contains an entry with this name.

    Returns:
        (path, stat): the newest matching file as a Path and its os.stat
            result, or (None, None) when nothing matches.
    """
    newest = None
    newest_stat = None
    candidates = (m for p in paths for m in Path(p).rglob(filename))
    for candidate in candidates:
        # Apply the sibling filter before stat'ing to avoid needless syscalls.
        if has_sibling and not candidate.parent.joinpath(has_sibling).exists():
            continue
        st = os.stat(candidate)
        if newest is None or st.st_mtime_ns > newest_stat.st_mtime_ns:
            newest = candidate
            newest_stat = st
    return newest, newest_stat
def main(*args):
    """Run a command inside (or enter a shell in) a running munet node.

    Locates the newest munet run's ``config.json`` (or uses --rundir), then
    exec's nsenter(1) into the selected node's namespaces.

    Returns:
        Does not return on success (execvpe); returns 1 on lookup failure.
    """
    ap = argparse.ArgumentParser(args)
    ap.add_argument("-d", "--rundir", help="runtime directory for tempfiles, logs, etc")
    ap.add_argument("node", nargs="?", help="node to enter or run command inside")
    ap.add_argument(
        "shellcmd",
        nargs=argparse.REMAINDER,
        help="optional shell-command to execute on NODE",
    )
    args = ap.parse_args()

    if args.rundir:
        configpath = Path(args.rundir).joinpath("config.json")
    else:
        # Auto-discover the most recent run in the standard run directories.
        configpath, _ = newest_file_in(
            "config.json",
            ["/tmp/munet", "/tmp/mutest", "/tmp/unet-test"],
            has_sibling=args.node,
        )
        print(f'Using "{configpath}"')

    # BUG FIX: also guard against no config being found at all (None).
    if configpath is None or not configpath.exists():
        print(f'"{configpath}" not found')
        return 1
    rundir = configpath.parent

    nodes = []
    with open(configpath, encoding="utf-8") as f:
        config = json.load(f)
    nodes = list(config.get("topology", {}).get("nodes", []))
    envcfg = config.get("mucmd", {}).get("env", {})

    # If args.node is not a node it's part of shellcmd
    if args.node and args.node not in nodes:
        if args.node != ".":
            args.shellcmd[0:0] = [args.node]
        args.node = None

    if args.node:
        name = args.node
        nodedir = rundir.joinpath(name)
        if not nodedir.exists():
            # BUG FIX: this was a plain string, not an f-string.
            print(f'"{name}" node doesn\'t exist in "{rundir}"')
            return 1
        rundir = nodedir
    else:
        name = "munet"
    pidpath = rundir.joinpath("nspid")
    with open(pidpath, encoding="ascii") as f:
        pid = f.read().strip()

    env = {**os.environ}
    env["MUNET_NODENAME"] = name
    env["MUNET_RUNDIR"] = str(rundir)

    # Substitute the per-node values into the configured environment.
    for k in envcfg:
        envcfg[k] = envcfg[k].replace("%NAME%", str(name))
        envcfg[k] = envcfg[k].replace("%RUNDIR%", str(rundir))

    # Can't use -F if it's a new pid namespace
    ecmd = "/usr/bin/nsenter"
    eargs = [ecmd]

    # Probe nsenter's supported options; prefer -a (all namespaces).
    output = subprocess.check_output(["/usr/bin/nsenter", "--help"], encoding="utf-8")
    if " -a," in output:
        eargs.append("-a")
    else:
        # -U doesn't work
        for flag in ["-u", "-i", "-m", "-n", "-C", "-T"]:
            if f" {flag}," in output:
                eargs.append(flag)
        eargs.append(f"--pid=/proc/{pid}/ns/pid_for_children")
    eargs.append(f"--wd={rundir}")
    eargs.extend(["-t", pid])
    eargs += args.shellcmd
    # print("Using ", eargs)
    return os.execvpe(ecmd, eargs, {**env, **envcfg})


if __name__ == "__main__":
    exit_status = main()
    sys.exit(exit_status)

View file

@ -0,0 +1,122 @@
# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: GPL-2.0-or-later
#
# December 4 2022, Christian Hopps <chopps@labn.net>
#
# Copyright (c) 2022, LabN Consulting, L.L.C.
#
"""Utilities for logging in munet."""
import logging
from pathlib import Path
class MultiFileHandler(logging.FileHandler):
    """A logging handler that logs to new files based on the logger name.

    The MultiFileHandler operates as a FileHandler with additional
    functionality.  In addition to logging to the specified logging file
    MultiFileHandler also creates new FileHandlers for child loggers based on
    a root logging name path.

    The ``root_path`` determines when to create a new FileHandler.  For each
    received log record, ``root_path`` is removed from the logger name of the
    record if present, and the resulting channel path (if any) determines the
    directory for a new log file to also emit the record to.  The new file
    path is constructed by starting with the directory ``filename`` resides
    in, then joining the path determined above after converting "." to "/"
    and finally by adding back the basename of ``filename``.

      record logger path => mutest.output.testingfoo
      root_path => mutest.output
      base filename => /tmp/mutest/mutest-exec.log
      new logfile => /tmp/mutest/testingfoo/mutest-exec.log

    All messages are also emitted to the common FileLogger for ``filename``.

    If a log record is from a logger that does not start with ``root_path``
    no file is created and the normal emit occurs.

    Args:
        root_path: the logging path of the root level for this handler.
        new_handler_level: logging level for newly created handlers.
        filename: the base log file.
    """

    def __init__(self, root_path, filename=None, **kwargs):
        self.__root_path = root_path
        self.__basename = Path(filename).name
        if root_path[-1] != ".":
            self.__root_path += "."
        self.__root_pathlen = len(self.__root_path)

        # Pop our extension option so the remaining kwargs can be passed
        # verbatim to FileHandler (both here and for each child handler).
        self.__new_handler_level = kwargs.pop("new_handler_level", logging.NOTSET)
        self.__kwargs = kwargs
        self.__log_dir = Path(filename).absolute().parent
        self.__log_dir.mkdir(parents=True, exist_ok=True)
        self.__filenames = {}
        self.__added = set()

        super().__init__(filename=filename, **kwargs)

        # None means "inherit this handler's own level".
        if self.__new_handler_level is None:
            self.__new_handler_level = self.level

    def __log_filename(self, name):
        """Map a logger name to its per-channel log file path (cached)."""
        if name in self.__filenames:
            return self.__filenames[name]

        if not name.startswith(self.__root_path):
            newname = None
        else:
            newname = name[self.__root_pathlen :]
            newname = Path(newname.replace(".", "/"))
            newname = self.__log_dir.joinpath(newname)
            newname = newname.joinpath(self.__basename)

        self.__filenames[name] = newname
        return newname

    def emit(self, record):
        """Emit to the base file and, lazily, to a per-channel file handler."""
        newname = self.__log_filename(record.name)
        if newname:
            if newname not in self.__added:
                self.__added.add(newname)
                # Ensure the per-channel directory exists before opening.
                newname.parent.mkdir(parents=True, exist_ok=True)
                h = logging.FileHandler(filename=newname, **self.__kwargs)
                h.setLevel(self.__new_handler_level)
                h.setFormatter(self.formatter)
                logging.getLogger(record.name).addHandler(h)
                # The record that triggered creation must be emitted by hand;
                # it was already dispatched before the handler was attached.
                h.emit(record)
        super().emit(record)
class ColorFormatter(logging.Formatter):
    """A formatter that adds ANSI color sequences based on record level."""

    def __init__(self, fmt=None, datefmt=None, style="%", **kwargs):
        grey = "\x1b[90m"
        yellow = "\x1b[33m"
        red = "\x1b[31m"
        bold_red = "\x1b[31;1m"
        reset = "\x1b[0m"
        # basefmt = " ------| %(message)s "

        # Guard against fmt=None which would break string concatenation.
        if fmt is None:
            fmt = "%(message)s"
        self.formatters = {
            logging.DEBUG: logging.Formatter(grey + fmt + reset),
            logging.INFO: logging.Formatter(grey + fmt + reset),
            logging.WARNING: logging.Formatter(yellow + fmt + reset),
            logging.ERROR: logging.Formatter(red + fmt + reset),
            logging.CRITICAL: logging.Formatter(bold_red + fmt + reset),
        }
        super().__init__(fmt, datefmt, style, **kwargs)

    def format(self, record):
        """Format with the per-level colored formatter.

        Falls back to the plain base formatter for non-standard levels (the
        previous code raised AttributeError when ``get`` returned None).
        """
        formatter = self.formatters.get(record.levelno)
        if formatter is None:
            return super().format(record)
        return formatter.format(record)

View file

@ -0,0 +1,654 @@
{
"title": "labn-munet-config",
"$schema": "https://json-schema.org/draft/2020-12/schema",
"description": "Generated by pyang from module labn-munet-config",
"type": "object",
"properties": {
"cli": {
"type": "object",
"properties": {
"commands": {
"type": "array",
"items": {
"type": "object",
"properties": {
"exec": {
"type": "string"
},
"exec-kind": {
"type": "array",
"items": {
"type": "object",
"properties": {
"kind": {
"type": "string"
},
"exec": {
"type": "string"
}
}
}
},
"format": {
"type": "string"
},
"help": {
"type": "string"
},
"interactive": {
"type": "boolean"
},
"kinds": {
"type": "array",
"items": {
"type": "string"
}
},
"name": {
"type": "string"
},
"new-window": {
"type": "boolean"
},
"top-level": {
"type": "boolean"
}
}
}
}
}
},
"kinds": {
"type": "array",
"items": {
"type": "object",
"properties": {
"merge": {
"type": "array",
"items": {
"type": "string"
}
},
"cap-add": {
"type": "array",
"items": {
"type": "string"
}
},
"cap-remove": {
"type": "array",
"items": {
"type": "string"
}
},
"cmd": {
"type": "string"
},
"cleanup-cmd": {
"type": "string"
},
"ready-cmd": {
"type": "string"
},
"image": {
"type": "string"
},
"server": {
"type": "string"
},
"server-port": {
"type": "number"
},
"qemu": {
"type": "object",
"properties": {
"bios": {
"type": "string"
},
"disk": {
"type": "string"
},
"kerenel": {
"type": "string"
},
"initrd": {
"type": "string"
},
"kvm": {
"type": "boolean"
},
"ncpu": {
"type": "integer"
},
"memory": {
"type": "string"
},
"root": {
"type": "string"
},
"cmdline-extra": {
"type": "string"
},
"extra-args": {
"type": "string"
},
"console": {
"type": "object",
"properties": {
"user": {
"type": "string"
},
"password": {
"type": "string"
},
"expects": {
"type": "array",
"items": {
"type": "string"
}
},
"sends": {
"type": "array",
"items": {
"type": "string"
}
},
"timeout": {
"type": "integer"
}
}
}
}
},
"connections": {
"type": "array",
"items": {
"type": "object",
"properties": {
"to": {
"type": "string"
},
"ip": {
"type": "string"
},
"ipv6": {
"type": "string"
},
"name": {
"type": "string"
},
"hostintf": {
"type": "string"
},
"physical": {
"type": "string"
},
"remote-name": {
"type": "string"
},
"driver": {
"type": "string"
},
"delay": {
"type": "integer"
},
"jitter": {
"type": "integer"
},
"jitter-correlation": {
"type": "string"
},
"loss": {
"type": "integer"
},
"loss-correlation": {
"type": "string"
},
"rate": {
"type": "object",
"properties": {
"rate": {
"oneOf": [
{
"type": "integer"
},
{
"type": "string"
}
]
},
"limit": {
"oneOf": [
{
"type": "integer"
},
{
"type": "string"
}
]
},
"burst": {
"oneOf": [
{
"type": "integer"
},
{
"type": "string"
}
]
}
}
}
}
}
},
"env": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"value": {
"type": "string"
}
}
}
},
"gdb-cmd": {
"type": "string"
},
"gdb-target-cmds": {
"type": "array",
"items": {
"type": "string"
}
},
"gdb-run-cmds": {
"type": "array",
"items": {
"type": "string"
}
},
"init": {
"oneOf": [
{
"type": "boolean"
},
{
"type": "string"
}
]
},
"mounts": {
"type": "array",
"items": {
"type": "object",
"properties": {
"destination": {
"type": "string"
},
"source": {
"type": "string"
},
"tmpfs-size": {
"type": "string"
},
"type": {
"type": "string"
}
}
}
},
"name": {
"type": "string"
},
"podman": {
"type": "object",
"properties": {
"extra-args": {
"type": "array",
"items": {
"type": "string"
}
}
}
},
"privileged": {
"type": "boolean"
},
"shell": {
"oneOf": [
{
"type": "boolean"
},
{
"type": "string"
}
]
},
"volumes": {
"type": "array",
"items": {
"type": "string"
}
}
}
}
},
"topology": {
"type": "object",
"properties": {
"dns-network": {
"type": "string"
},
"ipv6-enable": {
"type": "boolean"
},
"networks-autonumber": {
"type": "boolean"
},
"networks": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"ip": {
"type": "string"
},
"ipv6": {
"type": "string"
}
}
}
},
"nodes": {
"type": "array",
"items": {
"type": "object",
"properties": {
"id": {
"type": "integer"
},
"kind": {
"type": "string"
},
"cap-add": {
"type": "array",
"items": {
"type": "string"
}
},
"cap-remove": {
"type": "array",
"items": {
"type": "string"
}
},
"cmd": {
"type": "string"
},
"cleanup-cmd": {
"type": "string"
},
"ready-cmd": {
"type": "string"
},
"image": {
"type": "string"
},
"server": {
"type": "string"
},
"server-port": {
"type": "number"
},
"qemu": {
"type": "object",
"properties": {
"bios": {
"type": "string"
},
"disk": {
"type": "string"
},
"kerenel": {
"type": "string"
},
"initrd": {
"type": "string"
},
"kvm": {
"type": "boolean"
},
"ncpu": {
"type": "integer"
},
"memory": {
"type": "string"
},
"root": {
"type": "string"
},
"cmdline-extra": {
"type": "string"
},
"extra-args": {
"type": "string"
},
"console": {
"type": "object",
"properties": {
"user": {
"type": "string"
},
"password": {
"type": "string"
},
"expects": {
"type": "array",
"items": {
"type": "string"
}
},
"sends": {
"type": "array",
"items": {
"type": "string"
}
},
"timeout": {
"type": "integer"
}
}
}
}
},
"connections": {
"type": "array",
"items": {
"type": "object",
"properties": {
"to": {
"type": "string"
},
"ip": {
"type": "string"
},
"ipv6": {
"type": "string"
},
"name": {
"type": "string"
},
"hostintf": {
"type": "string"
},
"physical": {
"type": "string"
},
"remote-name": {
"type": "string"
},
"driver": {
"type": "string"
},
"delay": {
"type": "integer"
},
"jitter": {
"type": "integer"
},
"jitter-correlation": {
"type": "string"
},
"loss": {
"type": "integer"
},
"loss-correlation": {
"type": "string"
},
"rate": {
"type": "object",
"properties": {
"rate": {
"oneOf": [
{
"type": "integer"
},
{
"type": "string"
}
]
},
"limit": {
"oneOf": [
{
"type": "integer"
},
{
"type": "string"
}
]
},
"burst": {
"oneOf": [
{
"type": "integer"
},
{
"type": "string"
}
]
}
}
}
}
}
},
"env": {
"type": "array",
"items": {
"type": "object",
"properties": {
"name": {
"type": "string"
},
"value": {
"type": "string"
}
}
}
},
"gdb-cmd": {
"type": "string"
},
"gdb-target-cmds": {
"type": "array",
"items": {
"type": "string"
}
},
"gdb-run-cmds": {
"type": "array",
"items": {
"type": "string"
}
},
"init": {
"oneOf": [
{
"type": "boolean"
},
{
"type": "string"
}
]
},
"mounts": {
"type": "array",
"items": {
"type": "object",
"properties": {
"destination": {
"type": "string"
},
"source": {
"type": "string"
},
"tmpfs-size": {
"type": "string"
},
"type": {
"type": "string"
}
}
}
},
"name": {
"type": "string"
},
"podman": {
"type": "object",
"properties": {
"extra-args": {
"type": "array",
"items": {
"type": "string"
}
}
}
},
"privileged": {
"type": "boolean"
},
"shell": {
"oneOf": [
{
"type": "boolean"
},
{
"type": "string"
}
]
},
"volumes": {
"type": "array",
"items": {
"type": "string"
}
}
}
}
}
}
},
"version": {
"type": "integer"
}
}
}

View file

@ -0,0 +1,445 @@
# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: GPL-2.0-or-later
#
# December 2 2022, Christian Hopps <chopps@labn.net>
#
# Copyright (c) 2022, LabN Consulting, L.L.C.
#
"""Command to execute mutests."""
import asyncio
import logging
import os
import subprocess
import sys
import time
from argparse import ArgumentParser
from argparse import Namespace
from copy import deepcopy
from pathlib import Path
from typing import Union
from munet import parser
from munet.base import Bridge
from munet.base import get_event_loop
from munet.mutest import userapi as uapi
from munet.native import L3NodeMixin
from munet.native import Munet
from munet.parser import async_build_topology
from munet.parser import get_config
# We want all but critical to fit in 5 characters for alignment
logging.addLevelName(logging.WARNING, "WARN")
root_logger = logging.getLogger("")
exec_formatter = logging.Formatter("%(asctime)s %(levelname)5s: %(name)s: %(message)s")
async def get_unet(config: dict, croot: Path, rundir: Path, unshare: bool = False):
    """Create and run a new Munet topology.

    NOTE: this is an async *generator*; callers iterate it with
    ``async for unet in get_unet(...)`` so cleanup in ``finally`` runs when
    iteration completes or is aborted.

    The topology is built from the given ``config`` to run inside the path
    indicated by ``rundir``. If ``unshare`` is True then the process will
    unshare into its own private namespace.

    Args:
        config: a config dictionary obtained from ``munet.parser.get_config``.
            This value will be modified and stored in the built ``Munet``
            object.
        croot: common root of all tests, used to search for ``kinds.yaml``
            files.
        rundir: the path to the run directory for this topology.
        unshare: True to unshare the process into its own private namespace.

    Yields:
        Munet: The constructed and running topology.
    """
    tasks = []
    unet = None
    try:
        try:
            unet = await async_build_topology(
                config, rundir=str(rundir), unshare_inline=unshare
            )
        except Exception as error:
            logging.debug("unet build failed: %s", error, exc_info=True)
            raise
        try:
            tasks = await unet.run()
        except Exception as error:
            logging.debug("unet run failed: %s", error, exc_info=True)
            raise
        logging.debug("unet topology running")
        try:
            yield unet
        except Exception as error:
            logging.error("unet fixture: yield unet unexpected exception: %s", error)
            raise
    except KeyboardInterrupt:
        logging.info("Received keyboard while building topology")
        raise
    finally:
        if unet:
            await unet.async_delete()

        # No one ever awaits these so cancel them
        logging.debug("unet fixture: cleanup")
        for task in tasks:
            task.cancel()

        # Reset the class variables so auto number is predictable
        logging.debug("unet fixture: resetting ords to 1")
        L3NodeMixin.next_ord = 1
        Bridge.next_ord = 1
def common_root(path1: Union[str, Path], path2: Union[str, Path]) -> Path:
    """Return the longest shared ancestor of two paths.

    Args:
        path1: Path
        path2: Path

    Returns:
        Path: the shared root components between ``path1`` and ``path2``.

    Examples:
        >>> common_root("/foo/bar/baz", "/foo/bar/zip/zap")
        PosixPath('/foo/bar')
        >>> common_root("/foo/bar/baz", "/fod/bar/zip/zap")
        PosixPath('/')
    """
    parts1 = Path(path1).absolute().parts
    parts2 = Path(path2).absolute().parts
    # zip() stops at the shorter path, so no explicit length clamp is needed.
    shared = []
    for c1, c2 in zip(parts1, parts2):
        if c1 != c2:
            break
        shared.append(c1)
    return Path(*shared) if shared else None
async def collect(args: Namespace):
    """Collect test files.

    Files must match the pattern ``mutest_*.py``, and their containing
    directory must have a munet config file present. This function also changes
    the current directory to the common parent of all the tests, and paths are
    returned relative to the common directory.

    Args:
        args: argparse results

    Returns:
        (commondir, tests, configs): where ``commondir`` is the path representing
            the common parent directory of all the tests, ``tests`` is a
            dictionary of lists of test files, keyed on their containing directory
            path, and ``configs`` is a dictionary of config dictionaries also keyed
            on its containing directory path. The directory paths are relative to a
            common ancestor.
    """
    file_select = args.file_select
    upaths = args.paths if args.paths else ["."]
    globpaths = set()
    for upath in (Path(x) for x in upaths):
        if upath.is_file():
            paths = {upath.absolute()}
        else:
            paths = {x.absolute() for x in Path(upath).rglob(file_select)}
        globpaths |= paths
    tests = {}
    configs = {}

    # Find the common root
    # We don't actually need this anymore, the idea was prefix test names
    # with uncommon paths elements to automatically differentiate them.
    common = None
    sortedpaths = []
    for path in sorted(globpaths):
        sortedpaths.append(path)
        dirpath = path.parent
        common = common_root(common, dirpath) if common else dirpath

    ocwd = Path().absolute()
    try:
        os.chdir(common)
        # Work with relative paths to the common directory
        for path in (x.relative_to(common) for x in sortedpaths):
            dirpath = path.parent
            if dirpath not in configs:
                try:
                    configs[dirpath] = get_config(search=[dirpath])
                except FileNotFoundError:
                    logging.warning(
                        "Skipping '%s' as munet.{yaml,toml,json} not found in '%s'",
                        path,
                        dirpath,
                    )
                    continue
            if dirpath not in tests:
                tests[dirpath] = []
            # cwd is `common` here, so absolute() re-anchors the relative path.
            tests[dirpath].append(path.absolute())
    finally:
        os.chdir(ocwd)
    return common, tests, configs
async def execute_test(
    unet: Munet,
    test: Path,
    args: Namespace,
    test_num: int,
    exec_handler: logging.Handler,
) -> (int, int, Exception):
    """Execute a test case script.

    Using the built and running topology in ``unet`` for targets
    execute the test case script file ``test``.

    Args:
        unet: a running topology.
        test: path to the test case script file.
        args: argparse results.
        test_num: the number of this test case in the run.
        exec_handler: exec file handler to add to test loggers which do not propagate.

    Returns:
        (passed, failed, exc): step pass/fail counts and the exception which
            aborted execution, if any (otherwise None).
    """
    test_name = testname_from_path(test)

    # Get test case loggers
    logger = logging.getLogger(f"mutest.output.{test_name}")
    reslog = logging.getLogger(f"mutest.results.{test_name}")
    logger.addHandler(exec_handler)
    reslog.addHandler(exec_handler)

    # We need to send an info level log to cause the specific handler to be
    # created, otherwise all these debug ones don't get through
    reslog.info("")

    # reslog.debug("START: %s:%s from %s", test_num, test_name, test.stem)
    # reslog.debug("-" * 70)

    targets = dict(unet.hosts.items())
    targets["."] = unet  # "." addresses the top-level munet itself

    tc = uapi.TestCase(
        str(test_num), test_name, test, targets, logger, reslog, args.full_summary
    )
    passed, failed, e = tc.execute()

    run_time = time.time() - tc.info.start_time

    status = "PASS" if not (failed or e) else "FAIL"

    # Turn off for now
    reslog.debug("-" * 70)
    reslog.debug(
        "stats: %d steps, %d pass, %d fail, %s abort, %4.2fs elapsed",
        passed + failed,
        passed,
        failed,
        1 if e else 0,
        run_time,
    )
    reslog.debug("-" * 70)
    reslog.debug("END: %s %s:%s\n", status, test_num, test_name)

    return passed, failed, e
def testname_from_path(path: Path) -> str:
"""Return test name based on the path to the test file.
Args:
path: path to the test file.
Returns:
str: the name of the test.
"""
return str(Path(path).stem).replace("/", ".")
def print_header(reslog, unet):
    """Log the results table header, sized to the longest target name.

    Args:
        reslog: the results logger to emit the header on.
        unet: the running topology; its host names size the TARGET column.
    """
    targets = dict(unet.hosts.items())
    nmax = max(len(x) for x in targets)
    nmax = max(nmax, len("TARGET"))
    sum_fmt = uapi.TestCase.sum_fmt.format(nmax)
    reslog.info(sum_fmt, "NUMBER", "STAT", "TARGET", "TIME", "DESCRIPTION")
    reslog.info("-" * 70)
async def run_tests(args):
    """Collect and execute all test scripts, logging per-test and run summaries.

    Args:
        args: argparse results.

    Returns:
        int: 0 when every test script passed, 1 otherwise.
    """
    reslog = logging.getLogger("mutest.results")

    common, tests, configs = await collect(args)
    results = []
    errlog = logging.getLogger("mutest.error")
    reslog = logging.getLogger("mutest.results")
    printed_header = False
    tnum = 0
    start_time = time.time()
    try:
        for dirpath in tests:
            test_files = tests[dirpath]
            for test in test_files:
                tnum += 1
                # Each test gets a fresh copy of its directory's config.
                config = deepcopy(configs[dirpath])
                test_name = testname_from_path(test)
                rundir = args.rundir.joinpath(test_name)

                # Add a test case exec file handler to the root logger and
                # result logger
                exec_path = rundir.joinpath("mutest-exec.log")
                exec_path.parent.mkdir(parents=True, exist_ok=True)
                exec_handler = logging.FileHandler(exec_path, "w")
                exec_handler.setFormatter(exec_formatter)
                root_logger.addHandler(exec_handler)

                try:
                    async for unet in get_unet(config, common, rundir):
                        if not printed_header:
                            print_header(reslog, unet)
                            printed_header = True
                        passed, failed, e = await execute_test(
                            unet, test, args, tnum, exec_handler
                        )
                except KeyboardInterrupt as error:
                    errlog.warning("KeyboardInterrupt while running test %s", test_name)
                    passed, failed, e = 0, 0, error
                    raise
                except Exception as error:
                    logging.error(
                        "Error executing test %s: %s", test, error, exc_info=True
                    )
                    errlog.error(
                        "Error executing test %s: %s", test, error, exc_info=True
                    )
                    passed, failed, e = 0, 0, error
                finally:
                    # Remove the test case exec file handler from the root logger.
                    root_logger.removeHandler(exec_handler)
                    results.append((test_name, passed, failed, e))
    except KeyboardInterrupt:
        pass

    run_time = time.time() - start_time
    # Tally step counts and per-script pass/fail/abort totals.
    tnum = 0
    tpassed = 0
    tfailed = 0
    texc = 0
    spassed = 0
    sfailed = 0
    for result in results:
        _, passed, failed, e = result
        tnum += 1
        spassed += passed
        sfailed += failed
        if e:
            texc += 1
        if failed or e:
            tfailed += 1
        else:
            tpassed += 1

    reslog.info("")
    reslog.info(
        "run stats: %s steps, %s pass, %s fail, %s abort, %4.2fs elapsed",
        spassed + sfailed,
        spassed,
        sfailed,
        texc,
        run_time,
    )
    reslog.info("-" * 70)

    tnum = 0
    for result in results:
        test_name, passed, failed, e = result
        tnum += 1
        s = "FAIL" if failed or e else "PASS"
        reslog.info(" %s %s:%s", s, tnum, test_name)

    reslog.info("-" * 70)
    reslog.info(
        "END RUN: %s test scripts, %s passed, %s failed", tnum, tpassed, tfailed
    )

    return 1 if tfailed else 0
async def async_main(args):
    """Async entry point: run the tests, mapping aborts to status 3.

    Returns:
        int: the run_tests() status, or 3 on interrupt/unexpected exception.
    """
    status = 3
    try:
        # For some reason we are not catching exceptions raised inside
        status = await run_tests(args)
    except KeyboardInterrupt:
        logging.info("Exiting (async_main), received KeyboardInterrupt in main")
    except Exception as error:
        logging.info(
            "Exiting (async_main), unexpected exception %s", error, exc_info=True
        )
    logging.debug("async_main returns %s", status)
    return status
def main():
    """Entry point for the mutest command: parse args, set up logging, run."""
    ap = ArgumentParser()
    ap.add_argument(
        "--dist",
        type=int,
        nargs="?",
        const=-1,
        default=0,
        action="store",
        metavar="NUM-THREADS",
        help="Run in parallel, value is num. of threads or no value for auto",
    )
    ap.add_argument("-d", "--rundir", help="runtime directory for tempfiles, logs, etc")
    ap.add_argument(
        "--file-select", default="mutest_*.py", help="shell glob for finding tests"
    )
    ap.add_argument("--log-config", help="logging config file (yaml, toml, json, ...)")
    ap.add_argument(
        "-V",
        "--full-summary",
        action="store_true",
        help="print full summary headers from docstrings",
    )
    ap.add_argument(
        "-v", dest="verbose", action="count", default=0, help="More -v's, more verbose"
    )
    ap.add_argument("paths", nargs="*", help="Paths to collect tests from")
    args = ap.parse_args()

    rundir = args.rundir if args.rundir else "/tmp/mutest"
    args.rundir = Path(rundir)
    os.environ["MUNET_RUNDIR"] = rundir
    # Create the run directory without going through a shell: rundir is
    # user-supplied and must not be interpolated into a shell command line.
    os.makedirs(rundir, exist_ok=True)
    os.chmod(rundir, 0o755)

    config = parser.setup_logging(args, config_base="logconf-mutest")

    # Grab the exec formatter from the logging config
    if fconfig := config.get("formatters", {}).get("exec"):
        global exec_formatter  # pylint: disable=W0603
        exec_formatter = logging.Formatter(
            fconfig.get("format"), fconfig.get("datefmt")
        )

    loop = None
    status = 4
    try:
        loop = get_event_loop()
        status = loop.run_until_complete(async_main(args))
    except KeyboardInterrupt:
        logging.info("Exiting (main), received KeyboardInterrupt in main")
    except Exception as error:
        logging.info("Exiting (main), unexpected exception %s", error, exc_info=True)
    finally:
        if loop:
            loop.close()

    sys.exit(status)


if __name__ == "__main__":
    main()

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,254 @@
# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: GPL-2.0-or-later
#
# January 28 2023, Christian Hopps <chopps@labn.net>
#
# Copyright (c) 2023, LabN Consulting, L.L.C.
#
"""A tiny init for namespaces in python inspired by the C program tini."""
import argparse
import errno
import logging
import os
import shlex
import signal
import subprocess
import sys
import threading
import time
from signal import Signals as S
from . import linux
from .base import commander
# PID of our single direct child; -1 until the child has been created.
child_pid = -1
# Gates vdebug() output.
very_verbose = False
# Signal numbers to restore to SIG_DFL in the child before exec.
restore_signals = set()
def vdebug(*args, **kwargs):
    """Log a debug message only when very-verbose mode is enabled."""
    if very_verbose:
        logging.debug(*args, **kwargs)
def exit_with_status(pid, status):
    """Exit this process mirroring the child's wait status.

    Normal child exit: propagate the exit code (high byte of the status).
    Killed by a signal: exit with 0x80 | signum (common shell convention).
    """
    try:
        # High byte set => normal exit code; otherwise low byte is the signal.
        ec = status >> 8 if bool(status & 0xFF00) else status | 0x80
        logging.debug("reaped our child, exiting %s", ec)
        sys.exit(ec)
    except ValueError:
        # NOTE(review): unclear which call raises ValueError here -- TODO confirm.
        vdebug("pid %s didn't actually exit", pid)
def waitpid(tag):
    """Reap one exited child; exit this process if it was our main child.

    Args:
        tag: short string naming the caller, used only in log messages.
    """
    logging.debug("%s: waitid for exiting processes", tag)
    # Blocks until any child has exited.
    idobj = os.waitid(os.P_ALL, 0, os.WEXITED)
    pid = idobj.si_pid
    status = idobj.si_status

    if pid == child_pid:
        exit_with_status(pid, status)
    else:
        logging.debug("%s: reaped zombie pid %s with status %s", tag, pid, status)
def new_process_group():
    """Create a new process group and make it the terminal's foreground group.

    Best-effort: failures are logged rather than raised.
    """
    pid = os.getpid()
    try:
        pgid = os.getpgrp()
        if pgid == pid:
            logging.debug("already process group leader %s", pgid)
        else:
            logging.debug("creating new process group %s", pid)
            os.setpgid(pid, 0)
    except Exception as error:
        logging.warning("unable to get new process group: %s", error)
        return

    # Block these in order to allow foregrounding, otherwise we'd get SIGTTOU blocked
    signal.signal(S.SIGTTIN, signal.SIG_IGN)
    signal.signal(S.SIGTTOU, signal.SIG_IGN)
    fd = sys.stdin.fileno()
    if not os.isatty(fd):
        logging.debug("stdin not a tty no foregrounding required")
    else:
        try:
            # This will error if our session no longer associated with controlling tty.
            pgid = os.tcgetpgrp(fd)
            if pgid == pid:
                logging.debug("process group already in foreground %s", pgid)
            else:
                logging.debug("making us the foreground pgid backgrounding %s", pgid)
                os.tcsetpgrp(fd, pid)
        except OSError as error:
            if error.errno == errno.ENOTTY:
                logging.debug("session is no longer associated with controlling tty")
            else:
                logging.warning("unable to foreground pgid %s: %s", pid, error)
    # Restore default handling now that foregrounding is done.
    signal.signal(S.SIGTTIN, signal.SIG_DFL)
    signal.signal(S.SIGTTOU, signal.SIG_DFL)
def exec_child(exec_args):
    """Exec *exec_args* in this process after resetting signal state.

    Restores default handlers for the signals we hooked (module global
    restore_signals), creates a new process group, then execvp(3)s.  Only
    returns on exec failure, after logging and re-raising.
    """
    # Restore signals to default handling:
    for snum in restore_signals:
        signal.signal(snum, signal.SIG_DFL)

    # Create new process group.
    new_process_group()

    estring = shlex.join(exec_args)
    try:
        # and exec the process
        logging.debug("child: executing '%s'", estring)
        os.execvp(exec_args[0], exec_args)
        # NOTREACHED
    except Exception as error:
        logging.warning("child: unable to execute '%s': %s", estring, error)
        raise
def is_creating_pid_namespace():
    """Return True if children will be created in a new PID namespace.

    Compares the ``pid`` and ``pid_for_children`` namespace links; they
    differ after unshare(CLONE_NEWPID) but before the first fork.

    BUG FIX: the namespace symlinks live under ``/proc/self/ns/`` — the old
    ``/proc/self/pid`` paths do not exist, so readlink always failed.
    """
    p1name = subprocess.check_output(
        "readlink /proc/self/ns/pid", stderr=subprocess.STDOUT, shell=True
    )
    p2name = subprocess.check_output(
        "readlink /proc/self/ns/pid_for_children", stderr=subprocess.STDOUT, shell=True
    )
    return p1name != p2name
def restore_namespace(ppid_fd, uflags):
    """Re-enter the original namespaces identified by pidfd *ppid_fd*.

    Retries setns(2) up to 3 times (sleeping between attempts) as it can
    fail transiently, re-raising on the final failure; closes the fd when
    done.

    BUG FIX: the loop previously did not stop after a successful setns —
    it called setns again and slept unconditionally on every iteration.
    """
    fd = ppid_fd
    retry = 3
    for i in range(retry):
        try:
            linux.setns(fd, uflags)
        except OSError as error:
            logging.warning("could not reset to old namespace fd %s: %s", fd, error)
            if i == retry - 1:
                raise
            time.sleep(1)
        else:
            break
    os.close(fd)
def create_thread_test():
    """Spawn and join a trivial thread to verify threading still works."""

    def worker(name):
        logging.info("In thread: %s", name)

    logging.info("Create thread")
    t = threading.Thread(target=worker, args=(1,))
    logging.info("Run thread")
    t.start()
    logging.info("Join thread")
    t.join()
def run(args):
    """Exercise unshare of pid/mnt/net namespaces and restoration.

    Unshares into new namespaces, forks a holder child (required before
    various syscalls behave in a new PID namespace), verifies the ns links
    changed, remounts / and /proc, tests threading, then restores the
    original namespaces via the parent's pidfd and verifies the links match
    again.
    """
    del args
    # We look for this b/c the unshare pid will share with /sibn/init
    # nselm = "pid_for_children"
    # nsflags.append(f"--pid={pp / nselm}")
    # mutini now forks when created this way
    # cmd.append("--pid")
    # cmd.append("--fork")
    # cmd.append("--kill-child")
    # cmd.append("--mount-proc")

    uflags = linux.CLONE_NEWPID
    nslist = ["pid_for_children"]
    uflags |= linux.CLONE_NEWNS
    nslist.append("mnt")
    uflags |= linux.CLONE_NEWNET
    nslist.append("net")

    # Before values
    pid = os.getpid()
    # NOTE(review): reads ns links via /tmp/mu-global-proc — presumably a
    # bind-mount of the original /proc set up elsewhere; confirm.
    nsdict = {x: os.readlink(f"/tmp/mu-global-proc/{pid}/ns/{x}") for x in nslist}

    #
    # UNSHARE
    #
    create_thread_test()

    ppid = os.getppid()
    ppid_fd = linux.pidfd_open(ppid)
    linux.unshare(uflags)

    # random syscall's fail until we fork a child to establish the new pid namespace.
    global child_pid  # pylint: disable=global-statement
    child_pid = os.fork()
    if not child_pid:
        logging.info("In child sleeping")
        time.sleep(1200)
        sys.exit(1)

    # verify after values differ
    nnsdict = {x: os.readlink(f"/tmp/mu-global-proc/{pid}/ns/{x}") for x in nslist}
    assert not {k for k in nsdict if nsdict[k] == nnsdict[k]}

    # Remount / and any future mounts below it as private
    commander.cmd_raises("mount --make-rprivate /")
    # Mount a new /proc in our new namespace
    commander.cmd_raises("mount -t proc proc /proc")

    #
    # In NEW NS
    #
    cid = os.fork()
    if not cid:
        logging.info("In second child sleeping")
        time.sleep(4)
        sys.exit(1)
    logging.info("Waiting for second child")
    os.waitpid(cid, 0)

    try:
        create_thread_test()
    except Exception as error:
        print(error)

    #
    # RESTORE
    #
    logging.info("In new namespace, restoring old")
    # Make sure we can go back, not sure since this is PID namespace, but maybe
    restore_namespace(ppid_fd, uflags)

    # verify after values the same
    nnsdict = {x: os.readlink(f"/proc/self/ns/{x}") for x in nslist}
    assert nsdict == nnsdict
def main():
    """Parse arguments, configure logging, and run the namespace test.

    Exits 0 on a clean run, non-zero otherwise.

    BUG FIX: a successful run() previously fell through to
    ``sys.exit(status)`` with the initial failure status 4; now status is
    cleared to 0 on success.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument(
        "-v", dest="verbose", action="count", default=0, help="More -v's, more verbose"
    )
    ap.add_argument("rest", nargs=argparse.REMAINDER)
    args = ap.parse_args()

    level = logging.DEBUG if args.verbose else logging.INFO
    if args.verbose > 1:
        global very_verbose  # pylint: disable=global-statement
        very_verbose = True
    logging.basicConfig(
        level=level, format="%(asctime)s mutini: %(levelname)s: %(message)s"
    )

    status = 4
    try:
        run(args)
        status = 0
    except KeyboardInterrupt:
        logging.info("exiting (main), received KeyboardInterrupt in main")
    except Exception as error:
        logging.info("exiting (main), unexpected exception %s", error, exc_info=True)
    sys.exit(status)
# Script entry point.
if __name__ == "__main__":
    main()

428
tests/topotests/munet/mutini.py Executable file
View file

@ -0,0 +1,428 @@
#!/usr/bin/env python3
# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: GPL-2.0-or-later
#
# January 28 2023, Christian Hopps <chopps@labn.net>
#
# Copyright (c) 2023, LabN Consulting, L.L.C.
#
"""A tiny init for namespaces in python inspired by the C program tini."""
# pylint: disable=global-statement
import argparse
import errno
import logging
import os
import re
import shlex
import signal
import subprocess
import sys
from signal import Signals as S
try:
from munet import linux
except ModuleNotFoundError:
# We cannot use relative imports and still run this module directly as a script, and
# there are some use cases where we want to run this file as a script.
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import linux
class g:
    """Global variables for our program."""

    # PID of the exec'd child we are init for; -1 until fork returns.
    child_pid = -1
    # PID this process started as (1 when already pid 1 in a namespace).
    orig_pid = os.getpid()
    exit_signal = False
    # Cache of /proc/<pid>/status file contents, keyed by pid.
    pid_status_cache = {}
    # Signals whose handlers we installed and must restore in the child.
    restore_signals = set()
    # Set when more than one -v is given; gates vdebug() output.
    very_verbose = False
# Map of unshare(1)-style flag letters to clone(2) namespace flag bits.
unshare_flags = {
    "C": linux.CLONE_NEWCGROUP,
    "i": linux.CLONE_NEWIPC,
    "m": linux.CLONE_NEWNS,
    "n": linux.CLONE_NEWNET,
    "p": linux.CLONE_NEWPID,
    "u": linux.CLONE_NEWUTS,
    "T": linux.CLONE_NEWTIME,
}

# Job-control signals we ignore rather than forward to the child.
ignored_signals = {
    S.SIGTTIN,
    S.SIGTTOU,
}
# Signals left at default handling (not caught/forwarded).
abort_signals = {
    S.SIGABRT,
    S.SIGBUS,
    S.SIGFPE,
    S.SIGILL,
    S.SIGKILL,
    S.SIGSEGV,
    S.SIGSTOP,
    S.SIGSYS,
    S.SIGTRAP,
}
# Signals that are never propagated to the child.
no_prop_signals = abort_signals | ignored_signals | {S.SIGCHLD}
def vdebug(*args, **kwargs):
    """Debug-log only when very-verbose mode is on."""
    if not g.very_verbose:
        return
    logging.debug(*args, **kwargs)
def get_pid_status_item(status, stat):
    """Extract one field value from /proc/<pid>/status text.

    :param status: full text of a status file.
    :param stat: field name (e.g., "Name", "PPid").
    :return: stripped field value, or None when the field is absent.
    """
    match = re.search(rf"(?:^|\n){stat}:\t(.*)(?:\n|$)", status)
    if match is None:
        return None
    return match.group(1).strip()
def pget_pid_status_item(pid, stat):
    """Return field *stat* from /proc/<pid>/status, caching file contents.

    The status file is read once per pid and cached in g.pid_status_cache.

    BUG FIX: when the requested field was missing, get_pid_status_item()
    returns None and the unconditional ``.strip()`` raised AttributeError;
    now None is returned instead.
    """
    if pid not in g.pid_status_cache:
        with open(f"/proc/{pid}/status", "r", encoding="utf-8") as f:
            g.pid_status_cache[pid] = f.read().strip()
    item = get_pid_status_item(g.pid_status_cache[pid], stat)
    return item.strip() if item is not None else None
def get_pid_name(pid):
    """Best-effort process name for *pid* from the status cache.

    Falls back to the stringified pid on any failure (e.g., no cache entry).
    """
    try:
        cached = g.pid_status_cache[pid]
        return get_pid_status_item(cached, "Name")
    except Exception:
        return str(pid)
# def init_get_child_pids():
# """Return list of "children" pids.
# We consider any process with a 0 parent pid to also be our child as it
# nsentered our pid namespace from an external parent.
# """
# g.pid_status_cache.clear()
# pids = (int(x) for x in os.listdir("/proc") if x.isdigit() and x != "1")
# return (
# x for x in pids if x == g.child_pid or pget_pid_status_item(x, "PPid") == "0"
# )
def exit_with_status(status):
    """Translate an encoded wait status into a shell-style exit and exit.

    Normal exits re-use the child's exit code; signal deaths become
    0x80 | signum; anything else exits 255.
    """
    if os.WIFSIGNALED(status):
        code = 0x80 | os.WTERMSIG(status)
    elif os.WIFEXITED(status):
        code = os.WEXITSTATUS(status)
    else:
        code = 255
    logging.debug("exiting with code %s", code)
    sys.exit(code)
def waitpid(tag):
    """Reap one exited process via waitid(2); exit if it was our child.

    Blocks until some child changes state.  A pid other than g.child_pid is
    a zombie we reaped while acting as init and is only logged; if our
    exec'd child exited we exit with its status.
    """
    logging.debug("%s: waitid for exiting process", tag)
    idobj = os.waitid(os.P_ALL, 0, os.WEXITED)
    pid = idobj.si_pid
    status = idobj.si_status

    if pid != g.child_pid:
        pidname = get_pid_name(pid)
        logging.debug(
            "%s: reaped zombie %s (%s) w/ status %s", tag, pid, pidname, status
        )
        return

    logging.debug("reaped child with status %s", status)
    exit_with_status(status)
    # NOTREACHED
def sig_trasmit(signum, _):
    """Signal handler: forward the signal to the child, or exit if none.

    Installed by setup_init_signals() for most catchable signals.
    """
    signame = signal.Signals(signum).name
    if g.child_pid == -1:
        # We've received a signal after setting up to be init proc
        # but prior to fork or fork returning with child pid
        logging.debug("received %s prior to child exec, exiting", signame)
        sys.exit(0x80 | signum)

    try:
        os.kill(g.child_pid, signum)
    except OSError as error:
        if error.errno != errno.ESRCH:
            logging.error(
                "error forwarding signal %s to child, exiting: %s", signum, error
            )
            sys.exit(0x80 | signum)
        # ESRCH: the child is already gone; nothing to forward.
        logging.debug("child pid %s exited prior to signaling", g.child_pid)
def sig_sigchld(signum, _):
    """SIGCHLD handler: reap any exited child/zombie via waitpid()."""
    assert signum == S.SIGCHLD
    try:
        waitpid("SIGCHLD")
    except ChildProcessError as error:
        # Possible if the child was already reaped elsewhere.
        logging.warning("got SIGCHLD but no pid to wait on: %s", error)
def setup_init_signals():
    """Install init-style signal handlers prior to forking the child.

    SIGCHLD reaps; job-control signals are ignored; abort-class signals are
    left at default; everything else (except realtime signals) is forwarded
    to the child via sig_trasmit.  Hooked signals are recorded in
    g.restore_signals so the child can reset them before exec.
    """
    valid = set(signal.valid_signals())
    named = set(x.value for x in signal.Signals)
    for snum in sorted(named):
        if snum not in valid:
            continue
        # Skip the realtime signal range entirely.
        if S.SIGRTMIN <= snum <= S.SIGRTMAX:
            continue

        sname = signal.Signals(snum).name
        if snum == S.SIGCHLD:
            vdebug("installing local handler for %s", sname)
            signal.signal(snum, sig_sigchld)
            g.restore_signals.add(snum)
        elif snum in ignored_signals:
            vdebug("installing ignore handler for %s", sname)
            signal.signal(snum, signal.SIG_IGN)
            g.restore_signals.add(snum)
        elif snum in abort_signals:
            vdebug("leaving default handler for %s", sname)
            # signal.signal(snum, signal.SIG_DFL)
        else:
            vdebug("installing trasmit signal handler for %s", sname)
            try:
                signal.signal(snum, sig_trasmit)
                g.restore_signals.add(snum)
            except OSError as error:
                # Some signals cannot be caught (platform dependent).
                logging.warning(
                    "failed installing signal handler for %s: %s", sname, error
                )
def new_process_group():
    """Create and lead a new process group.

    This function will create a new process group if we are not yet leading one, and
    additionally foreground said process group in our session. This foregrounding
    action is copied from tini, and I believe serves a purpose when serving as init
    for a container (e.g., podman).
    """
    pid = os.getpid()
    try:
        pgid = os.getpgrp()
        if pgid == pid:
            logging.debug("already process group leader %s", pgid)
        else:
            logging.debug("creating new process group %s", pid)
            os.setpgid(pid, 0)
    except Exception as error:
        logging.warning("unable to get new process group: %s", error)
        return

    # Block these in order to allow foregrounding, otherwise we'd get SIGTTOU blocked
    signal.signal(S.SIGTTIN, signal.SIG_IGN)
    signal.signal(S.SIGTTOU, signal.SIG_IGN)
    fd = sys.stdin.fileno()
    if not os.isatty(fd):
        logging.debug("stdin not a tty no foregrounding required")
    else:
        try:
            # This will error if our session no longer associated with controlling tty.
            pgid = os.tcgetpgrp(fd)
            if pgid == pid:
                logging.debug("process group already in foreground %s", pgid)
            else:
                logging.debug("making us the foreground pgid backgrounding %s", pgid)
                os.tcsetpgrp(fd, pid)
        except OSError as error:
            if error.errno == errno.ENOTTY:
                logging.debug("session is no longer associated with controlling tty")
            else:
                logging.warning("unable to foreground pgid %s: %s", pid, error)
    # Restore default job-control signal handling.
    signal.signal(S.SIGTTIN, signal.SIG_DFL)
    signal.signal(S.SIGTTOU, signal.SIG_DFL)
def is_creating_pid_namespace():
    """Return True if children will be created in a new PID namespace.

    Compares the ``pid`` and ``pid_for_children`` namespace links; they
    differ after unshare(CLONE_NEWPID) but before the first fork.

    BUG FIX: the namespace symlinks live under ``/proc/self/ns/`` — the old
    ``/proc/self/pid`` paths do not exist, so readlink always failed.
    """
    p1name = subprocess.check_output(
        "readlink /proc/self/ns/pid", stderr=subprocess.STDOUT, shell=True
    )
    p2name = subprocess.check_output(
        "readlink /proc/self/ns/pid_for_children", stderr=subprocess.STDOUT, shell=True
    )
    return p1name != p2name
def be_init(new_pg, exec_args):
    """Act as an init process, optionally exec'ing *exec_args* in a child.

    :param new_pg: when no exec args are given, whether to create a new
        process group before reaping.
    :param exec_args: argv for the child to exec, or empty to just reap.

    Never returns: either loops reaping zombies or exits with the child's
    status.

    BUG FIX: ``os.wait()`` returns a ``(pid, status)`` tuple; the whole
    tuple was previously passed to exit_with_status(), which would raise a
    TypeError in os.WIFEXITED().  Unpack the status element.
    """
    #
    # Arrange for us to be killed when our parent dies, this will subsequently also kill
    # all procs in any PID namespace we are init for.
    #
    logging.debug("set us to be SIGKILLed when parent exits")
    linux.set_parent_death_signal(signal.SIGKILL)

    # If we are createing a new PID namespace for children...
    if g.orig_pid != 1:
        logging.debug("started as pid %s", g.orig_pid)
        # assert is_creating_pid_namespace()

        # Fork to become pid 1
        logging.debug("forking to become pid 1")
        child_pid = os.fork()
        if child_pid:
            logging.debug("in parent waiting on child pid %s to exit", child_pid)
            _, status = os.wait()
            logging.debug("got child exit status %s", status)
            exit_with_status(status)
            # NOTREACHED

        # We must be pid 1 now.
        logging.debug("in child as pid %s", os.getpid())
        assert os.getpid() == 1

        # We need a new /proc now.
        logging.debug("mount new /proc")
        linux.mount("proc", "/proc", "proc")

        # If the parent exists kill us using SIGKILL
        logging.debug("set us to be SIGKILLed when parent exits")
        linux.set_parent_death_signal(signal.SIGKILL)

    if not exec_args:
        if not new_pg:
            logging.debug("no exec args, no new process group")
            # # if 0 == os.getpgid(0):
            # status = os.setpgid(0, 1)
            # logging.debug("os.setpgid(0, 1) == %s", status)
        else:
            logging.debug("no exec args, creating new process group")
            # No exec so we are the "child".
            new_process_group()

        # Reap zombies forever.
        while True:
            logging.info("parent: waiting to reap zombies")
            linux.pause()
        # NOTREACHED

    # Set (parent) signal handlers before any fork to avoid race
    setup_init_signals()

    logging.debug("forking to execute child")
    g.child_pid = os.fork()
    if g.child_pid == 0:
        # In child, restore signals to default handling:
        for snum in g.restore_signals:
            signal.signal(snum, signal.SIG_DFL)
        # XXX is a new pg right?
        new_process_group()
        logging.debug("child: executing '%s'", shlex.join(exec_args))
        os.execvp(exec_args[0], exec_args)
        # NOTREACHED

    while True:
        logging.info("parent: waiting for child pid %s to exit", g.child_pid)
        waitpid("parent")
def unshare(flags):
    """Unshare into new namespaces.

    :param flags: string of unshare_flags letters (e.g., "pmn").
    :return: True if a new PID namespace was requested.
    :raises ValueError: on an unknown flag letter.
    """
    uflags = 0
    for flag in flags:
        if flag not in unshare_flags:
            raise ValueError(f"unknown unshare flag '{flag}'")
        uflags |= unshare_flags[flag]
    new_pid = bool(uflags & linux.CLONE_NEWPID)
    new_mnt = bool(uflags & linux.CLONE_NEWNS)

    logging.debug("unshareing with flags: %s", linux.clone_flag_string(uflags))
    linux.unshare(uflags)

    if new_pid and not new_mnt:
        try:
            # If we are not creating new mount namspace, remount /proc private
            # so that our mount of a new /proc doesn't affect parent namespace
            logging.debug("remount /proc recursive private")
            linux.mount("none", "/proc", None, linux.MS_REC | linux.MS_PRIVATE)
        except OSError as error:
            # EINVAL is OK b/c /proc not mounted may cause an error
            if error.errno != errno.EINVAL:
                raise
    if new_mnt:
        # Remount root as recursive private.
        logging.debug("remount / recursive private")
        linux.mount("none", "/", None, linux.MS_REC | linux.MS_PRIVATE)

    # if new_pid:
    #     logging.debug("mount new /proc")
    #     linux.mount("proc", "/proc", "proc")

    return new_pid
def main():
    """Parse CLI args, optionally unshare, then act as init for children.

    Exits with the child's status, 0x80|SIGINT on interrupt, or 5 on
    unexpected failure.

    BUG FIX: the unexpected-exception log message read "do to exception";
    corrected to "due to exception".
    """
    #
    # Parse CLI args.
    #
    ap = argparse.ArgumentParser()
    ap.add_argument(
        "-P",
        "--no-proc-group",
        action="store_true",
        help="set to inherit the process group",
    )
    valid_flags = "".join(unshare_flags)
    ap.add_argument(
        "--unshare-flags",
        help=(
            f"string of unshare(1) flags. Supported values from '{valid_flags}'."
            " 'm' will remount `/` recursive private. 'p' will remount /proc"
            " and fork, and the child will be signaled to exit on exit of parent.."
        ),
    )
    ap.add_argument(
        "-v", dest="verbose", action="count", default=0, help="more -v's, more verbose"
    )
    ap.add_argument("rest", nargs=argparse.REMAINDER)
    args = ap.parse_args()

    #
    # Setup logging.
    #
    level = logging.DEBUG if args.verbose else logging.INFO
    if args.verbose > 1:
        g.very_verbose = True
    logging.basicConfig(
        level=level, format="%(asctime)s mutini: %(levelname)s: %(message)s"
    )

    #
    # Run program
    #
    status = 5
    try:
        new_pid = False
        if args.unshare_flags:
            new_pid = unshare(args.unshare_flags)

        if g.orig_pid != 1 and not new_pid:
            # Simply hold the namespaces
            while True:
                logging.info("holding namespace waiting to be signaled to exit")
                linux.pause()
            # NOTREACHED

        be_init(not args.no_proc_group, args.rest)
        # NOTREACHED
        logging.critical("Exited from be_init!")
    except KeyboardInterrupt:
        logging.info("exiting (main), received KeyboardInterrupt in main")
        status = 0x80 | signal.SIGINT
    except Exception as error:
        logging.info("exiting (main), due to exception %s", error, exc_info=True)
    sys.exit(status)
# Script entry point.
if __name__ == "__main__":
    main()

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,374 @@
# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: GPL-2.0-or-later
#
# September 30 2021, Christian Hopps <chopps@labn.net>
#
# Copyright 2021, LabN Consulting, L.L.C.
#
"""A module that implements the standalone parser."""
import asyncio
import importlib.resources
import json
import logging
import logging.config
import os
import subprocess
import sys
import tempfile
from pathlib import Path
try:
import jsonschema # pylint: disable=C0415
import jsonschema.validators # pylint: disable=C0415
from jsonschema.exceptions import ValidationError # pylint: disable=C0415
except ImportError:
jsonschema = None
from .config import list_to_dict_with_key
from .native import Munet
def get_schema():
    """Return the munet JSON schema, loading and caching it on first use."""
    if get_schema.schema is None:
        # Locate the packaged munet-schema.json next to the munet package.
        with importlib.resources.path("munet", "munet-schema.json") as datapath:
            search = [str(datapath.parent)]
        get_schema.schema = get_config(basename="munet-schema", search=search)
    return get_schema.schema


# Function-attribute cache for the loaded schema.
get_schema.schema = None
# Filenames/dirs whose presence marks a directory as a project root.
project_root_contains = [
    ".git",
    "pyproject.toml",
    "tox.ini",
    "setup.cfg",
    "setup.py",
    "pytest.ini",
    ".projectile",
]
def is_project_root(path: Path) -> bool:
    """Return True if *path* contains any known project-root marker file."""
    return any(
        path.joinpath(marker).exists() for marker in project_root_contains
    )
def find_project_root(config_path: Path, project_root=None):
    """Locate the project root directory for *config_path*.

    When *project_root* is given it is used if it is an ancestor of
    *config_path* (otherwise a warning is logged and the config's directory
    is returned).  Without it, the nearest ancestor containing a
    project-root marker wins, falling back to the config's directory.
    """
    if project_root is not None:
        root = Path(project_root)
        if root in config_path.parents:
            return root
        logging.warning(
            "project_root %s is not a common ancestor of config file %s",
            root,
            config_path,
        )
        return config_path.parent
    candidates = (p for p in config_path.parents if is_project_root(p))
    return next(candidates, config_path.parent)
def get_config(pathname=None, basename="munet", search=None, logf=logging.debug):
    """Load a json/toml/yaml config file into a dict.

    :param pathname: explicit config path (relative to CWD); when absent the
        *search* directories are scanned for ``basename``.{yaml,toml,json}.
    :param basename: base filename to search for.
    :param search: directory, or list of directories, to search (default CWD).
    :param logf: logging callable used for search progress messages.
    :return: parsed config dict, with "config_pathname" set to the realpath.
    :raises FileNotFoundError: when no config file can be found.
    :raises ValueError: on an unrecognized file extension.

    BUG FIX: file handles from ``json.load(open(...))`` / yaml were never
    closed; now opened via ``with``.
    """
    cwd = os.getcwd()
    if not search:
        search = [cwd]
    elif isinstance(search, (str, Path)):
        search = [search]

    if pathname:
        pathname = os.path.join(cwd, pathname)
        if not os.path.exists(pathname):
            raise FileNotFoundError(pathname)
    else:
        for d in search:
            logf("%s", f'searching in "{d}" for "{basename}".{{yaml, toml, json}}')
            for ext in ("yaml", "toml", "json"):
                pathname = os.path.join(d, basename + "." + ext)
                if os.path.exists(pathname):
                    logf("%s", f'Found "{pathname}"')
                    break
            else:
                continue
            break
        else:
            raise FileNotFoundError(basename + ".{json,toml,yaml} in " + f"{search}")

    _, ext = pathname.rsplit(".", 1)
    if ext == "json":
        with open(pathname, encoding="utf-8") as f:
            config = json.load(f)
    elif ext == "toml":
        import toml  # pylint: disable=C0415

        config = toml.load(pathname)
    elif ext == "yaml":
        import yaml  # pylint: disable=C0415

        with open(pathname, encoding="utf-8") as f:
            config = yaml.safe_load(f)
    else:
        raise ValueError("Filename does not end with (.json|.toml|.yaml)")

    config["config_pathname"] = os.path.realpath(pathname)
    return config
def setup_logging(args, config_base="logconf"):
    """Load a logging dictConfig from *config_base*.yaml and apply it.

    Runs the search from args.rundir (restoring the original CWD when
    done), adjusts console handler levels based on args.verbose, and
    prefixes file handler filenames with the rundir.  Returns the loaded
    config dict.
    """
    # Create rundir and arrange for future commands to run in it.

    # Change CWD to the rundir prior to parsing config
    old = os.getcwd()
    os.chdir(args.rundir)
    try:
        search = [old]
        with importlib.resources.path("munet", config_base + ".yaml") as datapath:
            search.append(str(datapath.parent))

        def logf(msg, *p, **k):
            if args.verbose:
                print("PRELOG: " + msg % p, **k, file=sys.stderr)

        config = get_config(args.log_config, config_base, search, logf=logf)
        pathname = config["config_pathname"]
        del config["config_pathname"]

        if "info_console" in config["handlers"]:
            # mutest case
            if args.verbose > 1:
                config["handlers"]["console"]["level"] = "DEBUG"
                config["handlers"]["info_console"]["level"] = "DEBUG"
            elif args.verbose:
                config["handlers"]["console"]["level"] = "INFO"
                config["handlers"]["info_console"]["level"] = "DEBUG"
        elif args.verbose:
            # munet case
            config["handlers"]["console"]["level"] = "DEBUG"

        # add the rundir path to the filenames
        for v in config["handlers"].values():
            filename = v.get("filename")
            if not filename:
                continue
            v["filename"] = os.path.join(args.rundir, filename)

        logging.config.dictConfig(dict(config))
        logging.info("Loaded logging config %s", pathname)
        return config
    finally:
        os.chdir(old)
def append_hosts_files(unet, netname):
    """Append name/address entries for *netname* to each node's hosts.txt.

    Collects (name, ip) pairs for the switch (named "munet") and every host
    attached to *netname* (IPv4 and IPv6), then appends them to hosts.txt
    in each node's rundir.  No-op when *netname* is empty.
    """
    if not netname:
        return

    entries = []
    for name in ("munet", *list(unet.hosts)):
        if name == "munet":
            node = unet.switches[netname]
            ifname = None
        else:
            node = unet.hosts[name]
            # Skip hosts with no configured interface addresses.
            if not hasattr(node, "_intf_addrs"):
                continue
            ifname = node.get_ifname(netname)
        # Gather both IPv4 (False) and IPv6 (True) addresses.
        for b in (False, True):
            ifaddr = node.get_intf_addr(ifname, ipv6=b)
            if ifaddr and hasattr(ifaddr, "ip"):
                entries.append((name, ifaddr.ip))

    for name in ("munet", *list(unet.hosts)):
        node = unet if name == "munet" else unet.hosts[name]
        if not hasattr(node, "rundir"):
            continue
        with open(os.path.join(node.rundir, "hosts.txt"), "a+", encoding="ascii") as hf:
            hf.write("\n")
            for e in entries:
                hf.write(f"{e[1]}\t{e[0]}\n")
def validate_config(config, logger, args):
    """Validate *config* against the munet JSON schema.

    :return: True when valid (or when jsonschema is unavailable), False on
        a validation failure or missing schema.

    Runs from args.rundir (when args is given) so relative schema lookups
    work, restoring the original CWD afterwards.
    """
    if jsonschema is None:
        logger.debug("No validation w/o jsonschema module")
        return True

    old = os.getcwd()
    if args:
        os.chdir(args.rundir)
    try:
        validator = jsonschema.validators.Draft202012Validator(get_schema())
        validator.validate(instance=config)
        logger.debug("Validated %s", config["config_pathname"])
        return True
    except FileNotFoundError as error:
        logger.info("No schema found: %s", error)
        return False
    except ValidationError as error:
        logger.info("Validation failed: %s", error)
        return False
    finally:
        if args:
            os.chdir(old)
def load_kinds(args, search=None):
    """Load and merge "kinds" config files from the search path.

    When args.kinds_config is set, only that file is loaded (and a missing
    file raises).  Otherwise every kinds.{yaml,toml,json} found along
    *search* (plus the packaged kinds.yaml) is merged — earlier directories
    take precedence via dict update order.  Returns a dict with optional
    "kinds" and "cli" keys, or {} when nothing is found.
    """
    # Change CWD to the rundir prior to parsing config
    cwd = os.getcwd()
    if args:
        os.chdir(args.rundir)

    args_config = args.kinds_config if args else None
    try:
        if search is None:
            search = [cwd]
        with importlib.resources.path("munet", "kinds.yaml") as datapath:
            search.append(str(datapath.parent))

        configs = []
        if args_config:
            configs.append(get_config(args_config, "kinds", search=[]))
        else:
            # prefer directories at the front of the list
            for kdir in search:
                try:
                    configs.append(get_config(basename="kinds", search=[kdir]))
                except FileNotFoundError:
                    continue

        kinds = {}
        for config in configs:
            # XXX need to fix the issue with `connections: ["net0"]` not validating
            # if jsonschema is not None:
            #     validator = jsonschema.validators.Draft202012Validator(get_schema())
            #     validator.validate(instance=config)

            kinds_list = config.get("kinds", [])
            kinds_dict = list_to_dict_with_key(kinds_list, "name")
            if kinds_dict:
                logging.info("Loading kinds config from %s", config["config_pathname"])
                if "kinds" in kinds:
                    kinds["kinds"].update(**kinds_dict)
                else:
                    kinds["kinds"] = kinds_dict

            cli_list = config.get("cli", {}).get("commands", [])
            if cli_list:
                logging.info("Loading cli comands from %s", config["config_pathname"])
                if "cli" not in kinds:
                    kinds["cli"] = {}
                if "commands" not in kinds["cli"]:
                    kinds["cli"]["commands"] = []
                kinds["cli"]["commands"].extend(cli_list)

        return kinds
    except FileNotFoundError as error:
        # if we have kinds in args but the file doesn't exist, raise the error
        if args_config is not None:
            raise error
        return {}
    finally:
        if args:
            os.chdir(cwd)
async def async_build_topology(
    config=None,
    logger=None,
    rundir=None,
    args=None,
    unshare_inline=False,
    pytestconfig=None,
    search_root=None,
    top_level_pidns=True,
):
    """Build and return a Munet topology object from *config*.

    Creates the rundir, merges "kinds" and CLI commands found along the
    project search path into the config, instantiates Munet, builds it
    (deleting it and re-raising on failure), appends hosts files for the
    dns-network if configured, and writes the final config to
    <rundir>/config.json.
    """
    if not rundir:
        rundir = tempfile.mkdtemp(prefix="unet")
    subprocess.run(f"mkdir -p {rundir} && chmod 755 {rundir}", check=True, shell=True)

    isolated = not args.host if args else True
    if not config:
        config = get_config(basename="munet")

    # create search directories from common root if given
    cpath = Path(config["config_pathname"]).absolute()
    project_root = args.project_root if args else None
    if not search_root:
        search_root = find_project_root(cpath, project_root)
    if not search_root:
        search = [cpath.parent]
    else:
        search_root = Path(search_root).absolute()
        # NOTE(review): if search_root is NOT an ancestor of the config
        # path, `search` is never assigned and the load_kinds() call below
        # would raise NameError — confirm intended behavior.
        if search_root in cpath.parents:
            search = list(cpath.parents)
            if remcount := len(search_root.parents):
                search = search[0:-remcount]

    # load kinds along search path and merge into config
    kinds = load_kinds(args, search=search)
    config_kinds_dict = list_to_dict_with_key(config.get("kinds", []), "name")
    config["kinds"] = {**kinds.get("kinds", {}), **config_kinds_dict}

    # mere CLI command from kinds into config as well.
    kinds_cli_list = kinds.get("cli", {}).get("commands", [])
    config_cli_list = config.get("cli", {}).get("commands", [])
    if config_cli_list:
        if kinds_cli_list:
            config_cli_list.extend(list(kinds_cli_list))
    elif kinds_cli_list:
        if "cli" not in config:
            config["cli"] = {}
        if "commands" not in config["cli"]:
            config["cli"]["commands"] = []
        config["cli"]["commands"].extend(list(kinds_cli_list))

    unet = Munet(
        rundir=rundir,
        config=config,
        pytestconfig=pytestconfig,
        isolated=isolated,
        pid=top_level_pidns,
        unshare_inline=args.unshare_inline if args else unshare_inline,
        logger=logger,
    )

    try:
        await unet._async_build(logger)  # pylint: disable=W0212
    except Exception as error:
        logging.critical("Failure building munet topology: %s", error, exc_info=True)
        await unet.async_delete()
        raise
    except KeyboardInterrupt:
        await unet.async_delete()
        raise

    topoconf = config.get("topology")
    if not topoconf:
        return unet

    dns_network = topoconf.get("dns-network")
    if dns_network:
        append_hosts_files(unet, dns_network)

    # Write our current config to the run directory
    with open(f"{unet.rundir}/config.json", "w", encoding="utf-8") as f:
        json.dump(unet.config, f, indent=2)

    return unet
def build_topology(config=None, logger=None, rundir=None, args=None, pytestconfig=None):
    """Synchronous wrapper around async_build_topology().

    BUG FIX: *pytestconfig* was previously passed as the fifth positional
    argument of async_build_topology, which is ``unshare_inline`` — so the
    pytest config object was used as a truthy unshare flag and the real
    pytestconfig parameter was left None.  Pass it by keyword.
    """
    return asyncio.run(
        async_build_topology(config, logger, rundir, args, pytestconfig=pytestconfig)
    )

View file

@ -0,0 +1 @@
"""Sub-package supporting munet use in pytest."""

View file

@ -0,0 +1,447 @@
# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: GPL-2.0-or-later
#
# April 22 2022, Christian Hopps <chopps@gmail.com>
#
# Copyright (c) 2022, LabN Consulting, L.L.C
#
"""A module that implements pytest fixtures.
To use in your project, in your conftest.py add:
from munet.testing.fixtures import *
"""
import contextlib
import logging
import os
from pathlib import Path
from typing import Union
import pytest
import pytest_asyncio
from ..base import BaseMunet
from ..base import Bridge
from ..base import get_event_loop
from ..cleanup import cleanup_current
from ..cleanup import cleanup_previous
from ..native import L3NodeMixin
from ..parser import async_build_topology
from ..parser import get_config
from .util import async_pause_test
from .util import pause_test
@contextlib.asynccontextmanager
async def achdir(ndir: Union[str, Path], desc=""):
    """Async context manager: chdir to *ndir*, restoring the old cwd on exit.

    :param ndir: directory to change into.
    :param desc: optional tag included in debug log messages.
    """
    prev = os.getcwd()
    os.chdir(ndir)
    if desc:
        logging.debug("%s: chdir from %s to %s", desc, prev, ndir)
    try:
        yield
    finally:
        if desc:
            logging.debug("%s: chdir back from %s to %s", desc, ndir, prev)
        os.chdir(prev)
@contextlib.contextmanager
def chdir(ndir: Union[str, Path], desc=""):
    """Context manager: chdir to *ndir*, restoring the old cwd on exit.

    :param ndir: directory to change into.
    :param desc: optional tag included in debug log messages.
    """
    prev = os.getcwd()
    os.chdir(ndir)
    if desc:
        logging.debug("%s: chdir from %s to %s", desc, prev, ndir)
    try:
        yield
    finally:
        if desc:
            logging.debug("%s: chdir back from %s to %s", desc, ndir, prev)
        os.chdir(prev)
def get_test_logdir(nodeid=None, module=False):
    """Return the log directory (relative path) for a test or test module.

    :param nodeid: pytest nodeid; defaults to the currently running test.
    :param module: return the module-level directory instead of per-test.
    """
    worker = os.getenv("PYTEST_XDIST_WORKER", "")
    mode = os.getenv("PYTEST_XDIST_MODE", "no")

    # nodeid: all_protocol_startup/test_all_protocol_startup.py::test_router_running
    # may be missing "::testname" if module is True
    if not nodeid:
        nodeid = os.environ["PYTEST_CURRENT_TEST"].split(" ")[0]
    cleaned = nodeid.replace("[", "_").replace("]", "_")

    if module:
        sep = cleaned.rfind("::")
        path = cleaned[:sep] if sep != -1 else cleaned
        testname = ""
    else:
        path, testname = cleaned.split("::")
        testname = testname.replace("/", ".")
    path = path[:-3].replace("/", ".")  # drop ".py", dots for slashes

    # We use different logdir paths based on how xdist is running.
    if mode == "each":
        parts = (path, "worker-logs", worker) if module else (path, testname, worker)
        return os.path.join(*parts)
    assert mode in ("no", "load", "loadfile", "loadscope"), f"Unknown dist mode {mode}"
    if module:
        return path
    return os.path.join(path, testname)
def _push_log_handler(desc, logpath):
logpath = os.path.abspath(logpath)
logging.debug("conftest: adding %s logging at %s", desc, logpath)
root_logger = logging.getLogger()
handler = logging.FileHandler(logpath, mode="w")
fmt = logging.Formatter("%(asctime)s %(levelname)5s: %(message)s")
handler.setFormatter(fmt)
root_logger.addHandler(handler)
return handler
def _pop_log_handler(handler):
root_logger = logging.getLogger()
logging.debug("conftest: removing logging handler %s", handler)
root_logger.removeHandler(handler)
@contextlib.contextmanager
def log_handler(desc, logpath):
    """Context manager: install a root file log handler, removing it on exit."""
    handler = _push_log_handler(desc, logpath)
    try:
        yield
    finally:
        _pop_log_handler(handler)
# =================
# Sessions Fixtures
# =================
@pytest.fixture(autouse=True, scope="session")
def session_autouse():
    """Session fixture: clean stale munet state and add session logging.

    Cleanup runs only in the controlling process (not in topotest workers,
    detected via PYTEST_TOPOTEST_WORKER).
    """
    if "PYTEST_TOPOTEST_WORKER" not in os.environ:
        is_worker = False
    elif not os.environ["PYTEST_TOPOTEST_WORKER"]:
        is_worker = False
    else:
        is_worker = True

    if not is_worker:
        # This is unfriendly to multi-instance
        cleanup_previous()

    # We never pop as we want to keep logging
    _push_log_handler("session", "/tmp/unet-test/pytest-session.log")

    yield

    if not is_worker:
        cleanup_current()
# ===============
# Module Fixtures
# ===============
@pytest.fixture(autouse=True, scope="module")
def module_autouse(request):
    """Module fixture: per-module exec log and run tests from the test dir.

    After the module's tests complete, verifies that no Munet instance was
    left behind.
    """
    logpath = get_test_logdir(request.node.name, True)
    logpath = os.path.join("/tmp/unet-test", logpath, "pytest-exec.log")
    with log_handler("module", logpath):
        sdir = os.path.dirname(os.path.realpath(request.fspath))
        with chdir(sdir, "module autouse fixture"):
            yield

        # Teardown sanity check after leaving the test directory.
        if BaseMunet.g_unet:
            raise Exception("Base Munet was not cleaned up/deleted")
@pytest.fixture(scope="module")
def event_loop():
    """Create an instance of the default event loop for the session."""
    loop = get_event_loop()
    try:
        logging.info("event_loop_fixture: yielding with new event loop watcher")
        yield loop
    finally:
        # Always close the loop so resources are released per module.
        loop.close()
@pytest.fixture(scope="module")
def rundir_module():
    """Return the per-module run directory under /tmp/unet-test."""
    path = os.path.join("/tmp/unet-test", get_test_logdir(module=True))
    logging.debug("conftest: test module rundir %s", path)
    return path
async def _unet_impl(
    _rundir, _pytestconfig, unshare=None, top_level_pidns=None, param=None
):
    """Shared async-generator implementation behind the unet fixtures.

    *param* may be a config basename, a bool (unshare flag), or a
    (basename, unshare, top_level_pidns) tuple; explicit *unshare* /
    *top_level_pidns* arguments override the param-derived defaults.
    Builds and runs the topology, yields it to the test module, then
    deletes it and resets node/bridge ordinals.  Build or run failures
    skip the module.
    """
    try:
        # Default is not to unshare inline if not specified otherwise
        unshare_default = False
        pidns_default = True
        if isinstance(param, (tuple, list)):
            pidns_default = bool(param[2]) if len(param) > 2 else True
            unshare_default = bool(param[1]) if len(param) > 1 else False
            param = str(param[0])
        elif isinstance(param, bool):
            unshare_default = param
            param = None
        if unshare is None:
            unshare = unshare_default
        if top_level_pidns is None:
            top_level_pidns = pidns_default

        logging.info("unet fixture: basename=%s unshare_inline=%s", param, unshare)
        _unet = await async_build_topology(
            config=get_config(basename=param) if param else None,
            rundir=_rundir,
            unshare_inline=unshare,
            top_level_pidns=top_level_pidns,
            pytestconfig=_pytestconfig,
        )
    except Exception as error:
        logging.debug(
            "unet fixture: unet build failed: %s\nparam: %s",
            error,
            param,
            exc_info=True,
        )
        pytest.skip(
            f"unet fixture: unet build failed: {error}", allow_module_level=True
        )
        raise

    try:
        tasks = await _unet.run()
    except Exception as error:
        logging.debug("unet fixture: unet run failed: %s", error, exc_info=True)
        await _unet.async_delete()
        pytest.skip(f"unet fixture: unet run failed: {error}", allow_module_level=True)
        raise

    logging.debug("unet fixture: containers running")

    # Pytest is supposed to always return even if exceptions
    try:
        yield _unet
    except Exception as error:
        logging.error("unet fixture: yield unet unexpected exception: %s", error)

    logging.debug("unet fixture: module done, deleting unet")
    await _unet.async_delete()

    # No one ever awaits these so cancel them
    logging.debug("unet fixture: cleanup")
    for task in tasks:
        task.cancel()

    # Reset the class variables so auto number is predictable
    logging.debug("unet fixture: resetting ords to 1")
    L3NodeMixin.next_ord = 1
    Bridge.next_ord = 1
@pytest.fixture(scope="module")
async def unet(request, rundir_module, pytestconfig):  # pylint: disable=W0621
    """A unet creating fixture.

    The request param is either the basename of the config file or a tuple of the form:
    (basename, unshare, top_level_pidns), with the second and third elements boolean and
    optional, defaulting to False, True.
    """
    param = request.param if hasattr(request, "param") else None
    sdir = os.path.dirname(os.path.realpath(request.fspath))
    async with achdir(sdir, "unet fixture"):
        async for x in _unet_impl(rundir_module, pytestconfig, param=param):
            yield x
@pytest.fixture(scope="module")
async def unet_share(request, rundir_module, pytestconfig):  # pylint: disable=W0621
    """A unet creating fixture.

    This share variant keeps munet from unsharing the process to a new namespace so that
    root level commands and actions are execute on the host, normally they are executed
    in the munet namespace which allowing things like scapy inline in tests to work.

    The request param is either the basename of the config file or a tuple of the form:
    (basename, top_level_pidns), the second value is a boolean.
    """
    param = request.param if hasattr(request, "param") else None
    if isinstance(param, (tuple, list)):
        # Re-shape to (basename, unshare=False, top_level_pidns).
        param = (param[0], False, param[1])

    sdir = os.path.dirname(os.path.realpath(request.fspath))
    async with achdir(sdir, "unet_share fixture"):
        async for x in _unet_impl(
            rundir_module, pytestconfig, unshare=False, param=param
        ):
            yield x
@pytest.fixture(scope="module")
async def unet_unshare(request, rundir_module, pytestconfig):  # pylint: disable=W0621
    """A unet creating fixture.

    This unshare variant has the top level munet unshare the process inline so that
    root level commands and actions are execute in a new namespace. This allows things
    like scapy inline in tests to work.

    The request param is either the basename of the config file or a tuple of the form:
    (basename, top_level_pidns), the second value is a boolean.
    """
    param = request.param if hasattr(request, "param") else None
    if isinstance(param, (tuple, list)):
        # Re-shape to (basename, unshare=True, top_level_pidns).
        param = (param[0], True, param[1])

    sdir = os.path.dirname(os.path.realpath(request.fspath))
    async with achdir(sdir, "unet_unshare fixture"):
        async for x in _unet_impl(
            rundir_module, pytestconfig, unshare=True, param=param
        ):
            yield x
# =================
# Function Fixtures
# =================
@pytest.fixture(autouse=True, scope="function")
async def function_autouse(request):
    """Autouse fixture: run each test from the directory containing its test file."""
    test_dir = os.path.dirname(os.path.realpath(request.fspath))
    async with achdir(test_dir, "func.fixture"):
        yield
@pytest.fixture(autouse=True)
async def check_for_pause(request, pytestconfig):
    """Pause before the test when --pause is given and munet is unshared inline.

    When we unshare inline we can't pause in the pytest_runtest_makereport hook
    so do it here instead.
    """
    unet = BaseMunet.g_unet
    if unet and unet.unshare_inline and bool(pytestconfig.getoption("--pause")):
        await async_pause_test(f"XXX before test '{request.node.name}'")
    yield
@pytest.fixture(scope="function")
def stepf(pytestconfig):
    """Return a step function that logs (and optionally pauses before) each step."""
    do_pause = pytestconfig.getoption("pause")
    counter = [0]  # mutable cell so the closure can advance the step number

    def step(desc=""):
        suffix = f": {desc}" if desc else ""
        if do_pause:
            pause_test(f"before step {counter[0]}{suffix}")
        logging.info("STEP %s%s", counter[0], suffix)
        counter[0] += 1

    return step
@pytest_asyncio.fixture(scope="function")
async def astepf(pytestconfig):
    """Return an async step function that logs (and optionally pauses before) each step."""
    do_pause = pytestconfig.getoption("pause")
    counter = [0]  # mutable cell so the closure can advance the step number

    async def step(desc=""):
        suffix = f": {desc}" if desc else ""
        if do_pause:
            await async_pause_test(f"before step {counter[0]}{suffix}")
        logging.info("STEP %s%s", counter[0], suffix)
        counter[0] += 1

    return step
@pytest.fixture(scope="function")
def rundir():
    """Return the per-test-function run directory under /tmp/unet-test."""
    rd = os.path.join("/tmp/unet-test", get_test_logdir(module=False))
    logging.debug("conftest: test function rundir %s", rd)
    return rd
# Configure logging
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_setup(item):
    """Point pytest's capture-log file at this test's run directory before setup."""
    # Per-test run directory, mirroring the layout used by the rundir fixtures.
    d = os.path.join(
        "/tmp/unet-test", get_test_logdir(nodeid=item.nodeid, module=False)
    )
    config = item.config
    # Redirect the pytest logging plugin's log file into the test's rundir so
    # execution logs land next to the test's other artifacts.
    logging_plugin = config.pluginmanager.get_plugin("logging-plugin")
    filename = Path(d, "pytest-exec.log")
    logging_plugin.set_log_path(str(filename))
    logging.debug("conftest: test function setup: rundir %s", d)
    yield
@pytest.fixture
async def unet_perfunc(request, rundir, pytestconfig):  # pylint: disable=W0621
    """Create a per-test-function unet from an optional config basename param."""
    param = getattr(request, "param", None)
    async for instance in _unet_impl(rundir, pytestconfig, param=param):
        yield instance
@pytest.fixture
async def unet_perfunc_unshare(request, rundir, pytestconfig):  # pylint: disable=W0621
    """Build unet per test function with an optional topology basename parameter.

    The fixture can be parameterized to choose different config files.
    For example, use as follows to run the test with unet_perfunc configured
    first with a config file named `cfg1.yaml` then with config file `cfg2.yaml`
    (where the actual files could end with `json` or `toml` rather than `yaml`).

        @pytest.mark.parametrize(
            "unet_perfunc", ["cfg1", "cfg2"], indirect=["unet_perfunc"]
        )
        def test_example(unet_perfunc)
    """
    param = getattr(request, "param", None)
    async for instance in _unet_impl(rundir, pytestconfig, unshare=True, param=param):
        yield instance
@pytest.fixture
async def unet_perfunc_share(request, rundir, pytestconfig):  # pylint: disable=W0621
    """Build unet per test function with an optional topology basename parameter.

    This share variant keeps munet from unsharing the process to a new namespace so
    that root level commands and actions are executed on the host; normally they are
    executed in the munet namespace, which allows things like scapy inline in tests
    to work.

    The fixture can be parameterized to choose different config files. For example,
    use as follows to run the test with unet_perfunc configured first with a config
    file named `cfg1.yaml` then with config file `cfg2.yaml` (where the actual files
    could end with `json` or `toml` rather than `yaml`).

        @pytest.mark.parametrize(
            "unet_perfunc", ["cfg1", "cfg2"], indirect=["unet_perfunc"]
        )
        def test_example(unet_perfunc)
    """
    param = getattr(request, "param", None)
    async for instance in _unet_impl(rundir, pytestconfig, unshare=False, param=param):
        yield instance

View file

@ -0,0 +1,225 @@
# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: GPL-2.0-or-later
#
# April 22 2022, Christian Hopps <chopps@gmail.com>
#
# Copyright (c) 2022, LabN Consulting, L.L.C
#
"""A module that implements pytest hooks.
To use in your project, in your conftest.py add:
from munet.testing.hooks import *
"""
import logging
import os
import sys
import traceback
import pytest
from ..base import BaseMunet # pylint: disable=import-error
from ..cli import cli # pylint: disable=import-error
from .util import pause_test
# ===================
# Hooks (non-fixture)
# ===================
def pytest_addoption(parser):
    """Register munet's command line options with pytest."""
    # (option name, keyword arguments) pairs, registered in this order.
    option_specs = [
        (
            "--cli-on-error",
            dict(action="store_true", help="CLI on test failure"),
        ),
        (
            "--coverage",
            dict(action="store_true", help="Enable coverage gathering if supported"),
        ),
        (
            "--gdb",
            dict(
                default="",
                metavar="HOST[,HOST...]",
                help="Comma-separated list of nodes to launch gdb on, or 'all'",
            ),
        ),
        (
            "--gdb-breakpoints",
            dict(
                default="",
                metavar="BREAKPOINT[,BREAKPOINT...]",
                help="Comma-separated list of breakpoints",
            ),
        ),
        (
            "--gdb-use-emacs",
            dict(
                action="store_true",
                help="Use emacsclient to run gdb instead of a shell",
            ),
        ),
        (
            "--pcap",
            dict(
                default="",
                metavar="NET[,NET...]",
                help="Comma-separated list of networks to capture packets on, or 'all'",
            ),
        ),
        (
            "--pause",
            dict(action="store_true", help="Pause after each test"),
        ),
        (
            "--pause-at-end",
            dict(action="store_true", help="Pause before taking munet down"),
        ),
        (
            "--pause-on-error",
            dict(
                action="store_true",
                help="Pause after (disables default when --shell or -vtysh given)",
            ),
        ),
        (
            "--no-pause-on-error",
            dict(
                dest="pause_on_error",
                action="store_false",
                help="Do not pause after (disables default when --shell or -vtysh given)",
            ),
        ),
        (
            "--shell",
            dict(
                default="",
                metavar="NODE[,NODE...]",
                help="Comma-separated list of nodes to spawn shell on, or 'all'",
            ),
        ),
        (
            "--stdout",
            dict(
                default="",
                metavar="NODE[,NODE...]",
                help="Comma-separated list of nodes to open tail-f stdout window on, or 'all'",
            ),
        ),
        (
            "--stderr",
            dict(
                default="",
                metavar="NODE[,NODE...]",
                help="Comma-separated list of nodes to open tail-f stderr window on, or 'all'",
            ),
        ),
    ]
    for name, kwargs in option_specs:
        parser.addoption(name, **kwargs)
def pytest_configure(config):
    """Propagate xdist state to the environment and validate window options."""
    if "PYTEST_XDIST_WORKER" in os.environ:
        # We are inside an xdist worker process.
        os.environ["PYTEST_IS_WORKER"] = os.environ["PYTEST_XDIST_WORKER"]
        is_xdist = True
        is_worker = True
    else:
        os.environ["PYTEST_XDIST_MODE"] = config.getoption("dist", "no")
        os.environ["PYTEST_IS_WORKER"] = ""
        is_xdist = os.environ["PYTEST_XDIST_MODE"] != "no"
        is_worker = False

    # Turn on live logging if user specified verbose and the config has a CLI level set
    if config.getoption("--verbose") and not is_xdist and not config.getini("log_cli"):
        if config.getoption("--log-cli-level", None) is None:
            # By setting the CLI option to the ini value it enables log_cli=1
            ini_level = config.getini("log_cli_level")
            if ini_level is not None:
                config.option.log_cli_level = ini_level

    # Figure out what kind of terminal multiplexer/display we can open windows in.
    in_tmux = bool(os.getenv("TMUX", ""))
    in_screen = not in_tmux and bool(os.getenv("STY", ""))
    in_xterm = not in_tmux and not in_screen and bool(os.getenv("DISPLAY", ""))
    any_windows = in_tmux or in_screen or in_xterm
    pause_windows = in_tmux or in_xterm
    xdist_no_windows = is_xdist and not is_worker and not pause_windows

    for winopt in ("--shell", "--stdout", "--stderr"):
        value = config.getoption(winopt)
        if value and xdist_no_windows:
            pytest.exit(
                f"{winopt} use requires byobu/TMUX/XTerm "
                f"under dist {os.environ['PYTEST_XDIST_MODE']}"
            )
        elif value and not is_xdist and not any_windows:
            pytest.exit(f"{winopt} use requires byobu/TMUX/SCREEN/XTerm")
def pytest_runtest_makereport(item, call):
    """Pause or invoke CLI as directed by config.

    Called for each test phase (setup/call/teardown); reacts to --pause,
    --pause-on-error and --cli-on-error.
    """
    isatty = sys.stdout.isatty()
    pause = bool(item.config.getoption("--pause"))
    skipped = False
    if call.excinfo is None:
        error = False
    elif call.excinfo.typename == "Skipped":
        # A skip is neither a pass nor a failure; never pause for it.
        skipped = True
        error = False
        pause = False
    else:
        error = True
        modname = item.parent.module.__name__
        exval = call.excinfo.value
        logging.error(
            "test %s/%s failed: %s: stdout: '%s' stderr: '%s'",
            modname,
            item.name,
            exval,
            exval.stdout if hasattr(exval, "stdout") else "NA",
            exval.stderr if hasattr(exval, "stderr") else "NA",
        )
        if not pause:
            pause = item.config.getoption("--pause-on-error")
    # On failure, optionally drop into the munet CLI (tty required).
    if error and isatty and item.config.getoption("--cli-on-error"):
        if not BaseMunet.g_unet:
            logging.error("Could not launch CLI b/c no munet exists yet")
        else:
            print(f"\nCLI-ON-ERROR: {call.excinfo.typename}")
            print(f"CLI-ON-ERROR:\ntest {modname}/{item.name} failed: {exval}")
            if hasattr(exval, "stdout") and exval.stdout:
                print("stdout: " + exval.stdout.replace("\n", "\nstdout: "))
            if hasattr(exval, "stderr") and exval.stderr:
                print("stderr: " + exval.stderr.replace("\n", "\nstderr: "))
            cli(BaseMunet.g_unet)
    if pause:
        if skipped:
            # Suppress further pauses for this item (e.g. its teardown phase).
            item.skip_more_pause = True
        elif hasattr(item, "skip_more_pause"):
            pass
        elif call.when == "setup":
            if error:
                item.skip_more_pause = True
            # we can't asyncio.run() (which pause does) if we are unshare_inline
            # at this point, count on an autouse fixture to pause instead in this
            # case
            if not BaseMunet.g_unet or not BaseMunet.g_unet.unshare_inline:
                pause_test(f"before test '{item.nodeid}'")
        # check for a result to try and catch setup (or module setup) failure
        # e.g., after a module level fixture fails, we do not want to pause on every
        # skipped test.
        elif call.when == "teardown" and call.excinfo:
            logging.warning(
                "Caught exception during teardown: %s\n:Traceback:\n%s",
                call.excinfo,
                "".join(traceback.format_tb(call.excinfo.tb)),
            )
            pause_test(f"after teardown after test '{item.nodeid}'")
        elif call.when == "teardown" and call.result:
            pause_test(f"after test '{item.nodeid}'")
        elif error:
            item.skip_more_pause = True
            print(f"\nPAUSE-ON-ERROR: {call.excinfo.typename}")
            print(f"PAUSE-ON-ERROR:\ntest {modname}/{item.name} failed: {exval}")
            if hasattr(exval, "stdout") and exval.stdout:
                print("stdout: " + exval.stdout.replace("\n", "\nstdout: "))
            if hasattr(exval, "stderr") and exval.stderr:
                print("stderr: " + exval.stderr.replace("\n", "\nstderr: "))
            pause_test(f"PAUSE-ON-ERROR: '{item.nodeid}'")

View file

@ -0,0 +1,110 @@
# -*- coding: utf-8 eval: (blacken-mode 1) -*-
# SPDX-License-Identifier: GPL-2.0-or-later
#
# April 22 2022, Christian Hopps <chopps@gmail.com>
#
# Copyright (c) 2022, LabN Consulting, L.L.C
#
"""Utility functions useful when using munet testing functionailty in pytest."""
import asyncio
import datetime
import functools
import logging
import sys
import time
from ..base import BaseMunet
from ..cli import async_cli
# =================
# Utility Functions
# =================
async def async_pause_test(desc=""):
    """Interactively pause the run, offering the munet CLI or pdb before resuming.

    On a non-tty stdout this is a no-op (aside from a log message), since there
    is no one to prompt.
    """
    if not sys.stdout.isatty():
        suffix = f" for {desc}" if desc else ""
        logging.info("NO PAUSE on non-tty terminal%s", suffix)
        return

    while True:
        if desc:
            print(f"\n== PAUSING: {desc} ==")
        try:
            user = input('PAUSED, "cli" for CLI, "pdb" to debug, "Enter" to continue: ')
        except EOFError:
            print("^D...continuing")
            break
        user = user.strip()
        if user == "cli":
            await async_cli(BaseMunet.g_unet)
        elif user == "pdb":
            breakpoint()  # pylint: disable=W1515
        elif user:
            print(f'Unrecognized input: "{user}"')
        else:
            break
def pause_test(desc=""):
    """Synchronous wrapper around async_pause_test()."""
    coro = async_pause_test(desc)
    asyncio.run(coro)
def retry(retry_timeout, initial_wait=0, expected=True):
    """Decorator: retry the function until success or `retry_timeout` elapses.

    Success is a return value of None when `expected` is True (the default); when
    `expected` is False the logic is inverted, except for exceptions: a non-None
    return value ends the retry loop and is returned. Exceptions raised by the
    function count as failures; if the timeout expires on a raising attempt, that
    exception is re-raised, otherwise the last return value is returned.

    * `retry_timeout`: Retry for at least this many seconds; after waiting
      initial_wait seconds
    * `initial_wait`: Sleeps for this many seconds before first executing function
    * `expected`: if False then the return logic is inverted, except for exceptions,
      (i.e., a non None ends the retry loop, and returns that value)

    The wrapped function also accepts `retry_timeout`, `expected` and
    `initial_wait` keyword arguments which override the decorator's values for
    that single call.
    """

    def _retry(func):
        @functools.wraps(func)
        def func_retry(*args, **kwargs):
            retry_sleep = 2

            # Allow the wrapped function's args to override the fixtures
            _retry_timeout = kwargs.pop("retry_timeout", retry_timeout)
            _expected = kwargs.pop("expected", expected)
            _initial_wait = kwargs.pop("initial_wait", initial_wait)
            retry_until = datetime.datetime.now() + datetime.timedelta(
                seconds=_retry_timeout + _initial_wait
            )

            # Bug fix: use the per-call override (_initial_wait); previously the
            # decorator-time value was used here, so a caller-supplied
            # initial_wait extended the timeout but was never actually slept.
            if _initial_wait > 0:
                logging.info("Waiting for [%s]s as initial delay", _initial_wait)
                time.sleep(_initial_wait)

            while True:
                seconds_left = (retry_until - datetime.datetime.now()).total_seconds()
                try:
                    ret = func(*args, **kwargs)
                    if _expected and ret is None:
                        logging.debug("Function succeeds")
                        return ret
                    # Bug fix: with expected=False a non-None value is success and
                    # ends the loop (as documented); previously the code kept
                    # retrying until the timeout before returning it.
                    if not _expected and ret is not None:
                        logging.debug("Function returned %s", ret)
                        return ret
                    logging.debug("Function returned %s", ret)
                except Exception as error:
                    logging.info("Function raised exception: %s", str(error))
                    ret = error

                if seconds_left < 0:
                    logging.info("Retry timeout of %ds reached", _retry_timeout)
                    if isinstance(ret, Exception):
                        raise ret
                    return ret

                logging.info(
                    "Sleeping %ds until next retry with %.1f retry time left",
                    retry_sleep,
                    seconds_left,
                )
                time.sleep(retry_sleep)

        func_retry._original = func  # pylint: disable=W0212
        return func_retry

    return _retry

View file

@ -24,7 +24,7 @@ log_file_date_format = %Y-%m-%d %H:%M:%S
junit_logging = all
junit_log_passing_tests = true
norecursedirs = .git example_test example_topojson_test lib docker
norecursedirs = .git example_test example_topojson_test lib munet docker
# Directory to store test results and run logs in, default shown
# rundir = /tmp/topotests