# -*- coding: utf-8 eval: (blacken-mode 1) -*-
"""
Topotest conftest.py file.
"""
# pylint: disable=consider-using-f-string

import contextlib
import glob
import logging
import os
import re
import resource
import subprocess
import sys
import time
from pathlib import Path

import lib.fixtures
import pytest
from lib.micronet_compat import Mininet
from lib.topogen import diagnose_env, get_topogen
from lib.topolog import get_test_logdir, logger
from lib.topotest import json_cmp_result
from munet import cli
from munet.base import Commander, proc_error
from munet.cleanup import cleanup_current, cleanup_previous
from munet.config import ConfigOptionsProxy
from munet.testing.util import pause_test

from lib import topolog, topotest

try:
    # Used by munet native tests
    from munet.testing.fixtures import event_loop, unet  # pylint: disable=all # noqa

    @pytest.fixture(scope="module")
    def rundir_module(pytestconfig):
        d = os.path.join(pytestconfig.option.rundir, get_test_logdir())
        logging.debug("rundir_module: test module rundir %s", d)
        return d

except (AttributeError, ImportError):
    pass


# Remove this and use munet version when we move to pytest_asyncio
@contextlib.contextmanager
def chdir(ndir, desc=""):
    odir = os.getcwd()
    os.chdir(ndir)
    if desc:
        logging.debug("%s: chdir from %s to %s", desc, odir, ndir)
    try:
        yield
    finally:
        if desc:
            logging.debug("%s: chdir back from %s to %s", desc, ndir, odir)
        os.chdir(odir)


@contextlib.contextmanager
def log_handler(basename, logpath):
    topolog.logstart(basename, logpath)
    try:
        yield
    finally:
        topolog.logfinish(basename, logpath)
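
# Example usage of the two helpers above (an illustrative sketch only; the
# module_autouse fixture further below is the real consumer, and the module
# name and paths here are made up):
#
#     with log_handler("ospf_topo1", "/tmp/topotests/ospf_topo1/exec.log"):
#         with chdir("/path/to/topotests/ospf_topo1", "example"):
#             ...  # run the module's tests with logging and cwd set up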


def pytest_addoption(parser):
    """
    Add topology-only option to the topology tester. This option makes pytest
    only run setup_module() to set up the topology without running any tests.
    """
    parser.addoption(
        "--asan-abort",
        action="store_true",
        help="Configure address sanitizer to abort process on error",
    )

    parser.addoption(
        "--cli-on-error",
        action="store_true",
        help="Mininet cli on test failure",
    )

    parser.addoption(
        "--gdb-breakpoints",
        metavar="SYMBOL[,SYMBOL...]",
        help="Comma-separated list of functions to set gdb breakpoints on",
    )

    parser.addoption(
        "--gdb-daemons",
        metavar="DAEMON[,DAEMON...]",
        help="Comma-separated list of daemons to spawn gdb on, or 'all'",
    )

    parser.addoption(
        "--gdb-routers",
        metavar="ROUTER[,ROUTER...]",
        help="Comma-separated list of routers to spawn gdb on, or 'all'",
    )

    parser.addoption(
        "--gdb-use-emacs",
        action="store_true",
        help="Use emacsclient to run gdb instead of a shell",
    )

    parser.addoption(
        "--logd",
        action="append",
        metavar="DAEMON[,ROUTER[,...]]",
        help=(
            "Tail -f the DAEMON log file on all or a subset of ROUTERs."
            " Option can be given multiple times."
        ),
    )

    parser.addoption(
        "--memleaks",
        action="store_true",
        help="Report memstat results as errors",
    )

    parser.addoption(
        "--pause",
        action="store_true",
        help="Pause after each test",
    )

    parser.addoption(
        "--pause-at-end",
        action="store_true",
        help="Pause before taking munet down",
    )

    parser.addoption(
        "--pause-on-error",
        action="store_true",
        help="Pause after there is an error (bad assert or proc dies)",
    )

    parser.addoption(
        "--no-pause-on-error",
        dest="pause_on_error",
        action="store_false",
        help="Do not pause after errors (disables default when --shell or --vtysh given)",
    )

    parser.addoption(
        "--pcap",
        default="",
        metavar="NET[,NET...]",
        help="Comma-separated list of networks to capture packets on, or 'all'",
    )

    parser.addoption(
        "--perf",
        action="append",
        metavar="DAEMON[,ROUTER[,...]]",
        help=(
            "Collect performance data from given DAEMON on all or a subset of ROUTERs."
            " Option can be given multiple times."
        ),
    )

    parser.addoption(
        "--perf-options",
        metavar="OPTS",
        default="-g",
        help="Options to pass to `perf record`.",
    )

    rundir_help = "directory for running in and log files"
    parser.addini("rundir", rundir_help, default="/tmp/topotests")
    parser.addoption("--rundir", metavar="DIR", help=rundir_help)

    parser.addoption(
        "--shell",
        metavar="ROUTER[,ROUTER...]",
        help="Comma-separated list of routers to spawn shell on, or 'all'",
    )

    parser.addoption(
        "--shell-on-error",
        action="store_true",
        help="Spawn shell on all routers on test failure",
    )

    parser.addoption(
        "--strace-daemons",
        metavar="DAEMON[,DAEMON...]",
        help="Comma-separated list of daemons to strace, or 'all'",
    )

    parser.addoption(
        "--topology-only",
        action="store_true",
        default=False,
        help="Only set up this topology, don't run tests",
    )

    parser.addoption(
        "--valgrind-extra",
        action="store_true",
        help="Generate suppression file, and enable more precise (slower) valgrind checks",
    )

    parser.addoption(
        "--valgrind-memleaks",
        action="store_true",
        help="Run all daemons under valgrind for memleak detection",
    )

    parser.addoption(
        "--vtysh",
        metavar="ROUTER[,ROUTER...]",
        help="Comma-separated list of routers to spawn vtysh on, or 'all'",
    )

    parser.addoption(
        "--vtysh-on-error",
        action="store_true",
        help="Spawn vtysh on all routers on test failure",
    )
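
# Illustrative example (not part of the test run): the options registered above
# are ordinary pytest command-line flags, so a typical interactive debugging
# invocation of a single test module might look like the following (the test
# path is made up):
#
#     pytest --vtysh-on-error --pause-on-error --rundir=/tmp/topotests \
#         ospf_topo1/test_ospf_topo1.py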


def check_for_valgrind_memleaks():
    assert topotest.g_pytest_config.option.valgrind_memleaks

    leaks = []
    tgen = get_topogen()  # pylint: disable=redefined-outer-name
    latest = []
    existing = []
    if tgen is not None:
        logdir = tgen.logdir
        if hasattr(tgen, "valgrind_existing_files"):
            existing = tgen.valgrind_existing_files
        latest = glob.glob(os.path.join(logdir, "*.valgrind.*"))
        latest = [x for x in latest if "core" not in x]

    daemons = set()
    for vfile in latest:
        if vfile in existing:
            continue
        # do not consider memleaks from parent fork (i.e., owned by root)
        if os.stat(vfile).st_uid == 0:
            existing.append(vfile)  # do not check again
            logger.debug("Skipping valgrind file %s owned by root", vfile)
            continue
        logger.debug("Checking valgrind file %s not owned by root", vfile)
        with open(vfile, encoding="ascii") as vf:
            vfcontent = vf.read()
            match = re.search(r"ERROR SUMMARY: (\d+) errors", vfcontent)
            if match:
                existing.append(vfile)  # have summary don't check again
            if match and match.group(1) != "0":
                emsg = "{} in {}".format(match.group(1), vfile)
                leaks.append(emsg)
                daemon = re.match(r".*\.valgrind\.(.*)\.\d+", vfile).group(1)
                daemons.add("{}({})".format(daemon, match.group(1)))

    if tgen is not None:
        tgen.valgrind_existing_files = existing

    if leaks:
        logger.error("valgrind memleaks found:\n\t%s", "\n\t".join(leaks))
        pytest.fail("valgrind memleaks found for daemons: " + " ".join(daemons))
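
# For reference: the files matched above appear to follow a
# "<name>.valgrind.<daemon>.<pid>" naming pattern (that is what the
# r".*\.valgrind\.(.*)\.\d+" match assumes), and the line searched for is
# valgrind's standard summary footer, for example:
#
#     ==1234== ERROR SUMMARY: 2 errors from 2 contexts (suppressed: 0 from 0)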


def check_for_memleaks():
    leaks = []
    tgen = get_topogen()  # pylint: disable=redefined-outer-name
    latest = []
    existing = []
    if tgen is not None:
        logdir = tgen.logdir
        if hasattr(tgen, "memstat_existing_files"):
            existing = tgen.memstat_existing_files
        latest = glob.glob(os.path.join(logdir, "*/*.err"))

    daemons = []
    for vfile in latest:
        if vfile in existing:
            continue
        with open(vfile, encoding="ascii") as vf:
            vfcontent = vf.read()
            num = vfcontent.count("memstats:")
            if num:
                existing.append(vfile)  # have summary don't check again
                emsg = "{} types in {}".format(num, vfile)
                leaks.append(emsg)
                daemon = re.match(r".*test[a-z_A-Z0-9\+]*/(.*)\.err", vfile).group(1)
                daemons.append("{}({})".format(daemon, num))

    if tgen is not None:
        tgen.memstat_existing_files = existing

    if leaks:
        logger.error("memleaks found:\n\t%s", "\n\t".join(leaks))
        pytest.fail("memleaks found for daemons: " + " ".join(daemons))
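
# Roughly speaking (illustrative note): the "*/*.err" files scanned above are the
# daemons' stderr capture files in each router's log directory, and when memleak
# checking is enabled (TOPOTESTS_CHECK_MEMLEAK, see pytest_configure below)
# daemons that exit with outstanding allocations emit lines containing
# "memstats:", which is what this helper counts and reports.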


def check_for_core_dumps():
    tgen = get_topogen()  # pylint: disable=redefined-outer-name
    if not tgen:
        return

    if not hasattr(tgen, "existing_core_files"):
        tgen.existing_core_files = set()
    existing = tgen.existing_core_files

    cores = glob.glob(os.path.join(tgen.logdir, "*/*.dmp"))
    latest = {x for x in cores if x not in existing}
    if latest:
        existing |= latest
        tgen.existing_core_files = existing

        emsg = "New core[s] found: " + ", ".join(latest)
        logger.error(emsg)
        pytest.fail(emsg)


def check_for_backtraces():
    tgen = get_topogen()  # pylint: disable=redefined-outer-name
    if not tgen:
        return

    if not hasattr(tgen, "existing_backtrace_files"):
        tgen.existing_backtrace_files = {}
    existing = tgen.existing_backtrace_files

    latest = glob.glob(os.path.join(tgen.logdir, "*/*.log"))
    backtraces = []
    for vfile in latest:
        with open(vfile, encoding="ascii") as vf:
            vfcontent = vf.read()
            btcount = vfcontent.count("Backtrace:")
        if not btcount:
            continue
        if vfile not in existing:
            existing[vfile] = 0
        if btcount == existing[vfile]:
            continue
        existing[vfile] = btcount
        backtraces.append(vfile)

    if backtraces:
        emsg = "New backtraces found in: " + ", ".join(backtraces)
        logger.error(emsg)
        pytest.fail(emsg)


@pytest.fixture(autouse=True, scope="module")
def module_autouse(request):
    basename = get_test_logdir(request.node.nodeid, True)
    logdir = Path(topotest.g_pytest_config.option.rundir) / basename
    logpath = logdir / "exec.log"

    subprocess.check_call("mkdir -p -m 1777 {}".format(logdir), shell=True)

    with log_handler(basename, logpath):
        sdir = os.path.dirname(os.path.realpath(request.fspath))
        with chdir(sdir, "module autouse fixture"):
            yield


@pytest.fixture(autouse=True, scope="module")
def module_check_memtest(request):
    yield
    if request.config.option.valgrind_memleaks:
        if get_topogen() is not None:
            check_for_valgrind_memleaks()
    if request.config.option.memleaks:
        if get_topogen() is not None:
            check_for_memleaks()


#
# Disable per test function logging as FRR CI system can't handle it.
#
# @pytest.fixture(autouse=True, scope="function")
# def function_autouse(request):
#     # For tests we actually use the logdir name as the logfile base
#     logbase = get_test_logdir(nodeid=request.node.nodeid, module=False)
#     logbase = os.path.join(topotest.g_pytest_config.option.rundir, logbase)
#     logpath = Path(logbase)
#     path = Path(f"{logpath.parent}/exec-{logpath.name}.log")
#     subprocess.check_call("mkdir -p -m 1777 {}".format(logpath.parent), shell=True)
#     with log_handler(request.node.nodeid, path):
#         yield


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(item: pytest.Item) -> None:
    "Hook the function that is called to execute the test."

    # For topology only run the CLI then exit
    if item.config.option.topology_only:
        get_topogen().cli()
        pytest.exit("exiting after --topology-only")

    # Let the default pytest_runtest_call execute the test function
    yield

    check_for_backtraces()
    check_for_core_dumps()

    # Check for leaks if requested
    if item.config.option.valgrind_memleaks:
        check_for_valgrind_memleaks()

    if item.config.option.memleaks:
        check_for_memleaks()


def pytest_assertrepr_compare(op, left, right):
    """
    Show proper assertion error message for json_cmp results.
    """
    del op

    json_result = left
    if not isinstance(json_result, json_cmp_result):
        json_result = right
        if not isinstance(json_result, json_cmp_result):
            return None

    return json_result.gen_report()
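
# Illustrative sketch of the pattern this hook supports (router name, command and
# expected data are made up): test modules produce a json_cmp_result by comparing
# live JSON output against an expected dict with lib.topotest.json_cmp() and then
# assert on it, letting the hook above render a readable mismatch report:
#
#     router = get_topogen().gears["r1"]
#     output = router.vtysh_cmd("show ip route json", isjson=True)
#     assert topotest.json_cmp(output, expected) is None, "unexpected routes on r1"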


def pytest_configure(config):
    """
    Assert that the environment is correctly configured, and get extra config.
    """
    topotest.g_pytest_config = ConfigOptionsProxy(config)

    if config.getoption("--collect-only"):
        return

    if "PYTEST_XDIST_WORKER" not in os.environ:
        os.environ["PYTEST_XDIST_MODE"] = config.getoption("dist", "no")
        os.environ["PYTEST_TOPOTEST_WORKER"] = ""
        is_xdist = os.environ["PYTEST_XDIST_MODE"] != "no"
        is_worker = False
        wname = ""
    else:
        wname = os.environ["PYTEST_XDIST_WORKER"]
        os.environ["PYTEST_TOPOTEST_WORKER"] = wname
        is_xdist = True
        is_worker = True

    resource.setrlimit(
        resource.RLIMIT_CORE, (resource.RLIM_INFINITY, resource.RLIM_INFINITY)
    )
    # -----------------------------------------------------
    # Set some defaults for the pytest.ini [pytest] section
    # -----------------------------------------------------

    rundir = config.option.rundir
    if not rundir:
        rundir = config.getini("rundir")
    if not rundir:
        rundir = "/tmp/topotests"
    config.option.rundir = rundir

    if not config.getoption("--junitxml"):
        config.option.xmlpath = os.path.join(rundir, "topotests.xml")
    xmlpath = config.option.xmlpath

    # Save an existing topotest.xml
    if os.path.exists(xmlpath):
        fmtime = time.localtime(os.path.getmtime(xmlpath))
        suffix = "-" + time.strftime("%Y%m%d%H%M%S", fmtime)
        commander = Commander("pytest")
        mv_path = commander.get_exec_path("mv")
        commander.cmd_status([mv_path, xmlpath, xmlpath + suffix])

    # Set the log_file (exec) to inside the rundir if not specified
    if not config.getoption("--log-file") and not config.getini("log_file"):
        config.option.log_file = os.path.join(rundir, "exec.log")

    # Handle pytest-xdist: each worker gets its own top level log file
    # `exec-worker-N.log`
    if wname:
        wname = wname.replace("gw", "worker-")
        cpath = Path(config.option.log_file).absolute()
        config.option.log_file = f"{cpath.parent}/{cpath.stem}-{wname}{cpath.suffix}"
    elif is_xdist:
        cpath = Path(config.option.log_file).absolute()
        config.option.log_file = f"{cpath.parent}/{cpath.stem}-xdist{cpath.suffix}"

    # Turn on live logging if user specified verbose and the config has a CLI level set
    if config.getoption("--verbose") and not is_xdist and not config.getini("log_cli"):
        if config.getoption("--log-cli-level", None) is None:
            # By setting the CLI option to the ini value it enables log_cli=1
            cli_level = config.getini("log_cli_level")
            if cli_level is not None:
                config.option.log_cli_level = cli_level

    have_tmux = bool(os.getenv("TMUX", ""))
    have_screen = not have_tmux and bool(os.getenv("STY", ""))
    have_xterm = not have_tmux and not have_screen and bool(os.getenv("DISPLAY", ""))
    have_windows = have_tmux or have_screen or have_xterm
    have_windows_pause = have_tmux or have_xterm
    xdist_no_windows = is_xdist and not is_worker and not have_windows_pause

    def assert_feature_windows(b, feature):
        if b and xdist_no_windows:
            pytest.exit(
                "{} use requires byobu/TMUX/XTerm under dist {}".format(
                    feature, os.environ["PYTEST_XDIST_MODE"]
                )
            )
        elif b and not is_xdist and not have_windows:
            pytest.exit("{} use requires byobu/TMUX/SCREEN/XTerm".format(feature))

    #
    # Check for window capability if given options that require window
    #
    assert_feature_windows(config.option.gdb_routers, "GDB")
    assert_feature_windows(config.option.gdb_daemons, "GDB")
    assert_feature_windows(config.option.cli_on_error, "--cli-on-error")
    assert_feature_windows(config.option.shell, "--shell")
    assert_feature_windows(config.option.shell_on_error, "--shell-on-error")
    assert_feature_windows(config.option.vtysh, "--vtysh")
    assert_feature_windows(config.option.vtysh_on_error, "--vtysh-on-error")

    if config.option.topology_only and is_xdist:
        pytest.exit("Cannot use --topology-only with distributed test mode")

    # Check environment now that we have config
    if not diagnose_env(rundir):
        pytest.exit("environment has errors, please read the logs in %s" % rundir)

    # slave TOPOTESTS_CHECK_MEMLEAK to memleaks flag
    if config.option.memleaks:
        if "TOPOTESTS_CHECK_MEMLEAK" not in os.environ:
            os.environ["TOPOTESTS_CHECK_MEMLEAK"] = "/dev/null"
    else:
        if "TOPOTESTS_CHECK_MEMLEAK" in os.environ:
            del os.environ["TOPOTESTS_CHECK_MEMLEAK"]
        if "TOPOTESTS_CHECK_STDERR" in os.environ:
            del os.environ["TOPOTESTS_CHECK_STDERR"]


@pytest.fixture(autouse=True, scope="session")
def setup_session_auto():
    # Aligns logs nicely
    logging.addLevelName(logging.WARNING, " WARN")
    logging.addLevelName(logging.INFO, " INFO")

    if "PYTEST_TOPOTEST_WORKER" not in os.environ:
        is_worker = False
    elif not os.environ["PYTEST_TOPOTEST_WORKER"]:
        is_worker = False
    else:
        is_worker = True

    logger.debug("Before the run (is_worker: %s)", is_worker)
    if not is_worker:
        cleanup_previous()
    yield
    if not is_worker:
        cleanup_current()
    logger.debug("After the run (is_worker: %s)", is_worker)


def pytest_runtest_setup(item):
    module = item.parent.module
    script_dir = os.path.abspath(os.path.dirname(module.__file__))
    os.environ["PYTEST_TOPOTEST_SCRIPTDIR"] = script_dir


def pytest_runtest_makereport(item, call):
    "Log all assert messages to default logger with error level"

    pause = bool(item.config.getoption("--pause"))
    title = "unset"

    if call.excinfo is None:
        error = False
    else:
        parent = item.parent
        modname = parent.module.__name__

        # Treat skips as non errors, don't pause after
        if call.excinfo.typename == "Skipped":
            pause = False
            error = False
            logger.info(
                'test skipped at "{}/{}": {}'.format(
                    modname, item.name, call.excinfo.value
                )
            )
        else:
            error = True
            # Handle assert failures
            parent._previousfailed = item  # pylint: disable=W0212
            logger.error(
                'test failed at "{}/{}": {}'.format(
                    modname, item.name, call.excinfo.value
                )
            )
            title = "{}/{}".format(modname, item.name)

            # We want to pause, if requested, on any error not just test cases
            # (e.g., call.when == "setup")
            if not pause:
                pause = item.config.option.pause_on_error or item.config.option.pause

            # (topogen) Set topology error to avoid advancing in the test.
            tgen = get_topogen()  # pylint: disable=redefined-outer-name
            if tgen is not None:
                # This will cause topogen to report error on `routers_have_failure`.
                tgen.set_error("{}/{}".format(modname, item.name))

    commander = Commander("pytest")
    isatty = sys.stdout.isatty()
    error_cmd = None

    if error and item.config.option.vtysh_on_error:
        error_cmd = commander.get_exec_path(["vtysh"])
    elif error and item.config.option.shell_on_error:
        error_cmd = os.getenv("SHELL", commander.get_exec_path(["bash"]))

    if error_cmd:
        is_tmux = bool(os.getenv("TMUX", ""))
        is_screen = not is_tmux and bool(os.getenv("STY", ""))
        is_xterm = not is_tmux and not is_screen and bool(os.getenv("DISPLAY", ""))

        channel = None
        win_info = None
        wait_for_channels = []
        wait_for_procs = []
        # Really would like something better than using this global here.
        # Not all tests use topogen though so get_topogen() won't work.
        for node in Mininet.g_mnet_inst.hosts.values():
            pause = True

            if is_tmux:
                channel = (
                    "{}-{}".format(os.getpid(), Commander.tmux_wait_gen)
                    if not isatty
                    else None
                )
                Commander.tmux_wait_gen += 1
                wait_for_channels.append(channel)

            pane_info = node.run_in_window(
                error_cmd,
                new_window=win_info is None,
                background=True,
                title="{} ({})".format(title, node.name),
                name=title,
                tmux_target=win_info,
                wait_for=channel,
            )
            if is_tmux:
                if win_info is None:
                    win_info = pane_info
            elif is_xterm:
                assert isinstance(pane_info, subprocess.Popen)
                wait_for_procs.append(pane_info)

        # Now wait on any channels
        for channel in wait_for_channels:
            logger.debug("Waiting on TMUX channel %s", channel)
            commander.cmd_raises([commander.get_exec_path("tmux"), "wait", channel])
        for p in wait_for_procs:
            logger.debug("Waiting on TMUX xterm process %s", p)
            o, e = p.communicate()
            if p.wait():
                logger.warning("xterm proc failed: %s:", proc_error(p, o, e))

    if error and item.config.option.cli_on_error:
        # Really would like something better than using this global here.
        # Not all tests use topogen though so get_topogen() won't work.
        if Mininet.g_mnet_inst:
            cli.cli(Mininet.g_mnet_inst, title=title, background=False)
        else:
            logger.error("Could not launch CLI b/c no mininet exists yet")

    if pause and isatty:
        pause_test()


#
# Add common fixtures available to all tests as parameters
#

tgen = pytest.fixture(lib.fixtures.tgen)
topo = pytest.fixture(lib.fixtures.topo)
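
# Illustrative sketch (assumed typical usage, not part of this file's behavior):
# with the fixtures registered above a test module can simply declare a "tgen"
# parameter to receive the running Topogen object, for example:
#
#     def test_converged(tgen):
#         if tgen.routers_have_failure():
#             pytest.skip(tgen.errors)
#         ...  # inspect routers via tgen.gears[...]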