"""
Topotest conftest.py file.
"""

# Standard library.
import os
import pdb

# Third-party.
import pytest

# Topotest library helpers (project-local).
from lib.topogen import get_topogen, diagnose_env
from lib.topotest import json_cmp_result
from lib.topotest import g_extra_config as topotest_extra_config
from lib.topolog import logger
|
2020-04-03 13:05:24 +02:00
|
|
|
|
2017-06-15 05:25:54 +02:00
|
|
|
def pytest_addoption(parser):
    """
    Add topology-only option to the topology tester. This option makes pytest
    only run the setup_module() to setup the topology without running any tests.
    """
    # Every topotest-specific CLI option, registered in one data-driven pass.
    # Each entry is (option-name, metavar, help-text); a metavar of None
    # marks a boolean flag (action="store_true") instead of a list option.
    option_table = (
        (
            "--gdb-breakpoints",
            "SYMBOL[,SYMBOL...]",
            "Comma-separated list of functions to set gdb breakpoints on",
        ),
        (
            "--gdb-daemons",
            "DAEMON[,DAEMON...]",
            "Comma-separated list of daemons to spawn gdb on, or 'all'",
        ),
        (
            "--gdb-routers",
            "ROUTER[,ROUTER...]",
            "Comma-separated list of routers to spawn gdb on, or 'all'",
        ),
        ("--mininet-on-error", None, "Mininet cli on test failure"),
        ("--pause-after", None, "Pause after each test"),
        (
            "--shell",
            "ROUTER[,ROUTER...]",
            "Comma-separated list of routers to spawn shell on, or 'all'",
        ),
        ("--shell-on-error", None, "Spawn shell on all routers on test failure"),
        ("--topology-only", None, "Only set up this topology, don't run tests"),
        (
            "--vtysh",
            "ROUTER[,ROUTER...]",
            "Comma-separated list of routers to spawn vtysh on, or 'all'",
        ),
        ("--vtysh-on-error", None, "Spawn vtysh on all routers on test failure"),
    )

    for name, metavar, help_text in option_table:
        if metavar is None:
            parser.addoption(name, action="store_true", help=help_text)
        else:
            parser.addoption(name, metavar=metavar, help=help_text)
|
|
|
|
|
2017-06-15 05:25:54 +02:00
|
|
|
|
|
|
|
def pytest_runtest_call():
    """
    Runs after setup_module() and performs the standardized post-setup
    routine for the '--topology-only' option: hand the user a CLI on the
    built topology, then stop the test run.
    """
    # Normal test runs proceed untouched.
    if not topotest_extra_config["topology_only"]:
        return

    # Allow user to play with the setup.
    tgen = get_topogen()
    if tgen is not None:
        tgen.mininet_cli()

    pytest.exit("the topology executed successfully")
|
2020-04-03 13:05:24 +02:00
|
|
|
|
2017-06-29 17:18:46 +02:00
|
|
|
|
|
|
|
def pytest_assertrepr_compare(op, left, right):
    """
    Show proper assertion error message for json_cmp results.

    Returns the generated report of whichever comparison operand is a
    json_cmp_result (left takes precedence), or None to fall back to
    pytest's default representation.
    """
    # The comparison operator is irrelevant for report generation.
    del op

    for candidate in (left, right):
        if isinstance(candidate, json_cmp_result):
            return candidate.gen_report()

    # Neither side carries a json_cmp_result: let pytest handle it.
    return None
|
2017-07-18 21:44:27 +02:00
|
|
|
|
2020-04-03 13:05:24 +02:00
|
|
|
|
2017-07-18 21:44:27 +02:00
|
|
|
def pytest_configure(config):
    """
    Assert that the environment is correctly configured, and get extra config.
    """
    if not diagnose_env():
        pytest.exit("environment has errors, please read the logs")

    # gdb-related options all take a comma-separated list; store each as a
    # (possibly empty) list under its snake_case key.
    for option_name, config_key in (
        ("--gdb-routers", "gdb_routers"),
        ("--gdb-daemons", "gdb_daemons"),
        ("--gdb-breakpoints", "gdb_breakpoints"),
    ):
        raw = config.getoption(option_name)
        topotest_extra_config[config_key] = raw.split(",") if raw else []

    topotest_extra_config["mininet_on_error"] = config.getoption("--mininet-on-error")

    shell = config.getoption("--shell")
    topotest_extra_config["shell"] = shell.split(",") if shell else []

    topotest_extra_config["shell_on_error"] = config.getoption("--shell-on-error")

    vtysh = config.getoption("--vtysh")
    topotest_extra_config["vtysh"] = vtysh.split(",") if vtysh else []

    topotest_extra_config["vtysh_on_error"] = config.getoption("--vtysh-on-error")

    # Pause after each test when asked for explicitly, or implicitly whenever
    # an interactive shell/vtysh was requested for some router.
    pause_after = config.getoption("--pause-after")
    topotest_extra_config["pause_after"] = pause_after or shell or vtysh

    topotest_extra_config["topology_only"] = config.getoption("--topology-only")
|
2019-05-02 20:43:18 +02:00
|
|
|
|
2020-04-03 13:05:24 +02:00
|
|
|
|
2017-07-27 19:52:51 +02:00
|
|
|
def pytest_runtest_makereport(item, call):
    """
    Log all assert messages to default logger with error level.

    Also drives the interactive debugging aids: depending on the configured
    options it spawns a shell/vtysh per router or the mininet CLI on error,
    and pauses after the test phase when requested.
    """
    # Only consider pausing after the actual test call phase; setup and
    # teardown phases never trigger the pause prompt.
    if call.when == "call":
        pause = topotest_extra_config["pause_after"]
    else:
        pause = False

    if call.excinfo is None:
        # Nothing happened
        error = False
    else:
        parent = item.parent
        modname = parent.module.__name__

        # Treat skips as non errors, don't pause after
        if call.excinfo.typename != "AssertionError":
            pause = False
            error = False
            logger.info(
                'assert skipped at "{}/{}": {}'.format(
                    modname, item.name, call.excinfo.value
                )
            )
        else:
            error = True
            # Handle assert failures
            parent._previousfailed = item  # pylint: disable=W0212
            logger.error(
                'assert failed at "{}/{}": {}'.format(modname, item.name, call.excinfo.value)
            )

            # (topogen) Set topology error to avoid advancing in the test.
            tgen = get_topogen()
            if tgen is not None:
                # This will cause topogen to report error on `routers_have_failure`.
                tgen.set_error("{}/{}".format(modname, item.name))

    # NOTE(review): the *-on-error branches below read `tgen`, which is only
    # bound when an AssertionError occurred above; they assume get_topogen()
    # returned a live topology in that case — confirm tgen cannot be None
    # here when these options are used.
    if error and topotest_extra_config["shell_on_error"]:
        for router in tgen.routers():
            pause = True
            tgen.net[router].runInWindow(os.getenv("SHELL", "bash"))

    if error and topotest_extra_config["vtysh_on_error"]:
        for router in tgen.routers():
            pause = True
            tgen.net[router].runInWindow("vtysh")

    if error and topotest_extra_config["mininet_on_error"]:
        tgen.mininet_cli()

    if pause:
        # Python 2/3 compatibility: raw_input() exists only on Python 2;
        # on Python 3 the NameError fallback uses input() instead.
        try:
            user = raw_input('Testing paused, "pdb" to debug, "Enter" to continue: ')
        except NameError:
            user = input('Testing paused, "pdb" to debug, "Enter" to continue: ')
        if user.strip() == "pdb":
            pdb.set_trace()
|