#!/usr/bin/env python

#
# topotest.py
# Library of helper functions for NetDEF Topology Tests
#
# Copyright (c) 2016 by
# Network Device Education Foundation, Inc. ("NetDEF")
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#

import difflib
import errno
import functools
import glob
import json
import os
import pdb
import platform
import re
import resource
import signal
import subprocess
import sys
import tempfile
import time
from copy import deepcopy

import lib.topolog as topolog
from lib.topolog import logger

if sys.version_info[0] > 2:
    import configparser
else:
    import ConfigParser as configparser

from lib import micronet
from lib.micronet_compat import Node

g_extra_config = {}


def get_logs_path(rundir):
    logspath = topolog.get_test_logdir()
    return os.path.join(rundir, logspath)


def gdb_core(obj, daemon, corefiles):
    gdbcmds = """
        info threads
        bt full
        disassemble
        up
        disassemble
        up
        disassemble
        up
        disassemble
        up
        disassemble
        up
        disassemble
    """
    # Turn each gdb command into a ["-ex", "<command>"] pair, then flatten
    # the list of pairs into a single argument list for the gdb invocation.
    gdbcmds = [["-ex", i.strip()] for i in gdbcmds.strip().split("\n")]
    gdbcmds = [item for sl in gdbcmds for item in sl]

    daemon_path = os.path.join(obj.daemondir, daemon)
    backtrace = subprocess.check_output(
        ["gdb", daemon_path, corefiles[0], "--batch"] + gdbcmds
    )
    sys.stderr.write(
        "\n%s: %s crashed. Core file found - Backtrace follows:\n" % (obj.name, daemon)
    )
    sys.stderr.write("%s" % backtrace)
    return backtrace


class json_cmp_result(object):
    "json_cmp result class for better assertion messages"

    def __init__(self):
        self.errors = []

    def add_error(self, error):
        "Append error message to the result"
        for line in error.splitlines():
            self.errors.append(line)

    def has_errors(self):
        "Returns True if there were errors, otherwise False."
        return len(self.errors) > 0

    def gen_report(self):
        headline = ["Generated JSON diff error report:", ""]
        return headline + self.errors

    def __str__(self):
        return (
            "Generated JSON diff error report:\n\n\n" + "\n".join(self.errors) + "\n\n"
        )


def gen_json_diff_report(d1, d2, exact=False, path="> $", acc=(0, "")):
    """
    Internal workhorse which compares two JSON data structures and generates
    a human-readable error report.
    """

    def dump_json(v):
        if isinstance(v, (dict, list)):
            return "\t" + "\t".join(
                json.dumps(v, indent=4, separators=(",", ": ")).splitlines(True)
            )
        else:
            return "'{}'".format(v)

    def json_type(v):
        # Check Boolean before Number: bool is a subclass of int in Python,
        # so the Number check would otherwise match booleans first.
        if isinstance(v, bool):
            return "Boolean"
        elif isinstance(v, (list, tuple)):
            return "Array"
        elif isinstance(v, dict):
            return "Object"
        elif isinstance(v, (int, float)):
            return "Number"
        elif isinstance(v, str):
            return "String"
        elif v is None:
            return "null"

    def get_errors(other_acc):
        return other_acc[1]

    def get_errors_n(other_acc):
        return other_acc[0]

    def add_error(acc, msg, points=1):
        return (acc[0] + points, acc[1] + "{}: {}\n".format(path, msg))

    def merge_errors(acc, other_acc):
        return (acc[0] + other_acc[0], acc[1] + other_acc[1])

    def add_idx(idx):
        return "{}[{}]".format(path, idx)

    def add_key(key):
        return "{}->{}".format(path, key)

    def has_errors(other_acc):
        return other_acc[0] > 0

    if d2 == "*" or (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 == d2
    ):
        return acc
    elif (
        not isinstance(d1, (list, dict))
        and not isinstance(d2, (list, dict))
        and d1 != d2
    ):
        acc = add_error(
            acc,
            "d1 has element with value '{}' but in d2 it has value '{}'".format(d1, d2),
        )
    elif (
        isinstance(d1, list)
        and isinstance(d2, list)
        and ((len(d2) > 0 and d2[0] == "__ordered__") or exact)
    ):
        if not exact:
            del d2[0]
        if len(d1) != len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx, v1, v2 in zip(range(0, len(d1)), d1, d2):
                acc = merge_errors(
                    acc, gen_json_diff_report(v1, v2, exact=exact, path=add_idx(idx))
                )
    elif isinstance(d1, list) and isinstance(d2, list):
        if len(d1) < len(d2):
            acc = add_error(
                acc,
                "d1 has Array of length {} but in d2 it is of length {}".format(
                    len(d1), len(d2)
                ),
            )
        else:
            for idx2, v2 in zip(range(0, len(d2)), d2):
                found_match = False
                closest_diff = None
                closest_idx = None
                for idx1, v1 in zip(range(0, len(d1)), d1):
                    tmp_v1 = deepcopy(v1)
                    tmp_v2 = deepcopy(v2)
                    tmp_diff = gen_json_diff_report(tmp_v1, tmp_v2, path=add_idx(idx1))
                    if not has_errors(tmp_diff):
                        found_match = True
                        del d1[idx1]
                        break
                    elif not closest_diff or get_errors_n(tmp_diff) < get_errors_n(
                        closest_diff
                    ):
                        closest_diff = tmp_diff
                        closest_idx = idx1
                if not found_match and isinstance(v2, (list, dict)):
                    sub_error = "\n\n\t{}".format(
                        "\t".join(get_errors(closest_diff).splitlines(True))
                    )
                    acc = add_error(
                        acc,
                        (
                            "d2 has the following element at index {} which is not present in d1: "
                            + "\n\n{}\n\n\tClosest match in d1 is at index {} with the following errors: {}"
                        ).format(idx2, dump_json(v2), closest_idx, sub_error),
                    )
                if not found_match and not isinstance(v2, (list, dict)):
                    acc = add_error(
                        acc,
                        "d2 has the following element at index {} which is not present in d1: {}".format(
                            idx2, dump_json(v2)
                        ),
                    )
    elif isinstance(d1, dict) and isinstance(d2, dict) and exact:
        invalid_keys_d1 = [k for k in d1.keys() if k not in d2.keys()]
        invalid_keys_d2 = [k for k in d2.keys() if k not in d1.keys()]
        for k in invalid_keys_d1:
            acc = add_error(acc, "d1 has key '{}' which is not present in d2".format(k))
        for k in invalid_keys_d2:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in d1.keys() if k in d2.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    elif isinstance(d1, dict) and isinstance(d2, dict):
        none_keys = [k for k, v in d2.items() if v is None]
        none_keys_present = [k for k in d1.keys() if k in none_keys]
        for k in none_keys_present:
            acc = add_error(
                acc, "d1 has key '{}' which is not supposed to be present".format(k)
            )
        keys = [k for k, v in d2.items() if v is not None]
        invalid_keys_intersection = [k for k in keys if k not in d1.keys()]
        for k in invalid_keys_intersection:
            acc = add_error(acc, "d2 has key '{}' which is not present in d1".format(k))
        valid_keys_intersection = [k for k in keys if k in d1.keys()]
        for k in valid_keys_intersection:
            acc = merge_errors(
                acc, gen_json_diff_report(d1[k], d2[k], exact=exact, path=add_key(k))
            )
    else:
        acc = add_error(
            acc,
            "d1 has element of type '{}' but the corresponding element in d2 is of type '{}'".format(
                json_type(d1), json_type(d2)
            ),
            points=2,
        )

    return acc


def json_cmp(d1, d2, exact=False):
    """
    JSON compare function. Receives two parameters:
    * `d1`: parsed JSON data structure
    * `d2`: parsed JSON data structure

    Returns 'None' when all JSON Object keys and all Array elements of d2 have a match
    in d1, i.e., when d2 is a "subset" of d1 without honoring any order. Otherwise an
    error report is generated and wrapped in a 'json_cmp_result()'. There are special
    parameters and notations explained below which can be used to cover rather unusual
    cases:

    * when 'exact' is set to 'True' then d1 and d2 are tested for equality (including
      order within JSON Arrays)
    * using 'null' (or 'None' in Python) as JSON Object value is checking for key
      absence in d1
    * using '*' as JSON Object value or Array value is checking for presence in d1
      without checking the values
    * using '__ordered__' as first element in a JSON Array in d2 will also check the
      order when it is compared to an Array in d1
    """

    (errors_n, errors) = gen_json_diff_report(deepcopy(d1), deepcopy(d2), exact=exact)

    if errors_n > 0:
        result = json_cmp_result()
        result.add_error(errors)
        return result
    else:
        return None


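# A minimal usage sketch for json_cmp() exercising the special notations
# documented above. The data here is made up for illustration; in a real
# topotest, d1 would normally come from `router.vtysh_cmd(..., isjson=True)`.
def _json_cmp_example():
    d1 = {"routes": [{"prefix": "10.0.0.0/24", "protocol": "bgp"}], "count": 1}
    d2 = {
        "routes": [{"prefix": "10.0.0.0/24", "protocol": "*"}],  # any value accepted
        "bogus": None,  # this key must be absent from d1
    }
    # Returns None on a successful (subset) match, a json_cmp_result otherwise.
    return json_cmp(d1, d2)

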
def router_output_cmp(router, cmd, expected):
    """
    Runs `cmd` in router and compares the output with `expected`.
    """
    return difflines(
        normalize_text(router.vtysh_cmd(cmd)),
        normalize_text(expected),
        title1="Current output",
        title2="Expected output",
    )


def router_json_cmp(router, cmd, data, exact=False):
    """
    Runs `cmd` that returns JSON data (normally the command ends with 'json')
    and compares the result with `data`.
    """
    return json_cmp(router.vtysh_cmd(cmd, isjson=True), data, exact)


def run_and_expect(func, what, count=20, wait=3):
    """
    Run `func` and compare the result with `what`. Retry up to `count` times,
    waiting `wait` seconds between tries. By default it tries 20 times with a
    3 second delay between tries.

    Returns (True, func-return) on success or
    (False, func-return) on failure.

    ---

    Helper functions to use with this function:
    - router_output_cmp
    - router_json_cmp
    """
    start_time = time.time()
    func_name = "<unknown>"
    if func.__class__ == functools.partial:
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    logger.info(
        "'{}' polling started (interval {} secs, maximum {} tries)".format(
            func_name, wait, count
        )
    )

    while count > 0:
        result = func()
        if result != what:
            time.sleep(wait)
            count -= 1
            continue

        end_time = time.time()
        logger.info(
            "'{}' succeeded after {:.2f} seconds".format(
                func_name, end_time - start_time
            )
        )
        return (True, result)

    end_time = time.time()
    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time)
    )
    return (False, result)


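# A typical polling sketch, assuming a router node object and an `expected`
# dict parsed from a JSON reference file: bind the comparison with
# functools.partial and let run_and_expect() poll until the output converges
# (router_json_cmp returns None on a match, hence `what=None`).
def _run_and_expect_example(router, expected):
    test_func = functools.partial(
        router_json_cmp, router, "show ip route json", expected
    )
    success, result = run_and_expect(test_func, None, count=40, wait=2)
    assert success, "routes did not converge:\n{}".format(result)

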
def run_and_expect_type(func, etype, count=20, wait=3, avalue=None):
    """
    Run `func` and compare the result type with `etype`. Retry up to `count`
    times, waiting `wait` seconds between tries. By default it tries 20 times
    with a 3 second delay between tries.

    This function is used when you want to test the return type and,
    optionally, the return value.

    Returns (True, func-return) on success or
    (False, func-return) on failure.
    """
    start_time = time.time()
    func_name = "<unknown>"
    if func.__class__ == functools.partial:
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    logger.info(
        "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
            func_name, wait, int(wait * count)
        )
    )

    while count > 0:
        result = func()
        if not isinstance(result, etype):
            logger.debug(
                "Expected result type '{}' got '{}' instead".format(etype, type(result))
            )
            time.sleep(wait)
            count -= 1
            continue

        if etype != type(None) and avalue is not None and result != avalue:
            logger.debug("Expected value '{}' got '{}' instead".format(avalue, result))
            time.sleep(wait)
            count -= 1
            continue

        end_time = time.time()
        logger.info(
            "'{}' succeeded after {:.2f} seconds".format(
                func_name, end_time - start_time
            )
        )
        return (True, result)

    end_time = time.time()
    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time)
    )
    return (False, result)


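# A shorter sketch for run_and_expect_type(), assuming the vtysh command
# below is available on the router: poll until the command returns parsed
# JSON (a dict) at all, regardless of its contents.
def _run_and_expect_type_example(router):
    test_func = functools.partial(
        router.vtysh_cmd, "show bgp summary json", isjson=True
    )
    success, result = run_and_expect_type(test_func, dict, count=10, wait=1)
    return success, result

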
def int2dpid(dpid):
    "Convert an integer into a 16-hex-digit datapath ID (DPID)."

    try:
        dpid = hex(dpid)[2:]
        dpid = "0" * (16 - len(dpid)) + dpid
        return dpid
    except IndexError:
        raise Exception(
            "Unable to derive default datapath ID - "
            "please either specify a dpid or use a "
            "canonical switch name such as s23."
        )


def pid_exists(pid):
    "Check whether pid exists in the current process table."

    if pid <= 0:
        return False
    try:
        # Reap the process if it is a zombie child of ours; ignore errors
        # (e.g. when the process is not our child).
        os.waitpid(pid, os.WNOHANG)
    except:
        pass
    try:
        os.kill(pid, 0)
    except OSError as err:
        if err.errno == errno.ESRCH:
            # ESRCH == No such process
            return False
        elif err.errno == errno.EPERM:
            # EPERM clearly means there's a process to deny access to
            return True
        else:
            # According to "man 2 kill" possible error values are
            # (EINVAL, EPERM, ESRCH)
            raise
    else:
        return True


def get_textdiff(text1, text2, title1="", title2="", **opts):
    "Returns an empty string if the texts are the same, otherwise a formatted diff."

    diff = "\n".join(
        difflib.unified_diff(text1, text2, fromfile=title1, tofile=title2, **opts)
    )
    # Clean up line endings
    diff = os.linesep.join([s for s in diff.splitlines() if s])
    return diff


def difflines(text1, text2, title1="", title2="", **opts):
    "Wrapper for get_textdiff to avoid string transformations."
    text1 = ("\n".join(text1.rstrip().splitlines()) + "\n").splitlines(True)
    text2 = ("\n".join(text2.rstrip().splitlines()) + "\n").splitlines(True)
    return get_textdiff(text1, text2, title1, title2, **opts)


def get_file(content):
|
|
|
|
"""
|
|
|
|
Generates a temporary file in '/tmp' with `content` and returns the file name.
|
|
|
|
"""
|
2021-07-27 01:23:20 +02:00
|
|
|
if isinstance(content, list) or isinstance(content, tuple):
|
|
|
|
content = "\n".join(content)
|
2017-06-15 05:25:54 +02:00
|
|
|
fde = tempfile.NamedTemporaryFile(mode="w", delete=False)
|
|
|
|
fname = fde.name
|
|
|
|
fde.write(content)
|
|
|
|
fde.close()
|
|
|
|
return fname
|
|
|
|
|
2020-04-03 13:05:24 +02:00
|
|
|
|
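# Example (illustrative): list input is joined with newlines, and the caller
# owns the resulting temporary file:
#
#     fname = get_file(["interface lo", " ip address 10.0.0.1/32"])
#     # ... hand fname to the config-loading machinery ...
#     os.unlink(fname)

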
def normalize_text(text):
    """
    Strips formatting spaces/tabs, carriage returns and trailing whitespace.
    """
    text = re.sub(r"[ \t]+", " ", text)
    text = re.sub(r"\r", "", text)

    # Remove trailing whitespace at the end of each line.
    text = re.sub(r"[ \t]+\n", "\n", text)
    # Remove whitespace at the end of the text.
    text = text.rstrip()

    return text


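# Example (illustrative): runs of spaces/tabs collapse to one space, carriage
# returns and trailing whitespace disappear:
#
#     normalize_text("a  \t b\t\r\nc   \n\n")  # -> "a b\nc"

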
def is_linux():
    """
    Parses unix name output to check if running on GNU/Linux.

    Returns True if running on Linux, returns False otherwise.
    """

    if os.uname()[0] == "Linux":
        return True
    return False


def iproute2_is_vrf_capable():
    """
    Checks if the iproute2 version installed on the system is capable of
    handling VRFs by interpreting the output of the 'ip' utility found in PATH.

    Returns True if capability can be detected, returns False otherwise.
    """

    if is_linux():
        try:
            subp = subprocess.Popen(
                ["ip", "route", "show", "vrf"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                stdin=subprocess.PIPE,
            )
            # communicate() returns bytes under Python 3; decode before
            # comparing against the "Error:" string.
            stderr = subp.communicate()[1]
            if not isinstance(stderr, str):
                stderr = stderr.decode()
            iproute2_err = stderr.splitlines()[0].split()[0]

            if iproute2_err != "Error:":
                return True
        except Exception:
            pass
    return False


def module_present_linux(module, load):
    """
    Returns whether `module` is present.

    If `load` is true, it will try to load it via modprobe.
    """
    with open("/proc/modules", "r") as modules_file:
        if module.replace("-", "_") in modules_file.read():
            return True
    cmd = "/sbin/modprobe {}{}".format("" if load else "-n ", module)
    if os.system(cmd) != 0:
        return False
    else:
        return True


def module_present_freebsd(module, load):
    return True


def module_present(module, load=True):
    if sys.platform.startswith("linux"):
        return module_present_linux(module, load)
    elif sys.platform.startswith("freebsd"):
        return module_present_freebsd(module, load)


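# Example (illustrative): probe for the MPLS modules without loading them by
# passing load=False (modprobe -n performs a dry run):
#
#     module_present("mpls-router", load=False)
#     module_present("mpls-iptunnel", load=False)

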
def version_cmp(v1, v2):
    """
    Compares two version strings and returns:

    * `-1`: if `v1` is less than `v2`
    * `0`: if `v1` is equal to `v2`
    * `1`: if `v1` is greater than `v2`

    Raises `ValueError` if versions are not well formatted.
    """
    vregex = r"(?P<whole>\d+(\.(\d+))*)"
    v1m = re.match(vregex, v1)
    v2m = re.match(vregex, v2)
    if v1m is None or v2m is None:
        raise ValueError("got an invalid version string")

    # Split values
    v1g = v1m.group("whole").split(".")
    v2g = v2m.group("whole").split(".")

    # Get the longest version string
    vnum = len(v1g)
    if len(v2g) > vnum:
        vnum = len(v2g)

    # Reverse list because we are going to pop the tail
    v1g.reverse()
    v2g.reverse()
    for _ in range(vnum):
        try:
            v1n = int(v1g.pop())
        except IndexError:
            # `v1` ran out of components: it is smaller unless the rest
            # of `v2` is all zeros.
            while v2g:
                v2n = int(v2g.pop())
                if v2n > 0:
                    return -1
            break

        try:
            v2n = int(v2g.pop())
        except IndexError:
            # `v2` ran out of components: `v1` is greater unless the rest
            # of `v1` is all zeros.
            if v1n > 0:
                return 1
            while v1g:
                v1n = int(v1g.pop())
                if v1n > 0:
                    return 1
            break

        if v1n > v2n:
            return 1
        if v1n < v2n:
            return -1
    return 0


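# Examples (illustrative): components compare numerically, so "3.10" is newer
# than "3.9", and trailing zero components do not matter:
#
#     version_cmp("3.10", "3.9")   # -> 1
#     version_cmp("4.5", "4.5.0")  # -> 0
#     version_cmp("4.4.9", "4.5")  # -> -1

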
def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None):
    if ifaceaction:
        str_ifaceaction = "no shutdown"
    else:
        str_ifaceaction = "shutdown"
    if vrf_name == None:
        cmd = 'vtysh -c "configure terminal" -c "interface {0}" -c "{1}"'.format(
            ifacename, str_ifaceaction
        )
    else:
        cmd = (
            'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format(
                ifacename, vrf_name, str_ifaceaction
            )
        )
    node.run(cmd)


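# Example (illustrative, assuming `r1` is a started router node): shut down
# and then re-enable an interface through vtysh:
#
#     interface_set_status(r1, "r1-eth0", ifaceaction=False)  # shutdown
#     interface_set_status(r1, "r1-eth0", ifaceaction=True)   # no shutdown

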
def ip4_route_zebra(node, vrf_name=None):
    """
    Gets the output of the 'show ip route' command. It can be used
    to compare the output against a reference.
    """
    if vrf_name == None:
        tmp = node.vtysh_cmd("show ip route")
    else:
        tmp = node.vtysh_cmd("show ip route vrf {0}".format(vrf_name))
    output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp)

    lines = output.splitlines()
    header_found = False
    while lines and (not lines[0].strip() or not header_found):
        if "o - offload failure" in lines[0]:
            header_found = True
        lines = lines[1:]
    return "\n".join(lines)


def ip6_route_zebra(node, vrf_name=None):
    """
    Retrieves the output of 'show ipv6 route [vrf vrf_name]', then
    canonicalizes it by eliding link-locals.
    """

    if vrf_name == None:
        tmp = node.vtysh_cmd("show ipv6 route")
    else:
        tmp = node.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name))

    # Mask out timestamp
    output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp)

    # Mask out the link-local addresses
    output = re.sub(r"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", output)

    lines = output.splitlines()
    header_found = False
    while lines and (not lines[0].strip() or not header_found):
        if "o - offload failure" in lines[0]:
            header_found = True
        lines = lines[1:]

    return "\n".join(lines)


def proto_name_to_number(protocol):
    return {
        "bgp": "186",
        "isis": "187",
        "ospf": "188",
        "rip": "189",
        "ripng": "190",
        "nhrp": "191",
        "eigrp": "192",
        "ldp": "193",
        "sharp": "194",
        "pbr": "195",
        "static": "196",
    }.get(
        protocol, protocol
    )  # default return same as input


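# Example (illustrative): known protocol names map to their route-table
# numbers, anything else passes through unchanged:
#
#     proto_name_to_number("bgp")     # -> "186"
#     proto_name_to_number("kernel")  # -> "kernel"

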
def ip4_route(node):
    """
    Gets a structured return of the command 'ip route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(node.run("ip route")).splitlines()
    result = {}
    for line in output:
        columns = line.split(" ")
        route = result[columns[0]] = {}
        prev = None
        for column in columns:
            if prev == "dev":
                route["dev"] = column
            if prev == "via":
                route["via"] = column
            if prev == "proto":
                # translate protocol names back to numbers
                route["proto"] = proto_name_to_number(column)
            if prev == "metric":
                route["metric"] = column
            if prev == "scope":
                route["scope"] = column
            prev = column

    return result


def ip4_vrf_route(node):
    """
    Gets a structured return of the command 'ip route show vrf {0}-cust1'.
    It can be used in conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(
        node.run("ip route show vrf {0}-cust1".format(node.name))
    ).splitlines()

    result = {}
    for line in output:
        columns = line.split(" ")
        route = result[columns[0]] = {}
        prev = None
        for column in columns:
            if prev == "dev":
                route["dev"] = column
            if prev == "via":
                route["via"] = column
            if prev == "proto":
                # translate protocol names back to numbers
                route["proto"] = proto_name_to_number(column)
            if prev == "metric":
                route["metric"] = column
            if prev == "scope":
                route["scope"] = column
            prev = column

    return result


def ip6_route(node):
    """
    Gets a structured return of the command 'ip -6 route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(node.run("ip -6 route")).splitlines()
    result = {}
    for line in output:
        columns = line.split(" ")
        route = result[columns[0]] = {}
        prev = None
        for column in columns:
            if prev == "dev":
                route["dev"] = column
            if prev == "via":
                route["via"] = column
            if prev == "proto":
                # translate protocol names back to numbers
                route["proto"] = proto_name_to_number(column)
            if prev == "metric":
                route["metric"] = column
            if prev == "pref":
                route["pref"] = column
            prev = column

    return result


def ip6_vrf_route(node):
    """
    Gets a structured return of the command 'ip -6 route show vrf {0}-cust1'.
    It can be used in conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(
        node.run("ip -6 route show vrf {0}-cust1".format(node.name))
    ).splitlines()
    result = {}
    for line in output:
        columns = line.split(" ")
        route = result[columns[0]] = {}
        prev = None
        for column in columns:
            if prev == "dev":
                route["dev"] = column
            if prev == "via":
                route["via"] = column
            if prev == "proto":
                # translate protocol names back to numbers
                route["proto"] = proto_name_to_number(column)
            if prev == "metric":
                route["metric"] = column
            if prev == "pref":
                route["pref"] = column
            prev = column

    return result


def ip_rules(node):
    """
    Gets a structured return of the command 'ip rule'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    [
        {
            "pref": "0",
            "from": "all"
        },
        {
            "pref": "32766",
            "from": "all"
        },
        {
            "to": "3.4.5.0/24",
            "iif": "r1-eth2",
            "pref": "304",
            "from": "1.2.0.0/16",
            "proto": "zebra"
        }
    ]
    """
    output = normalize_text(node.run("ip rule")).splitlines()
    result = []
    for line in output:
        columns = line.split(" ")

        route = {}
        # remove last character, since it is ':'
        pref = columns[0][:-1]
        route["pref"] = pref
        prev = None
        for column in columns:
            if prev == "from":
                route["from"] = column
            if prev == "to":
                route["to"] = column
            if prev == "proto":
                route["proto"] = column
            if prev == "iif":
                route["iif"] = column
            if prev == "fwmark":
                route["fwmark"] = column
            prev = column

        result.append(route)
    return result


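# Example (illustrative): given a node whose 'ip rule' output contains the
# line "304: from 1.2.0.0/16 iif r1-eth2", the list returned by
# ip_rules(node) would include the entry:
#
#     {"pref": "304", "from": "1.2.0.0/16", "iif": "r1-eth2"}

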
def sleep(amount, reason=None):
    """
    Sleep wrapper that registers in the log the amount of sleep
    """
    if reason is None:
        logger.info("Sleeping for {} seconds".format(amount))
    else:
        logger.info(reason + " ({} seconds)".format(amount))

    time.sleep(amount)


def checkAddressSanitizerError(output, router, component, logdir=""):
    "Checks for an AddressSanitizer error in output; if found, logs it and returns True, otherwise False."

    def processAddressSanitizerError(asanErrorRe, output, router, component):
        sys.stderr.write(
            "%s: %s triggered an exception by AddressSanitizer\n" % (router, component)
        )
        # Sanitizer Error found in log
        pidMark = asanErrorRe.group(1)
        addressSanitizerLog = re.search(
            "%s(.*)%s" % (pidMark, pidMark), output, re.DOTALL
        )
        if addressSanitizerLog:
            # Find Calling Test. Could be multiple steps back.
            # (list() is needed for Python 3, where .values() is a view.)
            testframe = list(sys._current_frames().values())[0]
            level = 0
            while level < 10:
                test = os.path.splitext(
                    os.path.basename(testframe.f_globals["__file__"])
                )[0]
                if (test != "topotest") and (test != "topogen"):
                    # Found the calling test
                    callingTest = os.path.basename(testframe.f_globals["__file__"])
                    break
                level = level + 1
                testframe = testframe.f_back
            if level >= 10:
                # somehow couldn't find the test script.
                callingTest = "unknownTest"
            #
            # Now finding Calling Procedure
            level = 0
            while level < 20:
                callingProc = sys._getframe(level).f_code.co_name
                if (
                    (callingProc != "processAddressSanitizerError")
                    and (callingProc != "checkAddressSanitizerError")
                    and (callingProc != "checkRouterCores")
                    and (callingProc != "stopRouter")
                    and (callingProc != "stop")
                    and (callingProc != "stop_topology")
                    and (callingProc != "checkRouterRunning")
                    and (callingProc != "check_router_running")
                    and (callingProc != "routers_have_failure")
                ):
                    # Found the calling test
                    break
                level = level + 1
            if level >= 20:
                # something wrong - couldn't find the calling test function
                callingProc = "unknownProc"
            with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile:
                sys.stderr.write(
                    "AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                sys.stderr.write(
                    "\n".join(addressSanitizerLog.group(1).splitlines()) + "\n"
                )
                addrSanFile.write("## Error: %s\n\n" % asanErrorRe.group(2))
                addrSanFile.write(
                    "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                addrSanFile.write(
                    " "
                    + "\n ".join(addressSanitizerLog.group(1).splitlines())
                    + "\n"
                )
                addrSanFile.write("\n---------------\n")
        return

    addressSanitizerError = re.search(
        r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", output
    )
    if addressSanitizerError:
        processAddressSanitizerError(addressSanitizerError, output, router, component)
        return True

    # No Address Sanitizer Error in Output. Now check for AddressSanitizer daemon file
    if logdir:
        filepattern = logdir + "/" + router + "/" + component + ".asan.*"
        logger.debug(
            "Log check for %s on %s, pattern %s\n" % (component, router, filepattern)
        )
        for file in glob.glob(filepattern):
            with open(file, "r") as asanErrorFile:
                asanError = asanErrorFile.read()
            addressSanitizerError = re.search(
                r"(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ", asanError
            )
            if addressSanitizerError:
                processAddressSanitizerError(
                    addressSanitizerError, asanError, router, component
                )
                return True
    return False


def _sysctl_atleast(commander, variable, min_value):
    if isinstance(min_value, tuple):
        min_value = list(min_value)
    is_list = isinstance(min_value, list)

    sval = commander.cmd_raises("sysctl -n " + variable).strip()
    if is_list:
        cur_val = [int(x) for x in sval.split()]
    else:
        cur_val = int(sval)

    set_value = False
    if is_list:
        for i, v in enumerate(cur_val):
            if v < min_value[i]:
                set_value = True
            else:
                min_value[i] = v
    else:
        if cur_val < min_value:
            set_value = True
    if set_value:
        if is_list:
            valstr = " ".join([str(x) for x in min_value])
        else:
            valstr = str(min_value)
        logger.info("Increasing sysctl %s from %s to %s", variable, cur_val, valstr)
        commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr))


def _sysctl_assure(commander, variable, value):
    if isinstance(value, tuple):
        value = list(value)
    is_list = isinstance(value, list)

    sval = commander.cmd_raises("sysctl -n " + variable).strip()
    if is_list:
        cur_val = [int(x) for x in sval.split()]
    else:
        cur_val = sval

    set_value = False
    if is_list:
        for i, v in enumerate(cur_val):
            if v != value[i]:
                set_value = True
            else:
                value[i] = v
    else:
        if cur_val != str(value):
            set_value = True

    if set_value:
        if is_list:
            valstr = " ".join([str(x) for x in value])
        else:
            valstr = str(value)
        logger.info("Changing sysctl %s from %s to %s", variable, cur_val, valstr)
        commander.cmd_raises('sysctl -w {}="{}"\n'.format(variable, valstr))


def sysctl_atleast(commander, variable, min_value, raises=False):
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_atleast(commander, variable, min_value)
    except subprocess.CalledProcessError as error:
        logger.warning(
            "%s: Failed to assure sysctl min value %s = %s",
            commander,
            variable,
            min_value,
        )
        if raises:
            raise


def sysctl_assure(commander, variable, value, raises=False):
    try:
        if commander is None:
            commander = micronet.Commander("topotest")
        return _sysctl_assure(commander, variable, value)
    except subprocess.CalledProcessError as error:
        logger.warning(
            "%s: Failed to assure sysctl value %s = %s",
            commander,
            variable,
            value,
            exc_info=True,
        )
        if raises:
            raise


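# Example (illustrative): pass a node to adjust a sysctl inside its namespace,
# or None to adjust the host, mirroring fix_netns_limits()/fix_host_limits()
# below:
#
#     sysctl_assure(router, "net.ipv4.conf.all.forwarding", 1)
#     sysctl_atleast(None, "net.core.rmem_max", 16 * 2 ** 20)

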
def rlimit_atleast(rname, min_value, raises=False):
    try:
        cval = resource.getrlimit(rname)
        soft, hard = cval
        if soft < min_value:
            nval = (min_value, hard if min_value < hard else min_value)
            logger.info("Increasing rlimit %s from %s to %s", rname, cval, nval)
            resource.setrlimit(rname, nval)
    except subprocess.CalledProcessError as error:
        logger.warning(
            "Failed to assure rlimit [%s] = %s", rname, min_value, exc_info=True
        )
        if raises:
            raise


def fix_netns_limits(ns):

    # Maximum read and write socket buffer sizes
    sysctl_atleast(ns, "net.ipv4.tcp_rmem", [10 * 1024, 87380, 16 * 2 ** 20])
    sysctl_atleast(ns, "net.ipv4.tcp_wmem", [10 * 1024, 87380, 16 * 2 ** 20])

    sysctl_assure(ns, "net.ipv4.conf.all.rp_filter", 0)
    sysctl_assure(ns, "net.ipv4.conf.default.rp_filter", 0)
    sysctl_assure(ns, "net.ipv4.conf.lo.rp_filter", 0)

    sysctl_assure(ns, "net.ipv4.conf.all.forwarding", 1)
    sysctl_assure(ns, "net.ipv4.conf.default.forwarding", 1)

    # XXX if things fail look here as this wasn't done previously
    sysctl_assure(ns, "net.ipv6.conf.all.forwarding", 1)
    sysctl_assure(ns, "net.ipv6.conf.default.forwarding", 1)

    # ARP
    sysctl_assure(ns, "net.ipv4.conf.default.arp_announce", 2)
    sysctl_assure(ns, "net.ipv4.conf.default.arp_notify", 1)
    # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
    sysctl_assure(ns, "net.ipv4.conf.default.arp_ignore", 0)
    sysctl_assure(ns, "net.ipv4.conf.all.arp_announce", 2)
    sysctl_assure(ns, "net.ipv4.conf.all.arp_notify", 1)
    # Setting this to 1 breaks topotests that rely on lo addresses being proxy arp'd for
    sysctl_assure(ns, "net.ipv4.conf.all.arp_ignore", 0)

    sysctl_assure(ns, "net.ipv4.icmp_errors_use_inbound_ifaddr", 1)

    # Keep ipv6 permanent addresses on an admin down
    sysctl_assure(ns, "net.ipv6.conf.all.keep_addr_on_down", 1)
    if version_cmp(platform.release(), "4.20") >= 0:
        sysctl_assure(ns, "net.ipv6.route.skip_notify_on_dev_down", 1)

    sysctl_assure(ns, "net.ipv4.conf.all.ignore_routes_with_linkdown", 1)
    sysctl_assure(ns, "net.ipv6.conf.all.ignore_routes_with_linkdown", 1)

    # igmp
    sysctl_atleast(ns, "net.ipv4.igmp_max_memberships", 1000)

    # Use neigh information on selection of nexthop for multipath hops
    sysctl_assure(ns, "net.ipv4.fib_multipath_use_neigh", 1)


def fix_host_limits():
    """Increase system limits."""

    rlimit_atleast(resource.RLIMIT_NPROC, 8 * 1024)
    rlimit_atleast(resource.RLIMIT_NOFILE, 16 * 1024)
    sysctl_atleast(None, "fs.file-max", 16 * 1024)
    sysctl_atleast(None, "kernel.pty.max", 16 * 1024)

    # Enable coredumps
    # Original on ubuntu 17.x, but apport won't save as in namespace
    # |/usr/share/apport/apport %p %s %c %d %P
    sysctl_assure(None, "kernel.core_pattern", "%e_core-sig_%s-pid_%p.dmp")
    sysctl_assure(None, "kernel.core_uses_pid", 1)
    sysctl_assure(None, "fs.suid_dumpable", 1)

    # Maximum connection backlog
    sysctl_atleast(None, "net.core.netdev_max_backlog", 4 * 1024)

    # Maximum read and write socket buffer sizes
    sysctl_atleast(None, "net.core.rmem_max", 16 * 2 ** 20)
    sysctl_atleast(None, "net.core.wmem_max", 16 * 2 ** 20)

    # Garbage Collection Settings for ARP and Neighbors
    sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh2", 4 * 1024)
    sysctl_atleast(None, "net.ipv4.neigh.default.gc_thresh3", 8 * 1024)
    sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh2", 4 * 1024)
    sysctl_atleast(None, "net.ipv6.neigh.default.gc_thresh3", 8 * 1024)
    # Hold entries for 10 minutes
    sysctl_assure(None, "net.ipv4.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)
    sysctl_assure(None, "net.ipv6.neigh.default.base_reachable_time_ms", 10 * 60 * 1000)

    # igmp
    sysctl_assure(None, "net.ipv4.neigh.default.mcast_solicit", 10)

    # MLD
    sysctl_atleast(None, "net.ipv6.mld_max_msf", 512)

    # Increase routing table size to 128K
    sysctl_atleast(None, "net.ipv4.route.max_size", 128 * 1024)
    sysctl_atleast(None, "net.ipv6.route.max_size", 128 * 1024)


def setup_node_tmpdir(logdir, name):
    # Cleanup old log, valgrind, and core files.
    subprocess.check_call(
        "rm -rf {0}/{1}.valgrind.* {1}.*.asan {0}/{1}/".format(logdir, name),
        shell=True,
    )

    # Setup the per node directory.
    nodelogdir = "{}/{}".format(logdir, name)
    subprocess.check_call(
        "mkdir -p {0} && chmod 1777 {0}".format(nodelogdir), shell=True
    )
    logfile = "{0}/{1}.log".format(logdir, name)
    return logfile


class Router(Node):
    "A Node with IPv4/IPv6 forwarding enabled"

    def __init__(self, name, **params):

        # Backward compatibility:
        # Load configuration defaults like topogen.
        self.config_defaults = configparser.ConfigParser(
            defaults={
                "verbosity": "info",
                "frrdir": "/usr/lib/frr",
                "routertype": "frr",
                "memleak_path": "",
            }
        )

        self.config_defaults.read(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini")
        )

        # If this topology is using old API and doesn't have logdir
        # specified, then attempt to generate a unique logdir.
        self.logdir = params.get("logdir")
        if self.logdir is None:
            self.logdir = get_logs_path(g_extra_config["rundir"])

        if not params.get("logger"):
            # If logger is present topogen has already set this up
            logfile = setup_node_tmpdir(self.logdir, name)
            l = topolog.get_logger(name, log_level="debug", target=logfile)
            params["logger"] = l

        super(Router, self).__init__(name, **params)

        self.daemondir = None
        self.hasmpls = False
        self.routertype = "frr"
        self.daemons = {
            "zebra": 0,
            "ripd": 0,
            "ripngd": 0,
            "ospfd": 0,
            "ospf6d": 0,
            "isisd": 0,
            "bgpd": 0,
            "pimd": 0,
            "ldpd": 0,
            "eigrpd": 0,
            "nhrpd": 0,
            "staticd": 0,
            "bfdd": 0,
            "sharpd": 0,
            "babeld": 0,
            "pbrd": 0,
            "pathd": 0,
            "snmpd": 0,
        }
        self.daemons_options = {"zebra": ""}
        self.reportCores = True
        self.version = None

        self.ns_cmd = "sudo nsenter -a -t {} ".format(self.pid)
        try:
            # Allow escaping from running inside docker
            cgroup = open("/proc/1/cgroup").read()
            m = re.search("[0-9]+:cpuset:/docker/([a-f0-9]+)", cgroup)
            if m:
                self.ns_cmd = "docker exec -it {} ".format(m.group(1)) + self.ns_cmd
        except IOError:
            pass
        else:
            logger.debug("CMD to enter {}: {}".format(self.name, self.ns_cmd))

    def _config_frr(self, **params):
        "Configure FRR binaries"
        self.daemondir = params.get("frrdir")
        if self.daemondir is None:
            self.daemondir = self.config_defaults.get("topogen", "frrdir")

        zebra_path = os.path.join(self.daemondir, "zebra")
        if not os.path.isfile(zebra_path):
            raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path))

    # pylint: disable=W0221
    # Some params are only meaningful for the parent class.
    def config(self, **params):
        super(Router, self).config(**params)

        # User did not specify the daemons directory, try to autodetect it.
        self.daemondir = params.get("daemondir")
        if self.daemondir is None:
            self.routertype = params.get(
                "routertype", self.config_defaults.get("topogen", "routertype")
            )
            self._config_frr(**params)
        else:
            # Test the provided path
            zpath = os.path.join(self.daemondir, "zebra")
            if not os.path.isfile(zpath):
                raise Exception("No zebra binary found in {}".format(zpath))
            # Allow user to specify routertype when the path was specified.
            if params.get("routertype") is not None:
                self.routertype = params.get("routertype")

        self.cmd("ulimit -c unlimited")
        # Set ownership of config files
        self.cmd("chown {0}:{0}vty /etc/{0}".format(self.routertype))

    def terminate(self):
        # Stop running FRR daemons
        self.stopRouter()
        super(Router, self).terminate()
        os.system("chmod -R go+rw " + self.logdir)

    # Return a list of (name, pid) tuples for the daemons currently running.
    def listDaemons(self):
        ret = []
        rc, stdout, _ = self.cmd_status(
            "ls -1 /var/run/%s/*.pid" % self.routertype, warn=False
        )
        if rc:
            return ret
        for d in stdout.strip().split("\n"):
            pidfile = d.strip()
            try:
                pid = int(self.cmd_raises("cat %s" % pidfile, warn=False).strip())
                name = os.path.basename(pidfile[:-4])

                # probably not compatible with bsd.
                rc, _, _ = self.cmd_status("test -d /proc/{}".format(pid), warn=False)
                if rc:
                    logger.warning(
                        "%s: %s exited leaving pidfile %s (%s)",
                        self.name,
                        name,
                        pidfile,
                        pid,
                    )
                    self.cmd("rm -- " + pidfile)
                else:
                    ret.append((name, pid))
            except (subprocess.CalledProcessError, ValueError):
                pass
        return ret

    def stopRouter(self, assertOnError=True, minErrorVersion="5.1"):
        # Stop Running FRR Daemons
        running = self.listDaemons()
        if not running:
            return ""

        logger.info("%s: stopping %s", self.name, ", ".join([x[0] for x in running]))
        for name, pid in running:
            logger.info("{}: sending SIGTERM to {}".format(self.name, name))
            try:
                os.kill(pid, signal.SIGTERM)
            except OSError as err:
                logger.info(
                    "%s: could not kill %s (%s): %s", self.name, name, pid, str(err)
                )

        running = self.listDaemons()
        if running:
            for _ in range(0, 5):
                sleep(
                    0.5,
                    "{}: waiting for daemons stopping: {}".format(
                        self.name, ", ".join([x[0] for x in running])
                    ),
                )
                running = self.listDaemons()
                if not running:
                    break

        if not running:
            return ""

        logger.warning(
            "%s: sending SIGBUS to: %s", self.name, ", ".join([x[0] for x in running])
        )
        for name, pid in running:
            pidfile = "/var/run/{}/{}.pid".format(self.routertype, name)
            logger.info("%s: killing %s", self.name, name)
            self.cmd("kill -SIGBUS %d" % pid)
            self.cmd("rm -- " + pidfile)

        sleep(
            0.5, "%s: waiting for daemons to exit/core after initial SIGBUS" % self.name
        )

        errors = self.checkRouterCores(reportOnce=True)
        if self.checkRouterVersion("<", minErrorVersion):
            # ignore errors in old versions
            errors = ""
        if assertOnError and (errors is not None) and len(errors) > 0:
            assert "Errors found - details follow:" == 0, errors
        return errors

    def removeIPs(self):
        for interface in self.intfNames():
            try:
                self.intf_ip_cmd(interface, "ip address flush " + interface)
            except Exception as ex:
                logger.error("%s can't remove IPs %s", self, str(ex))
                # pdb.set_trace()
                # assert False, "can't remove IPs %s" % str(ex)

    def checkCapability(self, daemon, param):
        if param is not None:
            daemon_path = os.path.join(self.daemondir, daemon)
            daemon_search_option = param.replace("-", "")
            output = self.cmd(
                "{0} -h | grep {1}".format(daemon_path, daemon_search_option)
            )
            if daemon_search_option not in output:
                return False
        return True

    def loadConf(self, daemon, source=None, param=None):
        # Unfortunately this API allows for source to not exist for any and all routers.

        # print "Daemons before:", self.daemons
        if daemon in self.daemons.keys():
            self.daemons[daemon] = 1
            if param is not None:
                self.daemons_options[daemon] = param
            conf_file = "/etc/{}/{}.conf".format(self.routertype, daemon)
            if source is None or not os.path.exists(source):
                self.cmd_raises("touch " + conf_file)
            else:
                self.cmd_raises("cp {} {}".format(source, conf_file))
            self.cmd_raises("chown {0}:{0} {1}".format(self.routertype, conf_file))
            self.cmd_raises("chmod 664 {}".format(conf_file))
            if (daemon == "snmpd") and (self.routertype == "frr"):
                # /etc/snmp is private mount now
                self.cmd('echo "agentXSocket /etc/frr/agentx" > /etc/snmp/frr.conf')
                self.cmd('echo "mibs +ALL" > /etc/snmp/snmp.conf')

            if (daemon == "zebra") and (self.daemons["staticd"] == 0):
                # Add staticd with zebra - if it exists
                try:
                    staticd_path = os.path.join(self.daemondir, "staticd")
                except:
                    pdb.set_trace()

                if os.path.isfile(staticd_path):
                    self.daemons["staticd"] = 1
                    self.daemons_options["staticd"] = ""
                    # Auto-Started staticd has no config, so it will read from zebra config
        else:
            logger.info("No daemon {} known".format(daemon))
        # print "Daemons after:", self.daemons

    def runInWindow(self, cmd, title=None):
        return self.run_in_window(cmd, title)

    def startRouter(self, tgen=None):
        # Disable integrated-vtysh-config
        self.cmd(
            'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
            % self.routertype
        )
        self.cmd(
            "chown %s:%svty /etc/%s/vtysh.conf"
            % (self.routertype, self.routertype, self.routertype)
        )
        # TODO remove the following lines after all tests are migrated to Topogen.
        # Try to find relevant old logfiles in /tmp and delete them
        # (list() forces evaluation; under Python 3 a bare map() would be a no-op).
        list(map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name))))
        # Remove old core files
        list(map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name))))
        # Remove IP addresses from OS first - we have them in zebra.conf
        self.removeIPs()
        # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher
        # No error - but return message and skip all the tests
        if self.daemons["ldpd"] == 1:
            ldpd_path = os.path.join(self.daemondir, "ldpd")
            if not os.path.isfile(ldpd_path):
                logger.info("LDP Test, but no ldpd compiled or installed")
                return "LDP Test, but no ldpd compiled or installed"

            if version_cmp(platform.release(), "4.5") < 0:
                logger.info("LDP Test needs Linux Kernel 4.5 minimum")
                return "LDP Test needs Linux Kernel 4.5 minimum"
            # Check if have mpls
            if tgen != None:
                self.hasmpls = tgen.hasmpls
                if self.hasmpls != True:
                    logger.info(
                        "LDP/MPLS Tests will be skipped, platform missing module(s)"
                    )
            else:
                # Test for MPLS Kernel modules available
                self.hasmpls = False
                if not module_present("mpls-router"):
                    logger.info(
                        "MPLS tests will not run (missing mpls-router kernel module)"
                    )
                elif not module_present("mpls-iptunnel"):
                    logger.info(
                        "MPLS tests will not run (missing mpls-iptunnel kernel module)"
                    )
                else:
                    self.hasmpls = True
            if self.hasmpls != True:
                return "LDP/MPLS Tests need mpls kernel modules"

        # Really want to use sysctl_atleast here, but only when MPLS is actually being
        # used
        self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")

        shell_routers = g_extra_config["shell"]
        if "all" in shell_routers or self.name in shell_routers:
            self.run_in_window(os.getenv("SHELL", "bash"))

        vtysh_routers = g_extra_config["vtysh"]
        if "all" in vtysh_routers or self.name in vtysh_routers:
            self.run_in_window("vtysh")

        if self.daemons["eigrpd"] == 1:
            eigrpd_path = os.path.join(self.daemondir, "eigrpd")
            if not os.path.isfile(eigrpd_path):
                logger.info("EIGRP Test, but no eigrpd compiled or installed")
                return "EIGRP Test, but no eigrpd compiled or installed"

        if self.daemons["bfdd"] == 1:
            bfdd_path = os.path.join(self.daemondir, "bfdd")
            if not os.path.isfile(bfdd_path):
                logger.info("BFD Test, but no bfdd compiled or installed")
                return "BFD Test, but no bfdd compiled or installed"

        return self.startRouterDaemons(tgen=tgen)

    def getStdErr(self, daemon):
        return self.getLog("err", daemon)

    def getStdOut(self, daemon):
        return self.getLog("out", daemon)

    def getLog(self, log, daemon):
        return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log))

    def startRouterDaemons(self, daemons=None, tgen=None):
        "Starts FRR daemons for this router."

        asan_abort = g_extra_config["asan_abort"]
        gdb_breakpoints = g_extra_config["gdb_breakpoints"]
        gdb_daemons = g_extra_config["gdb_daemons"]
        gdb_routers = g_extra_config["gdb_routers"]
        valgrind_extra = g_extra_config["valgrind_extra"]
        valgrind_memleaks = g_extra_config["valgrind_memleaks"]
        strace_daemons = g_extra_config["strace_daemons"]

        # Get global bundle data
        if not self.path_exists("/etc/frr/support_bundle_commands.conf"):
            # Copy global value if was covered by namespace mount
            bundle_data = ""
            if os.path.exists("/etc/frr/support_bundle_commands.conf"):
                with open("/etc/frr/support_bundle_commands.conf", "r") as rf:
                    bundle_data = rf.read()
            self.cmd_raises(
                "cat > /etc/frr/support_bundle_commands.conf",
                stdin=bundle_data,
            )

        # Starts actual daemons without init (ie restart)
        # cd to per node directory
        self.cmd(
            "install -m 775 -o frr -g frr -d {}/{}".format(self.logdir, self.name)
        )
        self.set_cwd("{}/{}".format(self.logdir, self.name))
        self.cmd("umask 000")

        # Re-enable to allow for report per run
        self.reportCores = True

        # XXX: glue code forward ported from removed function.
        if self.version == None:
            self.version = self.cmd(
                os.path.join(self.daemondir, "bgpd") + " -v"
            ).split()[2]
            logger.info("{}: running version: {}".format(self.name, self.version))
        # If `daemons` was specified then some upper API called us with
        # specific daemons, otherwise just use our own configuration.
        daemons_list = []
        if daemons is not None:
            daemons_list = daemons
        else:
            # Append all daemons configured.
            for daemon in self.daemons:
                if self.daemons[daemon] == 1:
                    daemons_list.append(daemon)

        def start_daemon(daemon, extra_opts=None):
            daemon_opts = self.daemons_options.get(daemon, "")
            rediropt = " > {0}.out 2> {0}.err".format(daemon)
            if daemon == "snmpd":
                binary = "/usr/sbin/snmpd"
                cmdenv = ""
                cmdopt = "{} -C -c /etc/frr/snmpd.conf -p ".format(
                    daemon_opts
                ) + "/var/run/{}/snmpd.pid -x /etc/frr/agentx".format(self.routertype)
            else:
                binary = os.path.join(self.daemondir, daemon)

                cmdenv = "ASAN_OPTIONS="
                if asan_abort:
                    # append so the ASAN_OPTIONS= prefix is preserved
                    cmdenv += "abort_on_error=1:"
                cmdenv += "log_path={0}/{1}.{2}.asan ".format(
                    self.logdir, self.name, daemon
                )

                if valgrind_memleaks:
                    this_dir = os.path.dirname(
                        os.path.abspath(os.path.realpath(__file__))
                    )
                    supp_file = os.path.abspath(
                        os.path.join(this_dir, "../../../tools/valgrind.supp")
                    )
                    cmdenv += " /usr/bin/valgrind --num-callers=50 --log-file={1}/{2}.valgrind.{0}.%p --leak-check=full --suppressions={3}".format(
                        daemon, self.logdir, self.name, supp_file
                    )
                    if valgrind_extra:
                        cmdenv += (
                            " --gen-suppressions=all --expensive-definedness-checks=yes"
                        )
                elif daemon in strace_daemons or "all" in strace_daemons:
                    cmdenv = "strace -f -D -o {1}/{2}.strace.{0} ".format(
                        daemon, self.logdir, self.name
                    )

                cmdopt = "{} --log file:{}.log --log-level debug".format(
                    daemon_opts, daemon
                )
            if extra_opts:
                cmdopt += " " + extra_opts

            if (
                (gdb_routers or gdb_daemons)
                and (
                    not gdb_routers or self.name in gdb_routers or "all" in gdb_routers
                )
                and (not gdb_daemons or daemon in gdb_daemons or "all" in gdb_daemons)
            ):
                if daemon == "snmpd":
                    cmdopt += " -f "

                cmdopt += rediropt
                gdbcmd = "sudo -E gdb " + binary
                if gdb_breakpoints:
                    gdbcmd += " -ex 'set breakpoint pending on'"
                for bp in gdb_breakpoints:
                    gdbcmd += " -ex 'b {}'".format(bp)
                gdbcmd += " -ex 'run {}'".format(cmdopt)

                self.run_in_window(gdbcmd, daemon)

                logger.info(
                    "%s: %s %s launched in gdb window", self, self.routertype, daemon
                )
            else:
                if daemon != "snmpd":
                    cmdopt += " -d "
                cmdopt += rediropt

                try:
                    self.cmd_raises(" ".join([cmdenv, binary, cmdopt]), warn=False)
                except subprocess.CalledProcessError as error:
                    self.logger.error(
                        '%s: Failed to launch "%s" daemon (%d) using: %s%s%s:',
                        self,
                        daemon,
                        error.returncode,
                        error.cmd,
                        '\n:stdout: "{}"'.format(error.stdout.strip())
                        if error.stdout
                        else "",
                        '\n:stderr: "{}"'.format(error.stderr.strip())
                        if error.stderr
                        else "",
                    )
                else:
                    logger.info("%s: %s %s started", self, self.routertype, daemon)

        # Start Zebra first
        if "zebra" in daemons_list:
            start_daemon("zebra", "-s 90000000")
            while "zebra" in daemons_list:
                daemons_list.remove("zebra")

        # Start staticd next if required
        if "staticd" in daemons_list:
            start_daemon("staticd")
            while "staticd" in daemons_list:
                daemons_list.remove("staticd")

        if "snmpd" in daemons_list:
            # Give zebra a chance to configure interface addresses that the snmpd
            # daemon may then use.
            time.sleep(2)

            start_daemon("snmpd")
            while "snmpd" in daemons_list:
                daemons_list.remove("snmpd")

        if daemons is None:
            # Fix Link-Local Addresses on initial startup
            # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
            _, output, _ = self.cmd_status(
                "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; echo $i: $mac; [ -z \"$mac\" ] && continue; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done",
                stderr=subprocess.STDOUT,
            )
            logger.debug("Set MACs:\n%s", output)

        # Now start all the other daemons
        for daemon in daemons_list:
            if self.daemons[daemon] == 0:
                continue
            start_daemon(daemon)

        # Check if daemons are running.
        rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
        if re.search(r"No such file or directory", rundaemons):
            return "Daemons are not running"

        # Update the permissions on the log files
        self.cmd("chown frr:frr -R {}/{}".format(self.logdir, self.name))
        self.cmd("chmod ug+rwX,o+r -R {}/{}".format(self.logdir, self.name))

        return ""

    def killRouterDaemons(
        self, daemons, wait=True, assertOnError=True, minErrorVersion="5.1"
    ):
        # Kill Running FRR
        # Daemons(user specified daemon only) using SIGKILL
        rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
        errors = ""
        daemonsNotRunning = []
        if re.search(r"No such file or directory", rundaemons):
            return errors
        for daemon in daemons:
            if rundaemons is not None and daemon in rundaemons:
                numRunning = 0
                dmns = rundaemons.split("\n")
                # Exclude empty string at end of list
                for d in dmns[:-1]:
                    if re.search(r"%s" % daemon, d):
                        daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
                        if daemonpid.isdigit() and pid_exists(int(daemonpid)):
                            logger.info(
                                "{}: killing {}".format(
                                    self.name,
                                    os.path.basename(d.rstrip().rsplit(".", 1)[0]),
                                )
                            )
                            self.cmd("kill -9 %s" % daemonpid)
                            if pid_exists(int(daemonpid)):
                                numRunning += 1
                if wait and numRunning > 0:
                    sleep(
                        2,
                        "{}: waiting for {} daemon to be stopped".format(
                            self.name, daemon
                        ),
                    )

                    # 2nd round of kill if daemons didn't exit
                    for d in dmns[:-1]:
                        if re.search(r"%s" % daemon, d):
                            daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
                            if daemonpid.isdigit() and pid_exists(
                                int(daemonpid)
                            ):
                                logger.info(
                                    "{}: killing {}".format(
                                        self.name,
                                        os.path.basename(
                                            d.rstrip().rsplit(".", 1)[0]
                                        ),
                                    )
                                )
                                self.cmd("kill -9 %s" % daemonpid)
                            self.cmd("rm -- {}".format(d.rstrip()))
                if wait:
                    errors = self.checkRouterCores(reportOnce=True)
                    if self.checkRouterVersion("<", minErrorVersion):
                        # ignore errors in old versions
                        errors = ""
                    if assertOnError and len(errors) > 0:
                        assert "Errors found - details follow:" == 0, errors
            else:
                daemonsNotRunning.append(daemon)
        if len(daemonsNotRunning) > 0:
            # (the original expression built a tuple by accident; format the
            # daemon list into the error string instead)
            errors = errors + "Daemons are not running: %s" % str(daemonsNotRunning)

        return errors


    def checkRouterCores(self, reportLeaks=True, reportOnce=False):
        "Check for core files and memory leaks; return the accumulated trace text"

        if reportOnce and not self.reportCores:
            return
        reportMade = False
        traces = ""
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                )
                if len(corefiles) > 0:
                    backtrace = gdb_core(self, daemon, corefiles)
                    traces = (
                        traces
                        + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s"
                        % (self.name, daemon, backtrace)
                    )
                    reportMade = True
                elif reportLeaks:
                    log = self.getStdErr(daemon)
                    if "memstats" in log:
                        sys.stderr.write(
                            "%s: %s has memory leaks:\n" % (self.name, daemon)
                        )
                        traces = traces + "\n%s: %s has memory leaks:\n" % (
                            self.name,
                            daemon,
                        )
                        log = re.sub("core_handler: ", "", log)
                        log = re.sub(
                            r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                            r"\n ## \1",
                            log,
                        )
                        log = re.sub("memstats: ", " ", log)
                        sys.stderr.write(log)
                        reportMade = True
                # Look for AddressSanitizer errors and append them to
                # /tmp/AddressSanitizer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon, self.logdir
                ):
                    sys.stderr.write(
                        "%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)
                    )
                    traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (
                        self.name,
                        daemon,
                    )
                    reportMade = True
        if reportMade:
            self.reportCores = False
        return traces
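
    # A minimal sketch of consuming the returned trace text, assuming a caller
    # that fails the test run when anything was reported (not from this file):
    #
    #   traces = router.checkRouterCores(reportOnce=True) or ""
    #   assert traces == "", traces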

    def checkRouterRunning(self):
        "Check if router daemons are running and collect crashinfo if they don't run"

        global fatal_error

        daemonsRunning = self.cmd(
            'vtysh -c "show logging" | grep "Logging configuration for"'
        )
        # Look for AddressSanitizer errors in vtysh output and append them to
        # /tmp/AddressSanitizer.txt if found
        if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
            return "%s: vtysh killed by AddressSanitizer" % (self.name)

        for daemon in self.daemons:
            if daemon == "snmpd":
                continue
            if (self.daemons[daemon] == 1) and (daemon not in daemonsRunning):
                sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
                if daemon == "staticd":
                    sys.stderr.write(
                        "You may have a copy of staticd installed but are attempting to test against\n"
                    )
                    sys.stderr.write(
                        "a version of FRR that does not have staticd; please clean up the install dir\n"
                    )

                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                )
                if len(corefiles) > 0:
                    gdb_core(self, daemon, corefiles)
                else:
                    # No core found - if a matching logfile exists, print its last 20 lines
                    if os.path.isfile(
                        "{}/{}/{}.log".format(self.logdir, self.name, daemon)
                    ):
                        log_tail = subprocess.check_output(
                            [
                                "tail -n20 {}/{}/{}.log 2> /dev/null".format(
                                    self.logdir, self.name, daemon
                                )
                            ],
                            shell=True,
                        )
                        sys.stderr.write(
                            "\nFrom %s %s %s log file:\n"
                            % (self.routertype, self.name, daemon)
                        )
                        sys.stderr.write("%s\n" % log_tail)

                # Look for AddressSanitizer errors and append them to
                # /tmp/AddressSanitizer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon, self.logdir
                ):
                    return "%s: Daemon %s not running - killed by AddressSanitizer" % (
                        self.name,
                        daemon,
                    )

                return "%s: Daemon %s not running" % (self.name, daemon)

        return ""
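
    # A minimal retry-loop sketch (assumed caller, not from this file):
    # checkRouterRunning() returns "" once every configured daemon is up, so
    # callers can poll on it:
    #
    #   for _ in range(10):
    #       if router.checkRouterRunning() == "":
    #           break
    #       time.sleep(1)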
    def checkRouterVersion(self, cmpop, version):
        """
        Compares the router version using operation `cmpop` against `version`.

        Valid `cmpop` values:
        * `>=`: has the same version or greater
        * `>`: has a greater version
        * `=`: has the same version
        * `<`: has a lesser version
        * `<=`: has the same version or lesser

        Usage example: router.checkRouterVersion('>', '1.0')
        """

        # Make sure we have version information first
        if self.version is None:
            self.version = self.cmd(
                os.path.join(self.daemondir, "bgpd") + " -v"
            ).split()[2]
            logger.info("{}: running version: {}".format(self.name, self.version))

        rversion = self.version
        if rversion is None:
            return False

        result = version_cmp(rversion, version)
        if cmpop == ">=":
            return result >= 0
        if cmpop == ">":
            return result > 0
        if cmpop == "=":
            return result == 0
        if cmpop == "<":
            return result < 0
        if cmpop == "<=":
            return result <= 0
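
    # This mirrors the version gate used earlier in this file: suppress error
    # reporting when the router runs a release older than minErrorVersion:
    #
    #   if router.checkRouterVersion("<", minErrorVersion):
    #       errors = ""  # ignore errors in old versions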

    def get_ipv6_linklocal(self):
        "Get link-local addresses from interfaces"

        linklocal = []

        ifaces = self.cmd("ip -6 address")
        # Fix newlines (make them all the same)
        ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
        interface = ""
        ll_per_if_count = 0
        for line in ifaces:
            # Interface header line, e.g. "2: r1-eth0@if3: <BROADCAST,...>"
            m = re.search("[0-9]+: ([^:@]+)[-@a-z0-9:]+ <", line)
            if m:
                interface = m.group(1)
                ll_per_if_count = 0
            # Link-local address line, e.g. "inet6 fe80::... scope link"
            m = re.search(
                "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link",
                line,
            )
            if m:
                local = m.group(1)
                ll_per_if_count += 1
                if ll_per_if_count > 1:
                    linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
                else:
                    linklocal += [[interface, local]]
        return linklocal
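
    # Return shape, with made-up addresses for illustration: a list of
    # [interface, link-local] pairs; extra link-locals on the same interface
    # get a "-<count>" suffix:
    #
    #   [["r1-eth0", "fe80::a8bb:ccff:fedd:ee01"],
    #    ["r1-eth0-2", "fe80::a8bb:ccff:fedd:ee02"]]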

    def daemon_available(self, daemon):
        "Check if specified daemon is installed (and, for ldpd, if the kernel supports MPLS)"

        daemon_path = os.path.join(self.daemondir, daemon)
        if not os.path.isfile(daemon_path):
            return False
        if daemon == "ldpd":
            if version_cmp(platform.release(), "4.5") < 0:
                return False
            if not module_present("mpls-router", load=False):
                return False
            if not module_present("mpls-iptunnel", load=False):
                return False
        return True
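
    # Illustrative caller pattern (an assumption, not from this file): skip a
    # test when its daemon is missing or the kernel lacks MPLS support:
    #
    #   if not router.daemon_available("ldpd"):
    #       pytest.skip("ldpd not available on this system")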

    def get_routertype(self):
        "Return the type of router (frr)"

        return self.routertype

    def report_memory_leaks(self, filename_prefix, testscript):
        "Report memory leaks to a file prefixed with the given string"

        leakfound = False
        filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                log = self.getStdErr(daemon)
                if "memstats" in log:
                    # Found memory leak
                    logger.info(
                        "\nRouter {} {} StdErr Log:\n{}".format(self.name, daemon, log)
                    )
                    if not leakfound:
                        leakfound = True
                        # Check if file already exists
                        fileexists = os.path.isfile(filename)
                        leakfile = open(filename, "a")
                        if not fileexists:
                            # New file - add header
                            leakfile.write(
                                "# Memory Leak Detection for topotest %s\n\n"
                                % testscript
                            )
                        leakfile.write("## Router %s\n" % self.name)
                    leakfile.write("### Process %s\n" % daemon)
                    log = re.sub("core_handler: ", "", log)
                    log = re.sub(
                        r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                        r"\n#### \1\n",
                        log,
                    )
                    log = re.sub("memstats: ", " ", log)
                    leakfile.write(log)
                    leakfile.write("\n")
        if leakfound:
            leakfile.close()
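
    # The report produced above is Markdown-flavored, built solely from the
    # writes in this method: "# Memory Leak Detection for topotest <script>",
    # then "## Router <name>", "### Process <daemon>", and "#### showing
    # active allocations in memory group <X>" sections taken from the
    # daemon's stderr memstats output.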


def frr_unicode(s):
    """Convert a string to unicode, depending on the python version"""
    if sys.version_info[0] > 2:
        return s
    else:
        return unicode(s)  # pylint: disable=E0602
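
# Usage sketch for frr_unicode() (hypothetical caller, not from this file):
# it gives Python 2 and Python 3 a single code path for APIs that expect
# text, e.g.:
#
#   pattern = re.compile(frr_unicode(r"fe80::\S+"), re.UNICODE)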