#!/usr/bin/env python
#
# topotest.py
# Library of helper functions for NetDEF Topology Tests
#
# Copyright (c) 2016 by
# Network Device Education Foundation, Inc. ("NetDEF")
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#
import json
import os
import errno
import re
import sys
import functools
import glob
import StringIO
import subprocess
import tempfile
import platform
import difflib
import time

from lib.topolog import logger

if sys.version_info[0] > 2:
    import configparser
else:
    import ConfigParser as configparser

from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import Node, OVSSwitch, Host
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from mininet.link import Intf


class json_cmp_result(object):
    "json_cmp result class for better assertion messages"

    def __init__(self):
        self.errors = []

    def add_error(self, error):
        "Append error message to the result"
        for line in error.splitlines():
            self.errors.append(line)

    def has_errors(self):
        "Returns True if there were errors, otherwise False."
        return len(self.errors) > 0

    def __str__(self):
        return "\n".join(self.errors)


def json_diff(d1, d2):
    """
    Returns a string with the difference between JSON data.
    """
    json_format_opts = {
        "indent": 4,
        "sort_keys": True,
    }
    dstr1 = json.dumps(d1, **json_format_opts)
    dstr2 = json.dumps(d2, **json_format_opts)
    return difflines(dstr2, dstr1, title1="Expected value", title2="Current value", n=0)


def _json_list_cmp(list1, list2, parent, result):
    "Handles list type entries."
    # Check second list2 type
    if not isinstance(list1, type([])) or not isinstance(list2, type([])):
        result.add_error(
            "{} has different type than expected ".format(parent)
            + "(have {}, expected {}):\n{}".format(
                type(list1), type(list2), json_diff(list1, list2)
            )
        )
        return

    # Check list size
    if len(list2) > len(list1):
        result.add_error(
            "{} too few items ".format(parent)
            + "(have {}, expected {}:\n {})".format(
                len(list1), len(list2), json_diff(list1, list2)
            )
        )
        return

    # List all unmatched items errors
    unmatched = []
    for expected in list2:
        matched = False
        for value in list1:
            if json_cmp({"json": value}, {"json": expected}) is None:
                matched = True
                break

        if not matched:
            unmatched.append(expected)

    # If there are unmatched items, error out.
    if unmatched:
        result.add_error(
            "{} value is different (\n{})".format(parent, json_diff(list1, list2))
        )


def json_cmp(d1, d2):
    """
    JSON compare function. Receives two parameters:
    * `d1`: json value
    * `d2`: json subset which we expect

    Returns `None` when all keys that `d1` has match `d2`,
    otherwise a string containing what failed.

    Note: key absence can be tested by adding a key with value `None`.
    """
    squeue = [(d1, d2, "json")]
    result = json_cmp_result()

    for s in squeue:
        nd1, nd2, parent = s

        # Handle JSON beginning with lists.
        if isinstance(nd1, type([])) or isinstance(nd2, type([])):
            _json_list_cmp(nd1, nd2, parent, result)
            if result.has_errors():
                return result
            else:
                return None

        # Expect all required fields to exist.
        s1, s2 = set(nd1), set(nd2)
        s2_req = set([key for key in nd2 if nd2[key] is not None])
        diff = s2_req - s1
        if diff != set({}):
            result.add_error(
                "expected key(s) {} in {} (have {}):\n{}".format(
                    str(list(diff)), parent, str(list(s1)), json_diff(nd1, nd2)
                )
            )

        for key in s2.intersection(s1):
            # Test for non existence of key in d2
            if nd2[key] is None:
                result.add_error(
                    '"{}" should not exist in {} (have {}):\n{}'.format(
                        key, parent, str(s1), json_diff(nd1[key], nd2[key])
                    )
                )
                continue

            # If nd1 key is a dict, we have to recurse in it later.
            if isinstance(nd2[key], type({})):
                if not isinstance(nd1[key], type({})):
                    result.add_error(
                        '{}["{}"] has different type than expected '.format(parent, key)
                        + "(have {}, expected {}):\n{}".format(
                            type(nd1[key]),
                            type(nd2[key]),
                            json_diff(nd1[key], nd2[key]),
                        )
                    )
                    continue
                nparent = '{}["{}"]'.format(parent, key)
                squeue.append((nd1[key], nd2[key], nparent))
                continue

            # Check list items
            if isinstance(nd2[key], type([])):
                _json_list_cmp(nd1[key], nd2[key], parent, result)
                continue

            # Compare JSON values
            if nd1[key] != nd2[key]:
                result.add_error(
                    '{}["{}"] value is different (\n{})'.format(
                        parent, key, json_diff(nd1[key], nd2[key])
                    )
                )
                continue

    if result.has_errors():
        return result

    return None


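# Illustrative json_cmp() usage (a sketch only -- the `router` object and the
# "show ip route json" output shape below are assumptions for the example,
# not taken from this file):
#
#   expected = {
#       "10.0.1.0/24": [{"protocol": "ospf"}],
#       "10.0.9.0/24": None,  # assert this prefix is NOT present
#   }
#   diff = json_cmp(router.vtysh_cmd("show ip route json", isjson=True), expected)
#   assert diff is None, "unexpected RIB contents:\n{}".format(diff)

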
def router_output_cmp(router, cmd, expected):
    """
    Runs `cmd` in router and compares the output with `expected`.
    """
    return difflines(
        normalize_text(router.vtysh_cmd(cmd)),
        normalize_text(expected),
        title1="Current output",
        title2="Expected output",
    )


def router_json_cmp(router, cmd, data):
    """
    Runs `cmd` that returns JSON data (normally the command ends with 'json')
    and compare with `data` contents.
    """
    return json_cmp(router.vtysh_cmd(cmd, isjson=True), data)


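# Illustrative router_output_cmp() usage (a sketch; "r1" and the reference
# file name are hypothetical):
#
#   expected = open("show_ip_ospf_route.ref").read()
#   diff = router_output_cmp(r1, "show ip ospf route", expected)
#   assert not diff, "unexpected 'show ip ospf route' output:\n" + diff

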
def run_and_expect(func, what, count=20, wait=3):
    """
    Run `func` and compare the result with `what`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    Returns (True, func-return) on success or
    (False, func-return) on failure.

    ---

    Helper functions to use with this function:
    - router_output_cmp
    - router_json_cmp
    """
    start_time = time.time()
    func_name = "<unknown>"
    if func.__class__ == functools.partial:
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    logger.info(
        "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
            func_name, wait, int(wait * count)
        )
    )

    while count > 0:
        result = func()
        if result != what:
            time.sleep(wait)
            count -= 1
            continue

        end_time = time.time()
        logger.info(
            "'{}' succeeded after {:.2f} seconds".format(
                func_name, end_time - start_time
            )
        )
        return (True, result)

    end_time = time.time()
    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time)
    )
    return (False, result)


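# Illustrative run_and_expect() polling loop (a sketch; "r1" and the expected
# JSON are hypothetical test data):
#
#   test_func = functools.partial(
#       router_json_cmp, r1, "show bgp summary json", {"ipv4Unicast": {}}
#   )
#   ok, result = run_and_expect(test_func, None, count=40, wait=2)
#   assert ok, "BGP did not converge:\n{}".format(result)

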
def run_and_expect_type(func, etype, count=20, wait=3, avalue=None):
    """
    Run `func` and compare the result with `etype`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    This function is used when you want to test the return type and,
    optionally, the return value.

    Returns (True, func-return) on success or
    (False, func-return) on failure.
    """
    start_time = time.time()
    func_name = "<unknown>"
    if func.__class__ == functools.partial:
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    logger.info(
        "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
            func_name, wait, int(wait * count)
        )
    )

    while count > 0:
        result = func()
        if not isinstance(result, etype):
            logger.debug(
                "Expected result type '{}' got '{}' instead".format(etype, type(result))
            )
            time.sleep(wait)
            count -= 1
            continue

        if etype != type(None) and avalue != None and result != avalue:
            logger.debug("Expected value '{}' got '{}' instead".format(avalue, result))
            time.sleep(wait)
            count -= 1
            continue

        end_time = time.time()
        logger.info(
            "'{}' succeeded after {:.2f} seconds".format(
                func_name, end_time - start_time
            )
        )
        return (True, result)

    end_time = time.time()
    logger.error(
        "'{}' failed after {:.2f} seconds".format(func_name, end_time - start_time)
    )
    return (False, result)


def int2dpid(dpid):
    "Converting Integer to DPID"

    try:
        dpid = hex(dpid)[2:]
        dpid = "0" * (16 - len(dpid)) + dpid
        return dpid
    except IndexError:
        raise Exception(
            "Unable to derive default datapath ID - "
            "please either specify a dpid or use a "
            "canonical switch name such as s23."
        )


def pid_exists(pid):
    "Check whether pid exists in the current process table."

    if pid <= 0:
        return False
    try:
        os.kill(pid, 0)
    except OSError as err:
        if err.errno == errno.ESRCH:
            # ESRCH == No such process
            return False
        elif err.errno == errno.EPERM:
            # EPERM clearly means there's a process to deny access to
            return True
        else:
            # According to "man 2 kill" possible error values are
            # (EINVAL, EPERM, ESRCH)
            raise
    else:
        return True


def get_textdiff(text1, text2, title1="", title2="", **opts):
    "Returns empty string if same or formatted diff"

    diff = "\n".join(
        difflib.unified_diff(text1, text2, fromfile=title1, tofile=title2, **opts)
    )
    # Clean up line endings
    diff = os.linesep.join([s for s in diff.splitlines() if s])
    return diff


def difflines(text1, text2, title1="", title2="", **opts):
    "Wrapper for get_textdiff to avoid string transformations."
    text1 = ("\n".join(text1.rstrip().splitlines()) + "\n").splitlines(1)
    text2 = ("\n".join(text2.rstrip().splitlines()) + "\n").splitlines(1)
    return get_textdiff(text1, text2, title1, title2, **opts)


def get_file(content):
    """
    Generates a temporary file in '/tmp' with `content` and returns the file name.
    """
    fde = tempfile.NamedTemporaryFile(mode="w", delete=False)
    fname = fde.name
    fde.write(content)
    fde.close()
    return fname


def normalize_text(text):
    """
    Strips formatting spaces/tabs, carriage returns and trailing whitespace.
    """
    text = re.sub(r"[ \t]+", " ", text)
    text = re.sub(r"\r", "", text)

    # Remove whitespace in the middle of text.
    text = re.sub(r"[ \t]+\n", "\n", text)
    # Remove whitespace at the end of the text.
    text = text.rstrip()

    return text


def module_present_linux(module, load):
    """
    Returns whether `module` is present.

    If `load` is true, it will try to load it via modprobe.
    """
    with open("/proc/modules", "r") as modules_file:
        if module.replace("-", "_") in modules_file.read():
            return True
    cmd = "/sbin/modprobe {} {}".format("" if load else "-n", module)
    if os.system(cmd) != 0:
        return False
    else:
        return True


def module_present_freebsd(module, load):
    return True


def module_present(module, load=True):
    if sys.platform.startswith("linux"):
        return module_present_linux(module, load)
    elif sys.platform.startswith("freebsd"):
        return module_present_freebsd(module, load)


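# Illustrative module_present() checks (a sketch; mirrors how the calls are
# used further down in this file):
#
#   module_present("mpls-router")                # load via modprobe if needed
#   module_present("mpls-iptunnel", load=False)  # dry-run check only (-n)

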
def version_cmp(v1, v2):
    """
    Compare two version strings and returns:

    * `-1`: if `v1` is less than `v2`
    * `0`: if `v1` is equal to `v2`
    * `1`: if `v1` is greater than `v2`

    Raises `ValueError` if versions are not well formatted.
    """
    vregex = r"(?P<whole>\d+(\.(\d+))*)"
    v1m = re.match(vregex, v1)
    v2m = re.match(vregex, v2)
    if v1m is None or v2m is None:
        raise ValueError("got an invalid version string")

    # Split values
    v1g = v1m.group("whole").split(".")
    v2g = v2m.group("whole").split(".")

    # Get the longest version string
    vnum = len(v1g)
    if len(v2g) > vnum:
        vnum = len(v2g)

    # Reverse list because we are going to pop the tail
    v1g.reverse()
    v2g.reverse()
    for _ in range(vnum):
        try:
            v1n = int(v1g.pop())
        except IndexError:
            while v2g:
                v2n = int(v2g.pop())
                if v2n > 0:
                    return -1
            break
        try:
            v2n = int(v2g.pop())
        except IndexError:
            if v1n > 0:
                return 1
            while v1g:
                v1n = int(v1g.pop())
                if v1n > 0:
                    return 1
            break
        if v1n > v2n:
            return 1
        if v1n < v2n:
            return -1
    return 0


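# Illustrative version_cmp() results (comments only, not executed here):
#
#   version_cmp("2.0", "1.9.20")    # ->  1
#   version_cmp("4.5", "4.5")       # ->  0
#   version_cmp("3.10", "3.10.1")   # -> -1 (a missing component counts as older)
#   version_cmp(platform.release(), "4.5") < 0   # kernel older than 4.5?

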
def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None):
    if ifaceaction:
        str_ifaceaction = "no shutdown"
    else:
        str_ifaceaction = "shutdown"
    if vrf_name == None:
        cmd = 'vtysh -c "configure terminal" -c "interface {0}" -c "{1}"'.format(
            ifacename, str_ifaceaction
        )
    else:
        cmd = 'vtysh -c "configure terminal" -c "interface {0} vrf {1}" -c "{2}"'.format(
            ifacename, vrf_name, str_ifaceaction
        )
    node.run(cmd)


def ip4_route_zebra(node, vrf_name=None):
    """
    Gets the output of the 'show ip route' command. It can be used to
    compare the output against a reference.
    """
    if vrf_name == None:
        tmp = node.vtysh_cmd("show ip route")
    else:
        tmp = node.vtysh_cmd("show ip route vrf {0}".format(vrf_name))

    # Mask out timestamp
    output = re.sub(r"[0-2][0-9]:[0-5][0-9]:[0-5][0-9]", "XX:XX:XX", tmp)

    lines = output.splitlines()
    header_found = False
    while lines and (not lines[0].strip() or not header_found):
        if "> - selected route" in lines[0]:
            header_found = True
        lines = lines[1:]
    return "\n".join(lines)


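# Illustrative ip4_route_zebra() comparison (a sketch; "r1" and the reference
# file are hypothetical):
#
#   actual = ip4_route_zebra(r1, vrf_name="blue")
#   expected = open("show_ip_route_vrf_blue.ref").read()
#   assert not difflines(actual, expected, title1="Actual", title2="Expected")

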
def ip6_route_zebra(node, vrf_name=None):
    """
    Retrieves the output of 'show ipv6 route [vrf vrf_name]', then
    canonicalizes it by eliding link-locals.
    """
    if vrf_name == None:
        tmp = node.vtysh_cmd("show ipv6 route")
    else:
        tmp = node.vtysh_cmd("show ipv6 route vrf {0}".format(vrf_name))

    # Mask out timestamp
    output = re.sub(r"[0-2][0-9]:[0-5][0-9]:[0-5][0-9]", "XX:XX:XX", tmp)
    # Mask out the link-local addresses
    output = re.sub(r"fe80::[^ ]+,", "fe80::XXXX:XXXX:XXXX:XXXX,", output)

    lines = output.splitlines()
    header_found = False
    while lines and (not lines[0].strip() or not header_found):
        if "> - selected route" in lines[0]:
            header_found = True
        lines = lines[1:]
    return "\n".join(lines)


def proto_name_to_number(protocol):
    return {
        "bgp": "186",
        "isis": "187",
        "ospf": "188",
        "rip": "189",
        "ripng": "190",
        "nhrp": "191",
        "eigrp": "192",
        "ldp": "193",
        "sharp": "194",
        "pbr": "195",
        "static": "196",
    }.get(
        protocol, protocol
    )  # default return same as input


def ip4_route(node):
    """
    Gets a structured return of the command 'ip route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(node.run("ip route")).splitlines()
    result = {}
    for line in output:
        columns = line.split(" ")
        route = result[columns[0]] = {}
        prev = None
        for column in columns:
            if prev == "dev":
                route["dev"] = column
            if prev == "via":
                route["via"] = column
            if prev == "proto":
                # translate protocol names back to numbers
                route["proto"] = proto_name_to_number(column)
            if prev == "metric":
                route["metric"] = column
            if prev == "scope":
                route["scope"] = column
            prev = column

    return result


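# Illustrative ip4_route() usage together with json_cmp() (a sketch; the node
# object and the expected kernel route below are hypothetical):
#
#   expected = {"10.0.1.0/24": {"dev": "r1-eth0", "proto": "188"}}
#   assert json_cmp(ip4_route(r1), expected) is None, "kernel route missing"

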
def ip6_route(node):
    """
    Gets a structured return of the command 'ip -6 route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(node.run("ip -6 route")).splitlines()
    result = {}
    for line in output:
        columns = line.split(" ")
        route = result[columns[0]] = {}
        prev = None
        for column in columns:
            if prev == "dev":
                route["dev"] = column
            if prev == "via":
                route["via"] = column
            if prev == "proto":
                # translate protocol names back to numbers
                route["proto"] = proto_name_to_number(column)
            if prev == "metric":
                route["metric"] = column
            if prev == "pref":
                route["pref"] = column
            prev = column

    return result


def sleep(amount, reason=None):
    """
    Sleep wrapper that registers in the log the amount of sleep
    """
    if reason is None:
        logger.info("Sleeping for {} seconds".format(amount))
    else:
        logger.info(reason + " ({} seconds)".format(amount))

    time.sleep(amount)


def checkAddressSanitizerError(output, router, component):
    "Checks for AddressSanitizer in output. If found, then logs it and returns true, false otherwise"

    addressSantizerError = re.search(
        "(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*)", output
    )
    if addressSantizerError:
        sys.stderr.write(
            "%s: %s triggered an exception by AddressSanitizer\n" % (router, component)
        )
        # Sanitizer Error found in log
        pidMark = addressSantizerError.group(1)
        addressSantizerLog = re.search(
            "%s(.*)%s" % (pidMark, pidMark), output, re.DOTALL
        )
        if addressSantizerLog:
            callingTest = os.path.basename(
                sys._current_frames().values()[0].f_back.f_back.f_globals["__file__"]
            )
            callingProc = sys._getframe(2).f_code.co_name
            with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile:
                sys.stderr.write(
                    "\n".join(addressSantizerLog.group(1).splitlines()) + "\n"
                )
                addrSanFile.write("## Error: %s\n\n" % addressSantizerError.group(2))
                addrSanFile.write(
                    "### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n"
                    % (callingTest, callingProc, router)
                )
                addrSanFile.write(
                    "    "
                    + "\n    ".join(addressSantizerLog.group(1).splitlines())
                    + "\n"
                )
                addrSanFile.write("\n---------------\n")
        return True

    return False


def addRouter(topo, name):
    "Adding a FRRouter (or Quagga) to Topology"

    MyPrivateDirs = [
        "/etc/frr",
        "/etc/quagga",
        "/var/run/frr",
        "/var/run/quagga",
        "/var/log",
    ]
    if sys.platform.startswith("linux"):
        return topo.addNode(name, cls=LinuxRouter, privateDirs=MyPrivateDirs)
    elif sys.platform.startswith("freebsd"):
        return topo.addNode(name, cls=FreeBSDRouter, privateDirs=MyPrivateDirs)


def set_sysctl(node, sysctl, value):
    "Set a sysctl value and return None on success or an error string"
    valuestr = "{}".format(value)
    command = "sysctl {0}={1}".format(sysctl, valuestr)
    cmdret = node.cmd(command)

    matches = re.search(r"([^ ]+) = ([^\s]+)", cmdret)
    if matches is None:
        return cmdret
    if matches.group(1) != sysctl:
        return cmdret
    if matches.group(2) != valuestr:
        return cmdret

    return None


def assert_sysctl(node, sysctl, value):
    "Set and assert that the sysctl is set with the specified value."
    assert set_sysctl(node, sysctl, value) is None


class Router(Node):
    "A Node with IPv4/IPv6 forwarding enabled and FRR or Quagga as routing engine"

    def __init__(self, name, **params):
        super(Router, self).__init__(name, **params)
        self.logdir = params.get("logdir")

        # Backward compatibility:
        #   Load configuration defaults like topogen.
        self.config_defaults = configparser.ConfigParser(
            {
                "verbosity": "info",
                "frrdir": "/usr/lib/frr",
                "quaggadir": "/usr/lib/quagga",
                "routertype": "frr",
                "memleak_path": None,
            }
        )
        self.config_defaults.read(
            os.path.join(os.path.dirname(os.path.realpath(__file__)), "../pytest.ini")
        )

        # If this topology is using old API and doesn't have logdir
        # specified, then attempt to generate a unique logdir.
        if self.logdir is None:
            cur_test = os.environ["PYTEST_CURRENT_TEST"]
            self.logdir = "/tmp/topotests/" + cur_test[
                0 : cur_test.find(".py")
            ].replace("/", ".")

            # If the logdir is not created, then create it and set the
            # appropriated permissions.
            if not os.path.isdir(self.logdir):
                os.system("mkdir -p " + self.logdir + "/" + name)
                os.system("chmod -R go+rw /tmp/topotests")

        self.daemondir = None
        self.hasmpls = False
        self.routertype = "frr"
        self.daemons = {
            "zebra": 0,
            "ripd": 0,
            "ripngd": 0,
            "ospfd": 0,
            "ospf6d": 0,
            "isisd": 0,
            "bgpd": 0,
            "pimd": 0,
            "ldpd": 0,
            "eigrpd": 0,
            "nhrpd": 0,
            "staticd": 0,
            "bfdd": 0,
            "sharpd": 0,
        }
        self.daemons_options = {"zebra": ""}
        self.reportCores = True
        self.version = None

    def _config_frr(self, **params):
        "Configure FRR binaries"
        self.daemondir = params.get("frrdir")
        if self.daemondir is None:
            self.daemondir = self.config_defaults.get("topogen", "frrdir")

        zebra_path = os.path.join(self.daemondir, "zebra")
        if not os.path.isfile(zebra_path):
            raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path))

    def _config_quagga(self, **params):
        "Configure Quagga binaries"
        self.daemondir = params.get("quaggadir")
        if self.daemondir is None:
            self.daemondir = self.config_defaults.get("topogen", "quaggadir")

        zebra_path = os.path.join(self.daemondir, "zebra")
        if not os.path.isfile(zebra_path):
            raise Exception(
                "Quagga zebra binary doesn't exist at {}".format(zebra_path)
            )

    # pylint: disable=W0221
    # Some params are only meaningful for the parent class.
    def config(self, **params):
        super(Router, self).config(**params)

        # User did not specify the daemons directory, try to autodetect it.
        self.daemondir = params.get("daemondir")
        if self.daemondir is None:
            self.routertype = params.get(
                "routertype", self.config_defaults.get("topogen", "routertype")
            )
            if self.routertype == "quagga":
                self._config_quagga(**params)
            else:
                self._config_frr(**params)
        else:
            # Test the provided path
            zpath = os.path.join(self.daemondir, "zebra")
            if not os.path.isfile(zpath):
                raise Exception("No zebra binary found in {}".format(zpath))
            # Allow user to specify routertype when the path was specified.
            if params.get("routertype") is not None:
                self.routertype = params.get("routertype")

        self.cmd("ulimit -c unlimited")
        # Set ownership of config files
        self.cmd("chown {0}:{0}vty /etc/{0}".format(self.routertype))

    def terminate(self):
        # Delete Running Quagga or FRR Daemons
        self.stopRouter()
        # rundaemons = self.cmd('ls -1 /var/run/%s/*.pid' % self.routertype)
        # for d in StringIO.StringIO(rundaemons):
        #     self.cmd('kill -7 `cat %s`' % d.rstrip())
        #     self.waitOutput()
        # Disable forwarding
        set_sysctl(self, "net.ipv4.ip_forward", 0)
        set_sysctl(self, "net.ipv6.conf.all.forwarding", 0)
        super(Router, self).terminate()
        os.system("chmod -R go+rw /tmp/topotests")

    def stopRouter(self, wait=True, assertOnError=True, minErrorVersion="5.1"):
        # Stop Running Quagga or FRR Daemons
        rundaemons = self.cmd("ls -1 /var/run/%s/*.pid" % self.routertype)
        errors = ""
        if re.search(r"No such file or directory", rundaemons):
            return errors
        if rundaemons is not None:
            numRunning = 0
            for d in StringIO.StringIO(rundaemons):
                daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
                if daemonpid.isdigit() and pid_exists(int(daemonpid)):
                    logger.info(
                        "{}: stopping {}".format(
                            self.name, os.path.basename(d.rstrip().rsplit(".", 1)[0])
                        )
                    )
                    self.cmd("kill -TERM %s" % daemonpid)
                    self.waitOutput()
                    if pid_exists(int(daemonpid)):
                        numRunning += 1
            if wait and numRunning > 0:
                sleep(2, "{}: waiting for daemons stopping".format(self.name))
                # 2nd round of kill if daemons didn't exit
                for d in StringIO.StringIO(rundaemons):
                    daemonpid = self.cmd("cat %s" % d.rstrip()).rstrip()
                    if daemonpid.isdigit() and pid_exists(int(daemonpid)):
                        logger.info(
                            "{}: killing {}".format(
                                self.name,
                                os.path.basename(d.rstrip().rsplit(".", 1)[0]),
                            )
                        )
                        self.cmd("kill -7 %s" % daemonpid)
                        self.waitOutput()
                    self.cmd("rm -- {}".format(d.rstrip()))
        if wait:
            errors = self.checkRouterCores(reportOnce=True)
            if self.checkRouterVersion("<", minErrorVersion):
                # ignore errors in old versions
                errors = ""
            if assertOnError and len(errors) > 0:
                assert "Errors found - details follow:" == 0, errors
        return errors

    def removeIPs(self):
        for interface in self.intfNames():
            self.cmd("ip address flush", interface)

    def checkCapability(self, daemon, param):
        if param is not None:
            daemon_path = os.path.join(self.daemondir, daemon)
            daemon_search_option = param.replace("-", "")
            output = self.cmd(
                "{0} -h | grep {1}".format(daemon_path, daemon_search_option)
            )
            if daemon_search_option not in output:
                return False
        return True

    def loadConf(self, daemon, source=None, param=None):
        # print "Daemons before:", self.daemons
        if daemon in self.daemons.keys():
            self.daemons[daemon] = 1
            if param is not None:
                self.daemons_options[daemon] = param
            if source is None:
                self.cmd("touch /etc/%s/%s.conf" % (self.routertype, daemon))
                self.waitOutput()
            else:
                self.cmd("cp %s /etc/%s/%s.conf" % (source, self.routertype, daemon))
                self.waitOutput()
            self.cmd("chmod 640 /etc/%s/%s.conf" % (self.routertype, daemon))
            self.waitOutput()
            self.cmd(
                "chown %s:%s /etc/%s/%s.conf"
                % (self.routertype, self.routertype, self.routertype, daemon)
            )
            self.waitOutput()
            if (daemon == "zebra") and (self.daemons["staticd"] == 0):
                # Add staticd with zebra - if it exists
                staticd_path = os.path.join(self.daemondir, "staticd")
                if os.path.isfile(staticd_path):
                    self.daemons["staticd"] = 1
                    self.daemons_options["staticd"] = ""
                    # Auto-Started staticd has no config, so it will read from zebra config
        else:
            logger.info("No daemon {} known".format(daemon))
        # print "Daemons after:", self.daemons

    def startRouter(self, tgen=None):
        # Disable integrated-vtysh-config
        self.cmd(
            'echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf'
            % self.routertype
        )
        self.cmd(
            "chown %s:%svty /etc/%s/vtysh.conf"
            % (self.routertype, self.routertype, self.routertype)
        )
        # TODO remove the following lines after all tests are migrated to Topogen.
        # Try to find relevant old logfiles in /tmp and delete them
        map(os.remove, glob.glob("{}/{}/*.log".format(self.logdir, self.name)))
        # Remove old core files
        map(os.remove, glob.glob("{}/{}/*.dmp".format(self.logdir, self.name)))
        # Remove IP addresses from OS first - we have them in zebra.conf
        self.removeIPs()
        # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher
        # No error - but return message and skip all the tests
        if self.daemons["ldpd"] == 1:
            ldpd_path = os.path.join(self.daemondir, "ldpd")
            if not os.path.isfile(ldpd_path):
                logger.info("LDP Test, but no ldpd compiled or installed")
                return "LDP Test, but no ldpd compiled or installed"

            if version_cmp(platform.release(), "4.5") < 0:
                logger.info("LDP Test need Linux Kernel 4.5 minimum")
                return "LDP Test need Linux Kernel 4.5 minimum"

            # Check if have mpls
            if tgen != None:
                self.hasmpls = tgen.hasmpls
                if self.hasmpls != True:
                    logger.info(
                        "LDP/MPLS Tests will be skipped, platform missing module(s)"
                    )
            else:
                # Test for MPLS Kernel modules available
                self.hasmpls = False
                if not module_present("mpls-router"):
                    logger.info(
                        "MPLS tests will not run (missing mpls-router kernel module)"
                    )
                elif not module_present("mpls-iptunnel"):
                    logger.info(
                        "MPLS tests will not run (missing mpls-iptunnel kernel module)"
                    )
                else:
                    self.hasmpls = True
            if self.hasmpls != True:
                return "LDP/MPLS Tests need mpls kernel modules"
            self.cmd("echo 100000 > /proc/sys/net/mpls/platform_labels")

        if self.daemons["eigrpd"] == 1:
            eigrpd_path = os.path.join(self.daemondir, "eigrpd")
            if not os.path.isfile(eigrpd_path):
                logger.info("EIGRP Test, but no eigrpd compiled or installed")
                return "EIGRP Test, but no eigrpd compiled or installed"

        if self.daemons["bfdd"] == 1:
            bfdd_path = os.path.join(self.daemondir, "bfdd")
            if not os.path.isfile(bfdd_path):
                logger.info("BFD Test, but no bfdd compiled or installed")
                return "BFD Test, but no bfdd compiled or installed"

        self.restartRouter()
        return ""

    def restartRouter(self):
        # Starts actual daemons without init (ie restart)
        # cd to per node directory
        self.cmd("cd {}/{}".format(self.logdir, self.name))
        self.cmd("umask 000")
        # Re-enable to allow for report per run
        self.reportCores = True
        if self.version == None:
            self.version = self.cmd(
                os.path.join(self.daemondir, "bgpd") + " -v"
            ).split()[2]
            logger.info("{}: running version: {}".format(self.name, self.version))
        # Start Zebra first
        if self.daemons["zebra"] == 1:
            zebra_path = os.path.join(self.daemondir, "zebra")
            zebra_option = self.daemons_options["zebra"]
            self.cmd(
                "{0} {1} > zebra.out 2> zebra.err &".format(
                    zebra_path, zebra_option, self.logdir, self.name
                )
            )
            self.waitOutput()
            logger.debug("{}: {} zebra started".format(self, self.routertype))
            sleep(1, "{}: waiting for zebra to start".format(self.name))
        # Start staticd next if required
        if self.daemons["staticd"] == 1:
            staticd_path = os.path.join(self.daemondir, "staticd")
            staticd_option = self.daemons_options["staticd"]
            self.cmd(
                "{0} {1} > staticd.out 2> staticd.err &".format(
                    staticd_path, staticd_option, self.logdir, self.name
                )
            )
            self.waitOutput()
            logger.debug("{}: {} staticd started".format(self, self.routertype))
        # Fix Link-Local Addresses
        # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
        self.cmd(
            "for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; IFS=':'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done"
        )
        # Now start all the other daemons
        for daemon in self.daemons:
            # Skip disabled daemons and zebra
            if self.daemons[daemon] == 0 or daemon == "zebra" or daemon == "staticd":
                continue
            daemon_path = os.path.join(self.daemondir, daemon)
            self.cmd(
                "{0} {1} > {2}.out 2> {2}.err &".format(
                    daemon_path, self.daemons_options.get(daemon, ""), daemon
                )
            )
            self.waitOutput()
            logger.debug("{}: {} {} started".format(self, self.routertype, daemon))

    def getStdErr(self, daemon):
        return self.getLog("err", daemon)

    def getStdOut(self, daemon):
        return self.getLog("out", daemon)

    def getLog(self, log, daemon):
        return self.cmd("cat {}/{}/{}.{}".format(self.logdir, self.name, daemon, log))

    def checkRouterCores(self, reportLeaks=True, reportOnce=False):
        if reportOnce and not self.reportCores:
            return
        reportMade = False
        traces = ""
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                )
                if len(corefiles) > 0:
                    daemon_path = os.path.join(self.daemondir, daemon)
                    backtrace = subprocess.check_output(
                        [
                            "gdb {} {} --batch -ex bt 2> /dev/null".format(
                                daemon_path, corefiles[0]
                            )
                        ],
                        shell=True,
                    )
                    sys.stderr.write(
                        "\n%s: %s crashed. Core file found - Backtrace follows:\n"
                        % (self.name, daemon)
                    )
                    sys.stderr.write("%s" % backtrace)
                    traces = (
                        traces
                        + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s"
                        % (self.name, daemon, backtrace)
                    )
                    reportMade = True
                elif reportLeaks:
                    log = self.getStdErr(daemon)
                    if "memstats" in log:
                        sys.stderr.write(
                            "%s: %s has memory leaks:\n" % (self.name, daemon)
                        )
                        traces = traces + "\n%s: %s has memory leaks:\n" % (
                            self.name,
                            daemon,
                        )
                        log = re.sub("core_handler: ", "", log)
                        log = re.sub(
                            r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                            r"\n## \1",
                            log,
                        )
                        log = re.sub("memstats:  ", "    ", log)
                        sys.stderr.write(log)
                        reportMade = True
                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon
                ):
                    sys.stderr.write(
                        "%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)
                    )
                    traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (
                        self.name,
                        daemon,
                    )
                    reportMade = True
        if reportMade:
            self.reportCores = False
        return traces

    def checkRouterRunning(self):
        "Check if router daemons are running and collect crashinfo if they don't run"

        global fatal_error

        daemonsRunning = self.cmd(
            'vtysh -c "show logging" | grep "Logging configuration for"'
        )
        # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
        if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
            return "%s: vtysh killed by AddressSanitizer" % (self.name)

        for daemon in self.daemons:
            if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning):
                sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
                if daemon == "staticd":
                    sys.stderr.write(
                        "You may have a copy of staticd installed but are attempting to test against\n"
                    )
                    sys.stderr.write(
                        "a version of FRR that does not have staticd, please cleanup the install dir\n"
                    )

                # Look for core file
                corefiles = glob.glob(
                    "{}/{}/{}_core*.dmp".format(self.logdir, self.name, daemon)
                )
                if len(corefiles) > 0:
                    daemon_path = os.path.join(self.daemondir, daemon)
                    backtrace = subprocess.check_output(
                        [
                            "gdb {} {} --batch -ex bt 2> /dev/null".format(
                                daemon_path, corefiles[0]
                            )
                        ],
                        shell=True,
                    )
                    sys.stderr.write(
                        "\n%s: %s crashed. Core file found - Backtrace follows:\n"
                        % (self.name, daemon)
                    )
                    sys.stderr.write("%s\n" % backtrace)
                else:
                    # No core found - If we find matching logfile in /tmp, then print last 20 lines from it.
                    if os.path.isfile(
                        "{}/{}/{}.log".format(self.logdir, self.name, daemon)
                    ):
                        log_tail = subprocess.check_output(
                            [
                                "tail -n20 {}/{}/{}.log 2> /dev/null".format(
                                    self.logdir, self.name, daemon
                                )
                            ],
                            shell=True,
                        )
                        sys.stderr.write(
                            "\nFrom %s %s %s log file:\n"
                            % (self.routertype, self.name, daemon)
                        )
                        sys.stderr.write("%s\n" % log_tail)

                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(
                    self.getStdErr(daemon), self.name, daemon
                ):
                    return "%s: Daemon %s not running - killed by AddressSanitizer" % (
                        self.name,
                        daemon,
                    )

                return "%s: Daemon %s not running" % (self.name, daemon)
        return ""

    def checkRouterVersion(self, cmpop, version):
        """
        Compares router version using operation `cmpop` with `version`.
        Valid `cmpop` values:
        * `>=`: has the same version or greater
        * `>`: has greater version
        * `=`: has the same version
        * `<`: has a lesser version
        * `<=`: has the same version or lesser

        Usage example: router.checkRouterVersion('>', '1.0')
        """

        # Make sure we have version information first
        if self.version == None:
            self.version = self.cmd(
                os.path.join(self.daemondir, "bgpd") + " -v"
            ).split()[2]
            logger.info("{}: running version: {}".format(self.name, self.version))

        rversion = self.version
        if rversion is None:
            return False

        result = version_cmp(rversion, version)
        if cmpop == ">=":
            return result >= 0
        if cmpop == ">":
            return result > 0
        if cmpop == "=":
            return result == 0
        if cmpop == "<":
            return result < 0
        if cmpop == "<=":
            return result <= 0

    def get_ipv6_linklocal(self):
        "Get LinkLocal Addresses from interfaces"

        linklocal = []

        ifaces = self.cmd("ip -6 address")
        # Fix newlines (make them all the same)
        ifaces = ("\n".join(ifaces.splitlines()) + "\n").splitlines()
        interface = ""
        ll_per_if_count = 0
        for line in ifaces:
            # Interface name line, e.g. "2: r1-eth0@if3: <BROADCAST,..."
            m = re.search("[0-9]+: ([^:@]+)[@if0-9:]+ <", line)
            if m:
                interface = m.group(1)
                ll_per_if_count = 0
            # Link-local address line
            m = re.search(
                "inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link",
                line,
            )
            if m:
                local = m.group(1)
                ll_per_if_count += 1
                if ll_per_if_count > 1:
                    linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
                else:
                    linklocal += [[interface, local]]
        return linklocal

    def daemon_available(self, daemon):
        "Check if specified daemon is installed (and for ldp if kernel supports MPLS)"

        daemon_path = os.path.join(self.daemondir, daemon)
        if not os.path.isfile(daemon_path):
            return False
        if daemon == "ldpd":
            if version_cmp(platform.release(), "4.5") < 0:
                return False
            if not module_present("mpls-router", load=False):
                return False
            if not module_present("mpls-iptunnel", load=False):
                return False
        return True

    def get_routertype(self):
        "Return the type of Router (frr or quagga)"

        return self.routertype

    def report_memory_leaks(self, filename_prefix, testscript):
        "Report Memory Leaks to file prefixed with given string"

        leakfound = False
        filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
        for daemon in self.daemons:
            if self.daemons[daemon] == 1:
                log = self.getStdErr(daemon)
                if "memstats" in log:
                    # Found memory leak
                    logger.info(
                        "\nRouter {} {} StdErr Log:\n{}".format(self.name, daemon, log)
                    )
                    if not leakfound:
                        leakfound = True
                        # Check if file already exists
                        fileexists = os.path.isfile(filename)
                        leakfile = open(filename, "a")
                        if not fileexists:
                            # New file - add header
                            leakfile.write(
                                "# Memory Leak Detection for topotest %s\n\n"
                                % testscript
                            )
                    leakfile.write("## Router %s\n" % self.name)
                    leakfile.write("### Process %s\n" % daemon)
                    log = re.sub("core_handler: ", "", log)
                    log = re.sub(
                        r"(showing active allocations in memory group [a-zA-Z0-9]+)",
                        r"\n#### \1\n",
                        log,
                    )
                    log = re.sub("memstats:  ", "    ", log)
                    leakfile.write(log)
                    leakfile.write("\n")
        if leakfound:
            leakfile.close()


class LinuxRouter(Router):
    "A Linux Router Node with IPv4/IPv6 forwarding enabled."

    def __init__(self, name, **params):
        Router.__init__(self, name, **params)

    def config(self, **params):
        Router.config(self, **params)
        # Enable forwarding on the router
        assert_sysctl(self, "net.ipv4.ip_forward", 1)
        assert_sysctl(self, "net.ipv6.conf.all.forwarding", 1)
        # Enable coredumps
        assert_sysctl(self, "kernel.core_uses_pid", 1)
        assert_sysctl(self, "fs.suid_dumpable", 1)
        # this applies to the kernel not the namespace...
        # original on ubuntu 17.x, but apport won't save as in namespace
        # |/usr/share/apport/apport %p %s %c %d %P
        corefile = "%e_core-sig_%s-pid_%p.dmp"
        assert_sysctl(self, "kernel.core_pattern", corefile)

    def terminate(self):
        """
        Terminate generic LinuxRouter Mininet instance
        """
        set_sysctl(self, "net.ipv4.ip_forward", 0)
        set_sysctl(self, "net.ipv6.conf.all.forwarding", 0)
        Router.terminate(self)


class FreeBSDRouter(Router):
    "A FreeBSD Router Node with IPv4/IPv6 forwarding enabled."

    def __init__(self, name, **params):
        Router.__init__(self, name, **params)


class LegacySwitch(OVSSwitch):
    "A Legacy Switch without OpenFlow"

    def __init__(self, name, **params):
        OVSSwitch.__init__(self, name, failMode="standalone", **params)
        self.switchIP = None