#!/usr/bin/env python
#
# topotest.py
# Library of helper functions for NetDEF Topology Tests
#
# Copyright (c) 2016 by
# Network Device Education Foundation, Inc. ("NetDEF")
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#

import json
import os
import errno
import re
import sys
import functools
import glob
import StringIO
import subprocess
import tempfile
import platform
import difflib
import time

from lib.topolog import logger

from mininet.topo import Topo
from mininet.net import Mininet
from mininet.node import Node, OVSSwitch, Host
from mininet.log import setLogLevel, info
from mininet.cli import CLI
from mininet.link import Intf

class json_cmp_result(object):
    "json_cmp result class for better assertion messages"

    def __init__(self):
        self.errors = []

    def add_error(self, error):
        "Append error message to the result"
        for line in error.splitlines():
            self.errors.append(line)

    def has_errors(self):
        "Returns True if there were errors, otherwise False."
        return len(self.errors) > 0

def get_test_logdir(node=None, init=False):
    """
    Return the current test log directory based on the PYTEST_CURRENT_TEST
    environment variable.
    Optional parameters:
     * `node`: when set, adds the node-specific log directory to the init dir
     * `init`: when set, initializes the log directory and fixes path permissions
    """
    cur_test = os.environ['PYTEST_CURRENT_TEST']

    ret = '/tmp/topotests/' + cur_test[0:cur_test.find(".py")].replace('/', '.')
    if node != None:
        dir = ret + "/" + node
    if init:
        os.system('mkdir -p ' + dir)
        os.system('chmod -R go+rw /tmp/topotests')

    return ret
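
# Illustrative mapping (a sketch; the PYTEST_CURRENT_TEST value below is an
# assumed typical example, not taken from a real run):
# with PYTEST_CURRENT_TEST set to
#   'ospf-topo1/test_ospf_topo1.py::test_ospf_convergence (call)'
# get_test_logdir() returns '/tmp/topotests/ospf-topo1.test_ospf_topo1', and
# get_test_logdir('r1', True) additionally creates
# '/tmp/topotests/ospf-topo1.test_ospf_topo1/r1' (the return value is still
# the test directory, not the node directory).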

def json_diff(d1, d2):
    """
    Returns a string with the difference between JSON data.
    """
    json_format_opts = {
        'indent': 4,
        'sort_keys': True,
    }
    dstr1 = json.dumps(d1, **json_format_opts)
    dstr2 = json.dumps(d2, **json_format_opts)
    return difflines(dstr2, dstr1, title1='Expected value', title2='Current value', n=0)

def _json_list_cmp(list1, list2, parent, result):
    "Handles list type entries."
    # Check second list2 type
    if not isinstance(list1, type([])) or not isinstance(list2, type([])):
        result.add_error(
            '{} has different type than expected '.format(parent) +
            '(have {}, expected {}):\n{}'.format(
                type(list1), type(list2), json_diff(list1, list2)))
        return

    # Check list size
    if len(list2) > len(list1):
        result.add_error(
            '{} too few items '.format(parent) +
            '(have {}, expected {}:\n {})'.format(
                len(list1), len(list2),
                json_diff(list1, list2)))
        return

    # List all unmatched items errors
    unmatched = []
    for expected in list2:
        matched = False
        for value in list1:
            if json_cmp({'json': value}, {'json': expected}) is None:
                matched = True
                break

        if not matched:
            unmatched.append(expected)

    # If there are unmatched items, error out.
    if unmatched:
        result.add_error(
            '{} value is different (\n{})'.format(
                parent, json_diff(list1, list2)))

def json_cmp(d1, d2):
    """
    JSON compare function. Receives two parameters:
    * `d1`: json value
    * `d2`: json subset which we expect

    Returns `None` when all keys expected by `d2` match `d1`,
    otherwise a string containing what failed.

    Note: key absence can be tested by adding a key with value `None`.
    """
    squeue = [(d1, d2, 'json')]
    result = json_cmp_result()

    for s in squeue:
        nd1, nd2, parent = s

        # Handle JSON beginning with lists.
        if isinstance(nd1, type([])) or isinstance(nd2, type([])):
            _json_list_cmp(nd1, nd2, parent, result)
            if result.has_errors():
                return result
            else:
                return None

        # Expect all required fields to exist.
        s1, s2 = set(nd1), set(nd2)
        s2_req = set([key for key in nd2 if nd2[key] is not None])
        diff = s2_req - s1
        if diff != set({}):
            result.add_error('expected key(s) {} in {} (have {}):\n{}'.format(
                str(list(diff)), parent, str(list(s1)), json_diff(nd1, nd2)))

        for key in s2.intersection(s1):
            # Test for non existence of key in d2
            if nd2[key] is None:
                result.add_error('"{}" should not exist in {} (have {}):\n{}'.format(
                    key, parent, str(s1), json_diff(nd1[key], nd2[key])))
                continue

            # If nd1 key is a dict, we have to recurse in it later.
            if isinstance(nd2[key], type({})):
                if not isinstance(nd1[key], type({})):
                    result.add_error(
                        '{}["{}"] has different type than expected '.format(parent, key) +
                        '(have {}, expected {}):\n{}'.format(
                            type(nd1[key]), type(nd2[key]), json_diff(nd1[key], nd2[key])))
                    continue
                nparent = '{}["{}"]'.format(parent, key)
                squeue.append((nd1[key], nd2[key], nparent))
                continue

            # Check list items
            if isinstance(nd2[key], type([])):
                _json_list_cmp(nd1[key], nd2[key], parent, result)
                continue

            # Compare JSON values
            if nd1[key] != nd2[key]:
                result.add_error(
                    '{}["{}"] value is different (\n{})'.format(
                        parent, key, json_diff(nd1[key], nd2[key])))
                continue

    if result.has_errors():
        return result

    return None
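
# Illustrative json_cmp() usage (a sketch; the data below is made up):
# the second argument is the expected subset, and a key whose value is None
# asserts that the key must be absent from the first argument.
#
#   data = {'prefix': '10.0.1.0/24', 'protocol': 'ospf', 'selected': True}
#   expect = {'prefix': '10.0.1.0/24', 'protocol': 'ospf', 'warning': None}
#   assert json_cmp(data, expect) is None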

def router_output_cmp(router, cmd, expected):
    """
    Runs `cmd` in router and compares the output with `expected`.
    """
    return difflines(normalize_text(router.vtysh_cmd(cmd)),
                     normalize_text(expected),
                     title1="Current output",
                     title2="Expected output")


def router_json_cmp(router, cmd, data):
    """
    Runs `cmd` that returns JSON data (normally the command ends with 'json')
    and compares it with `data` contents.
    """
    return json_cmp(router.vtysh_cmd(cmd, isjson=True), data)
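
# Illustrative usage (a sketch; the 'router' handle and the reference file
# name are assumptions, not part of this library):
#
#   reference = open('r1/show_ip_ospf_neighbor.ref').read()
#   diff = router_output_cmp(router, 'show ip ospf neighbor', reference)
#   assert diff == '', 'unexpected output:\n' + diff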

def run_and_expect(func, what, count=20, wait=3):
    """
    Run `func` and compare the result with `what`. Do this up to `count` times,
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    Returns (True, func-return) on success or
    (False, func-return) on failure.

    ---

    Helper functions to use with this function:
    - router_output_cmp
    - router_json_cmp
    """
    start_time = time.time()
    func_name = "<unknown>"
    if func.__class__ == functools.partial:
        func_name = func.func.__name__
    else:
        func_name = func.__name__

    logger.info(
        "'{}' polling started (interval {} secs, maximum wait {} secs)".format(
            func_name, wait, int(wait * count)))

    while count > 0:
        result = func()
        if result != what:
            time.sleep(wait)
            count -= 1
            continue

        end_time = time.time()
        logger.info("'{}' succeeded after {:.2f} seconds".format(
            func_name, end_time - start_time))
        return (True, result)

    end_time = time.time()
    logger.error("'{}' failed after {:.2f} seconds".format(
        func_name, end_time - start_time))
    return (False, result)
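
# Illustrative polling pattern (a sketch; the 'router' handle and the vtysh
# command are assumptions): router_json_cmp() returns None on a match, so
# polling until the result equals None waits for the expected routes.
#
#   expect = {'10.0.2.0/24': [{'protocol': 'ospf'}]}
#   test_func = functools.partial(
#       router_json_cmp, router, 'show ip route json', expect)
#   success, result = run_and_expect(test_func, None, count=40, wait=2)
#   assert success, 'route not found:\n{}'.format(result)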

def int2dpid(dpid):
    "Converting Integer to DPID"

    try:
        dpid = hex(dpid)[2:]
        dpid = '0' * (16 - len(dpid)) + dpid
        return dpid
    except IndexError:
        raise Exception('Unable to derive default datapath ID - '
                        'please either specify a dpid or use a '
                        'canonical switch name such as s23.')

def pid_exists(pid):
    "Check whether pid exists in the current process table."

    if pid <= 0:
        return False
    try:
        os.kill(pid, 0)
    except OSError as err:
        if err.errno == errno.ESRCH:
            # ESRCH == No such process
            return False
        elif err.errno == errno.EPERM:
            # EPERM clearly means there's a process to deny access to
            return True
        else:
            # According to "man 2 kill" possible error values are
            # (EINVAL, EPERM, ESRCH)
            raise
    else:
        return True

def get_textdiff(text1, text2, title1="", title2="", **opts):
    "Returns empty string if same or formatted diff"

    diff = '\n'.join(difflib.unified_diff(text1, text2,
                                          fromfile=title1, tofile=title2, **opts))
    # Clean up line endings
    diff = os.linesep.join([s for s in diff.splitlines() if s])
    return diff

def difflines(text1, text2, title1='', title2='', **opts):
    "Wrapper for get_textdiff to avoid string transformations."
    text1 = ('\n'.join(text1.rstrip().splitlines()) + '\n').splitlines(1)
    text2 = ('\n'.join(text2.rstrip().splitlines()) + '\n').splitlines(1)
    return get_textdiff(text1, text2, title1, title2, **opts)

def get_file(content):
    """
    Generates a temporary file in '/tmp' with `content` and returns the file name.
    """
    fde = tempfile.NamedTemporaryFile(mode='w', delete=False)
    fname = fde.name
    fde.write(content)
    fde.close()
    return fname

def normalize_text(text):
    """
    Strips formatting spaces/tabs, carriage returns and trailing whitespace.
    """
    text = re.sub(r'[ \t]+', ' ', text)
    text = re.sub(r'\r', '', text)

    # Remove whitespace at the end of each line.
    text = re.sub(r'[ \t]+\n', '\n', text)
    # Remove whitespace at the end of the text.
    text = text.rstrip()

    return text

def module_present(module, load=True):
    """
    Returns whether `module` is present.

    If `load` is true, it will try to load it via modprobe.
    """
    with open('/proc/modules', 'r') as modules_file:
        if module.replace('-', '_') in modules_file.read():
            return True
    cmd = '/sbin/modprobe {}{}'.format('' if load else '-n ',
                                       module)
    if os.system(cmd) != 0:
        return False
    else:
        return True
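
# Illustrative usage (a sketch): probe without loading first, then load the
# module on demand if the dry run says it is available.
#
#   if not module_present('mpls-router', load=False):
#       module_present('mpls-router')  # runs '/sbin/modprobe mpls-router'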

def version_cmp(v1, v2):
    """
    Compare two version strings and returns:

    * `-1`: if `v1` is less than `v2`
    * `0`: if `v1` is equal to `v2`
    * `1`: if `v1` is greater than `v2`

    Raises `ValueError` if versions are not well formatted.
    """
    vregex = r'(?P<whole>\d+(\.(\d+))*)'
    v1m = re.match(vregex, v1)
    v2m = re.match(vregex, v2)
    if v1m is None or v2m is None:
        raise ValueError("got an invalid version string")

    # Split values
    v1g = v1m.group('whole').split('.')
    v2g = v2m.group('whole').split('.')

    # Get the longest version string
    vnum = len(v1g)
    if len(v2g) > vnum:
        vnum = len(v2g)

    # Reverse list because we are going to pop the tail
    v1g.reverse()
    v2g.reverse()
    for _ in range(vnum):
        try:
            v1n = int(v1g.pop())
        except IndexError:
            while v2g:
                v2n = int(v2g.pop())
                if v2n > 0:
                    return -1
            break

        try:
            v2n = int(v2g.pop())
        except IndexError:
            if v1n > 0:
                return 1
            while v1g:
                v1n = int(v1g.pop())
                if v1n > 0:
                    return 1
            break

        if v1n > v2n:
            return 1
        if v1n < v2n:
            return -1
    return 0
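
# Illustrative results (a sketch of expected return values, not a test):
#
#   version_cmp('3.0.2', '3.0.10')  # -> -1 (second version is greater)
#   version_cmp('4.19', '4.19.0')   # ->  0 (missing fields compare as zero)
#   version_cmp('5.1', '4.5')       # ->  1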

def interface_set_status(node, ifacename, ifaceaction=False, vrf_name=None):
    if ifaceaction:
        str_ifaceaction = 'no shutdown'
    else:
        str_ifaceaction = 'shutdown'
    if vrf_name == None:
        cmd = 'vtysh -c \"configure terminal\" -c \"interface {0}\" -c \"{1}\"'.format(ifacename, str_ifaceaction)
    else:
        cmd = 'vtysh -c \"configure terminal\" -c \"interface {0} vrf {1}\" -c \"{2}\"'.format(ifacename, vrf_name, str_ifaceaction)
    node.run(cmd)
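
# Illustrative usage (a sketch; the node handle and interface names are
# assumptions):
#
#   interface_set_status(router, 'r1-eth0', ifaceaction=False)  # 'shutdown'
#   interface_set_status(router, 'r1-eth0', ifaceaction=True, vrf_name='blue')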

def ip4_route_zebra(node, vrf_name=None):
    """
    Gets the output of the 'show ip route' command. It can be used
    for comparing the output to a reference.
    """
    if vrf_name == None:
        tmp = node.vtysh_cmd('show ip route')
    else:
        tmp = node.vtysh_cmd('show ip route vrf {0}'.format(vrf_name))
    output = re.sub(r" [0-2][0-9]:[0-5][0-9]:[0-5][0-9]", " XX:XX:XX", tmp)

    lines = output.splitlines()
    header_found = False
    while lines and (not lines[0].strip() or not header_found):
        if '> - selected route' in lines[0]:
            header_found = True
        lines = lines[1:]
    return '\n'.join(lines)

def proto_name_to_number(protocol):
    return {
        'bgp':    '186',
        'isis':   '187',
        'ospf':   '188',
        'rip':    '189',
        'ripng':  '190',
        'nhrp':   '191',
        'eigrp':  '192',
        'ldp':    '193',
        'sharp':  '194',
        'pbr':    '195',
        'static': '196'
    }.get(protocol, protocol)  # default return same as input
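
# Illustrative results (a sketch): known daemon names map to the numeric
# protocol IDs used for kernel routes, anything else is returned unchanged.
#
#   proto_name_to_number('ospf')    # -> '188'
#   proto_name_to_number('kernel')  # -> 'kernel'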

def ip4_route(node):
    """
    Gets a structured return of the command 'ip route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(node.run('ip route')).splitlines()
    result = {}
    for line in output:
        columns = line.split(' ')
        route = result[columns[0]] = {}
        prev = None
        for column in columns:
            if prev == 'dev':
                route['dev'] = column
            if prev == 'via':
                route['via'] = column
            if prev == 'proto':
                # translate protocol names back to numbers
                route['proto'] = proto_name_to_number(column)
            if prev == 'metric':
                route['metric'] = column
            if prev == 'scope':
                route['scope'] = column
            prev = column

    return result

def ip6_route(node):
    """
    Gets a structured return of the command 'ip -6 route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(node.run('ip -6 route')).splitlines()
    result = {}
    for line in output:
        columns = line.split(' ')
        route = result[columns[0]] = {}
        prev = None
        for column in columns:
            if prev == 'dev':
                route['dev'] = column
            if prev == 'via':
                route['via'] = column
            if prev == 'proto':
                # translate protocol names back to numbers
                route['proto'] = proto_name_to_number(column)
            if prev == 'metric':
                route['metric'] = column
            if prev == 'pref':
                route['pref'] = column
            prev = column

    return result
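
# Illustrative check combining ip4_route() and json_cmp()
# (a sketch; the router handle, interface and prefix are assumptions):
#
#   routes = ip4_route(router)
#   expect = {'10.0.2.0/24': {'dev': 'r1-eth1', 'proto': '188'}}
#   assert json_cmp(routes, expect) is None, 'OSPF route missing from kernel'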

def sleep(amount, reason=None):
    """
    Sleep wrapper that registers in the log the amount of sleep
    """
    if reason is None:
        logger.info('Sleeping for {} seconds'.format(amount))
    else:
        logger.info(reason + ' ({} seconds)'.format(amount))

    time.sleep(amount)

def checkAddressSanitizerError(output, router, component):
    "Checks for AddressSanitizer in output. If found, then logs it and returns true, false otherwise"

    addressSantizerError = re.search('(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*) ', output)
    if addressSantizerError:
        sys.stderr.write("%s: %s triggered an exception by AddressSanitizer\n" % (router, component))
        # Sanitizer Error found in log
        pidMark = addressSantizerError.group(1)
        addressSantizerLog = re.search('%s(.*)%s' % (pidMark, pidMark), output, re.DOTALL)
        if addressSantizerLog:
            callingTest = os.path.basename(sys._current_frames().values()[0].f_back.f_back.f_globals['__file__'])
            callingProc = sys._getframe(2).f_code.co_name
            with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile:
                sys.stderr.write('\n'.join(addressSantizerLog.group(1).splitlines()) + '\n')
                addrSanFile.write("## Error: %s\n\n" % addressSantizerError.group(2))
                addrSanFile.write("### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n" % (callingTest, callingProc, router))
                addrSanFile.write('    ' + '\n    '.join(addressSantizerLog.group(1).splitlines()) + '\n')
                addrSanFile.write("\n---------------\n")
        return True

    return False

def addRouter(topo, name):
    "Adding a FRRouter (or Quagga) to Topology"

    MyPrivateDirs = ['/etc/frr',
                     '/etc/quagga',
                     '/var/run/frr',
                     '/var/run/quagga',
                     '/var/log']
    return topo.addNode(name, cls=Router, privateDirs=MyPrivateDirs)

def set_sysctl(node, sysctl, value):
    "Set a sysctl value and return None on success or an error string"
    valuestr = '{}'.format(value)
    command = "sysctl {0}={1}".format(sysctl, valuestr)
    cmdret = node.cmd(command)

    matches = re.search(r'([^ ]+) = ([^\s]+)', cmdret)
    if matches is None:
        return cmdret
    if matches.group(1) != sysctl:
        return cmdret
    if matches.group(2) != valuestr:
        return cmdret

    return None


def assert_sysctl(node, sysctl, value):
    "Set and assert that the sysctl is set with the specified value."
    assert set_sysctl(node, sysctl, value) is None
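
# Illustrative usage (a sketch; the node handle is an assumption):
#
#   assert_sysctl(node, 'net.ipv4.ip_forward', 1)   # raises AssertionError on failure
#   err = set_sysctl(node, 'net.ipv6.conf.all.forwarding', 1)
#   if err is not None:
#       logger.warning('could not set sysctl: {}'.format(err))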

class LinuxRouter(Node):
    "A Node with IPv4/IPv6 forwarding enabled."

    def config(self, **params):
        super(LinuxRouter, self).config(**params)
        # Enable forwarding on the router
        assert_sysctl(self, 'net.ipv4.ip_forward', 1)
        assert_sysctl(self, 'net.ipv6.conf.all.forwarding', 1)

    def terminate(self):
        """
        Terminate generic LinuxRouter Mininet instance
        """
        set_sysctl(self, 'net.ipv4.ip_forward', 0)
        set_sysctl(self, 'net.ipv6.conf.all.forwarding', 0)
        super(LinuxRouter, self).terminate()

class Router(Node):
    "A Node with IPv4/IPv6 forwarding enabled and Quagga as Routing Engine"

    def __init__(self, name, **params):
        super(Router, self).__init__(name, **params)
        self.logdir = params.get('logdir', get_test_logdir(name, True))
        self.daemondir = None
        self.hasmpls = False
        self.routertype = 'frr'
        self.daemons = {'zebra': 0, 'ripd': 0, 'ripngd': 0, 'ospfd': 0,
                        'ospf6d': 0, 'isisd': 0, 'bgpd': 0, 'pimd': 0,
                        'ldpd': 0, 'eigrpd': 0, 'nhrpd': 0, 'staticd': 0,
                        'bfdd': 0}
        self.daemons_options = {'zebra': ''}
        self.reportCores = True
        self.version = None

    def _config_frr(self, **params):
        "Configure FRR binaries"
        self.daemondir = params.get('frrdir')
        if self.daemondir is None:
            self.daemondir = '/usr/lib/frr'

        zebra_path = os.path.join(self.daemondir, 'zebra')
        if not os.path.isfile(zebra_path):
            raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path))

    def _config_quagga(self, **params):
        "Configure Quagga binaries"
        self.daemondir = params.get('quaggadir')
        if self.daemondir is None:
            self.daemondir = '/usr/lib/quagga'

        zebra_path = os.path.join(self.daemondir, 'zebra')
        if not os.path.isfile(zebra_path):
            raise Exception("Quagga zebra binary doesn't exist at {}".format(zebra_path))

    # pylint: disable=W0221
    # Some params are only meaningful for the parent class.
    def config(self, **params):
        super(Router, self).config(**params)

        # User did not specify the daemons directory, try to autodetect it.
        self.daemondir = params.get('daemondir')
        if self.daemondir is None:
            self.routertype = params.get('routertype', 'frr')
            if self.routertype == 'quagga':
                self._config_quagga(**params)
            else:
                self._config_frr(**params)
        else:
            # Test the provided path
            zpath = os.path.join(self.daemondir, 'zebra')
            if not os.path.isfile(zpath):
                raise Exception('No zebra binary found in {}'.format(zpath))
            # Allow user to specify routertype when the path was specified.
            if params.get('routertype') is not None:
                self.routertype = self.params.get('routertype')

        # Enable forwarding on the router
        assert_sysctl(self, 'net.ipv4.ip_forward', 1)
        assert_sysctl(self, 'net.ipv6.conf.all.forwarding', 1)
        # Enable coredumps
        assert_sysctl(self, 'kernel.core_uses_pid', 1)
        assert_sysctl(self, 'fs.suid_dumpable', 1)
        # this applies to the kernel not the namespace...
        # original on ubuntu 17.x, but apport won't save as in namespace
        # |/usr/share/apport/apport %p %s %c %d %P
        corefile = '%e_core-sig_%s-pid_%p.dmp'
        assert_sysctl(self, 'kernel.core_pattern', corefile)
        self.cmd('ulimit -c unlimited')
        # Set ownership of config files
        self.cmd('chown {0}:{0}vty /etc/{0}'.format(self.routertype))

    def terminate(self):
        # Delete Running Quagga or FRR Daemons
        self.stopRouter()
        # rundaemons = self.cmd('ls -1 /var/run/%s/*.pid' % self.routertype)
        # for d in StringIO.StringIO(rundaemons):
        #     self.cmd('kill -7 `cat %s`' % d.rstrip())
        #     self.waitOutput()
        # Disable forwarding
        set_sysctl(self, 'net.ipv4.ip_forward', 0)
        set_sysctl(self, 'net.ipv6.conf.all.forwarding', 0)
        super(Router, self).terminate()
        os.system('chmod -R go+rw /tmp/topotests')

    def stopRouter(self, wait=True, assertOnError=True, minErrorVersion='5.1'):
        # Stop Running Quagga or FRR Daemons
        rundaemons = self.cmd('ls -1 /var/run/%s/*.pid' % self.routertype)
        errors = ""
        if re.search(r"No such file or directory", rundaemons):
            return errors
        if rundaemons is not None:
            numRunning = 0
            for d in StringIO.StringIO(rundaemons):
                daemonpid = self.cmd('cat %s' % d.rstrip()).rstrip()
                if (daemonpid.isdigit() and pid_exists(int(daemonpid))):
                    logger.info('{}: stopping {}'.format(
                        self.name,
                        os.path.basename(d.rstrip().rsplit(".", 1)[0])
                    ))
                    self.cmd('kill -TERM %s' % daemonpid)
                    self.waitOutput()
                    if pid_exists(int(daemonpid)):
                        numRunning += 1
            if wait and numRunning > 0:
                sleep(2, '{}: waiting for daemons stopping'.format(self.name))
                # 2nd round of kill if daemons didn't exit
                for d in StringIO.StringIO(rundaemons):
                    daemonpid = self.cmd('cat %s' % d.rstrip()).rstrip()
                    if (daemonpid.isdigit() and pid_exists(int(daemonpid))):
                        logger.info('{}: killing {}'.format(
                            self.name,
                            os.path.basename(d.rstrip().rsplit(".", 1)[0])
                        ))
                        self.cmd('kill -7 %s' % daemonpid)
                        self.waitOutput()
                    self.cmd('rm -- {}'.format(d.rstrip()))
        if wait:
            errors = self.checkRouterCores(reportOnce=True)
            if self.checkRouterVersion('<', minErrorVersion):
                # ignore errors in old versions
                errors = ""
            if assertOnError and len(errors) > 0:
                assert "Errors found - details follow:" == 0, errors
        return errors

    def removeIPs(self):
        for interface in self.intfNames():
            self.cmd('ip address flush', interface)

    def checkCapability(self, daemon, param):
        if param is not None:
            daemon_path = os.path.join(self.daemondir, daemon)
            daemon_search_option = param.replace('-', '')
            output = self.cmd('{0} -h | grep {1}'.format(
                daemon_path, daemon_search_option))
            if daemon_search_option not in output:
                return False
        return True

    def loadConf(self, daemon, source=None, param=None):
        # print "Daemons before:", self.daemons
        if daemon in self.daemons.keys():
            self.daemons[daemon] = 1
            if param is not None:
                self.daemons_options[daemon] = param
            if source is None:
                self.cmd('touch /etc/%s/%s.conf' % (self.routertype, daemon))
                self.waitOutput()
            else:
                self.cmd('cp %s /etc/%s/%s.conf' % (source, self.routertype, daemon))
                self.waitOutput()
            self.cmd('chmod 640 /etc/%s/%s.conf' % (self.routertype, daemon))
            self.waitOutput()
            self.cmd('chown %s:%s /etc/%s/%s.conf' % (self.routertype, self.routertype, self.routertype, daemon))
            self.waitOutput()
            if (daemon == 'zebra') and (self.daemons['staticd'] == 0):
                # Add staticd with zebra - if it exists
                staticd_path = os.path.join(self.daemondir, 'staticd')
                if os.path.isfile(staticd_path):
                    self.daemons['staticd'] = 1
                    self.daemons_options['staticd'] = ''
                    # Auto-Started staticd has no config, so it will read from zebra config
        else:
            logger.info('No daemon {} known'.format(daemon))
        # print "Daemons after:", self.daemons

    def startRouter(self, tgen=None):
        # Disable integrated-vtysh-config
        self.cmd('echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf' % self.routertype)
        self.cmd('chown %s:%svty /etc/%s/vtysh.conf' % (self.routertype, self.routertype, self.routertype))
        # TODO remove the following lines after all tests are migrated to Topogen.
        # Try to find relevant old logfiles in /tmp and delete them
        map(os.remove, glob.glob('{}/{}/*.log'.format(self.logdir, self.name)))
        # Remove old core files
        map(os.remove, glob.glob('{}/{}/*.dmp'.format(self.logdir, self.name)))
        # Remove IP addresses from OS first - we have them in zebra.conf
        self.removeIPs()
        # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher
        # No error - but return message and skip all the tests
        if self.daemons['ldpd'] == 1:
            ldpd_path = os.path.join(self.daemondir, 'ldpd')
            if not os.path.isfile(ldpd_path):
                logger.info("LDP Test, but no ldpd compiled or installed")
                return "LDP Test, but no ldpd compiled or installed"

            if version_cmp(platform.release(), '4.5') < 0:
                logger.info("LDP Test need Linux Kernel 4.5 minimum")
                return "LDP Test need Linux Kernel 4.5 minimum"

            # Check if have mpls
            if tgen != None:
                self.hasmpls = tgen.hasmpls
                if self.hasmpls != True:
                    logger.info("LDP/MPLS Tests will be skipped, platform missing module(s)")
            else:
                # Test for MPLS Kernel modules available
                self.hasmpls = False
                if not module_present('mpls-router'):
                    logger.info('MPLS tests will not run (missing mpls-router kernel module)')
                elif not module_present('mpls-iptunnel'):
                    logger.info('MPLS tests will not run (missing mpls-iptunnel kernel module)')
                else:
                    self.hasmpls = True
            if self.hasmpls != True:
                return "LDP/MPLS Tests need mpls kernel modules"
        self.cmd('echo 100000 > /proc/sys/net/mpls/platform_labels')

        if self.daemons['eigrpd'] == 1:
            eigrpd_path = os.path.join(self.daemondir, 'eigrpd')
            if not os.path.isfile(eigrpd_path):
                logger.info("EIGRP Test, but no eigrpd compiled or installed")
                return "EIGRP Test, but no eigrpd compiled or installed"

        if self.daemons['bfdd'] == 1:
            bfdd_path = os.path.join(self.daemondir, 'bfdd')
            if not os.path.isfile(bfdd_path):
                logger.info("BFD Test, but no bfdd compiled or installed")
                return "BFD Test, but no bfdd compiled or installed"

        self.restartRouter()
        return ""

    def restartRouter(self):
        # Starts actual daemons without init (ie restart)
        # cd to per node directory
        self.cmd('cd {}/{}'.format(self.logdir, self.name))
        self.cmd('umask 000')
        # Re-enable to allow for report per run
        self.reportCores = True
        if self.version == None:
            self.version = self.cmd(os.path.join(self.daemondir, 'bgpd') + ' -v').split()[2]
            logger.info('{}: running version: {}'.format(self.name, self.version))
        # Start Zebra first
        if self.daemons['zebra'] == 1:
            zebra_path = os.path.join(self.daemondir, 'zebra')
            zebra_option = self.daemons_options['zebra']
            self.cmd('{0} {1} > zebra.out 2> zebra.err &'.format(
                zebra_path, zebra_option, self.logdir, self.name
            ))
            self.waitOutput()
            logger.debug('{}: {} zebra started'.format(self, self.routertype))
            sleep(1, '{}: waiting for zebra to start'.format(self.name))
        # Start staticd next if required
        if self.daemons['staticd'] == 1:
            staticd_path = os.path.join(self.daemondir, 'staticd')
            staticd_option = self.daemons_options['staticd']
            self.cmd('{0} {1} > staticd.out 2> staticd.err &'.format(
                staticd_path, staticd_option, self.logdir, self.name
            ))
            self.waitOutput()
            logger.debug('{}: {} staticd started'.format(self, self.routertype))
            sleep(1, '{}: waiting for staticd to start'.format(self.name))
        # Fix Link-Local Addresses
        # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
        self.cmd('for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; IFS=\':\'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done')
        # Now start all the other daemons
        for daemon in self.daemons:
            # Skip disabled daemons and zebra
            if self.daemons[daemon] == 0 or daemon == 'zebra' or daemon == 'staticd':
                continue
            daemon_path = os.path.join(self.daemondir, daemon)
            self.cmd('{0} > {1}.out 2> {1}.err &'.format(daemon_path, daemon))
            self.waitOutput()
            logger.debug('{}: {} {} started'.format(self, self.routertype, daemon))

    def getStdErr(self, daemon):
        return self.getLog('err', daemon)

    def getStdOut(self, daemon):
        return self.getLog('out', daemon)

    def getLog(self, log, daemon):
        return self.cmd('cat {}/{}/{}.{}'.format(self.logdir, self.name, daemon, log))

    def checkRouterCores(self, reportLeaks=True, reportOnce=False):
        if reportOnce and not self.reportCores:
            return
        reportMade = False
        traces = ""
        for daemon in self.daemons:
            if (self.daemons[daemon] == 1):
                # Look for core file
                corefiles = glob.glob('{}/{}/{}_core*.dmp'.format(
                    self.logdir, self.name, daemon))
                if (len(corefiles) > 0):
                    daemon_path = os.path.join(self.daemondir, daemon)
                    backtrace = subprocess.check_output([
                        "gdb {} {} --batch -ex bt 2> /dev/null".format(daemon_path, corefiles[0])
                    ], shell=True)
                    sys.stderr.write("\n%s: %s crashed. Core file found - Backtrace follows:\n" % (self.name, daemon))
                    sys.stderr.write("%s" % backtrace)
                    traces = traces + "\n%s: %s crashed. Core file found - Backtrace follows:\n%s" % (self.name, daemon, backtrace)
                    reportMade = True
                elif reportLeaks:
                    log = self.getStdErr(daemon)
                    if "memstats" in log:
                        sys.stderr.write("%s: %s has memory leaks:\n" % (self.name, daemon))
                        traces = traces + "\n%s: %s has memory leaks:\n" % (self.name, daemon)
                        log = re.sub("core_handler: ", "", log)
                        log = re.sub(r"(showing active allocations in memory group [a-zA-Z0-9]+)", r"\n  ## \1", log)
                        log = re.sub("memstats:  ", "    ", log)
                        sys.stderr.write(log)
                        reportMade = True
                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(self.getStdErr(daemon), self.name, daemon):
                    sys.stderr.write("%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon))
                    traces = traces + "\n%s: Daemon %s killed by AddressSanitizer" % (self.name, daemon)
                    reportMade = True
        if reportMade:
            self.reportCores = False
        return traces

    def checkRouterRunning(self):
        "Check if router daemons are running and collect crashinfo if they don't run"

        global fatal_error

        daemonsRunning = self.cmd('vtysh -c "show log" | grep "Logging configuration for"')
        # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
        if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
            return "%s: vtysh killed by AddressSanitizer" % (self.name)

        for daemon in self.daemons:
            if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning):
                sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
                # Look for core file
                corefiles = glob.glob('{}/{}/{}_core*.dmp'.format(
                    self.logdir, self.name, daemon))
                if (len(corefiles) > 0):
                    daemon_path = os.path.join(self.daemondir, daemon)
                    backtrace = subprocess.check_output([
                        "gdb {} {} --batch -ex bt 2> /dev/null".format(daemon_path, corefiles[0])
                    ], shell=True)
                    sys.stderr.write("\n%s: %s crashed. Core file found - Backtrace follows:\n" % (self.name, daemon))
                    sys.stderr.write("%s\n" % backtrace)
                else:
                    # No core found - If we find matching logfile in /tmp, then print last 20 lines from it.
                    if os.path.isfile('{}/{}/{}.log'.format(self.logdir, self.name, daemon)):
                        log_tail = subprocess.check_output([
                            "tail -n20 {}/{}/{}.log 2> /dev/null".format(
                                self.logdir, self.name, daemon)
                        ], shell=True)
                        sys.stderr.write("\nFrom %s %s %s log file:\n" % (self.routertype, self.name, daemon))
                        sys.stderr.write("%s\n" % log_tail)

                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(self.getStdErr(daemon), self.name, daemon):
                    return "%s: Daemon %s not running - killed by AddressSanitizer" % (self.name, daemon)

                return "%s: Daemon %s not running" % (self.name, daemon)
        return ""

    def checkRouterVersion(self, cmpop, version):
        """
        Compares router version using operation `cmpop` with `version`.
        Valid `cmpop` values:
        * `>=`: has the same version or greater
        * `>`: has greater version
        * `=`: has the same version
        * `<`: has a lesser version
        * `<=`: has the same version or lesser

        Usage example: router.checkRouterVersion('>', '1.0')
        """
        # Make sure we have version information first
        if self.version == None:
            self.version = self.cmd(os.path.join(self.daemondir, 'bgpd') + ' -v').split()[2]
            logger.info('{}: running version: {}'.format(self.name, self.version))

        rversion = self.version
        if rversion is None:
            return False

        result = version_cmp(rversion, version)
        if cmpop == '>=':
            return result >= 0
        if cmpop == '>':
            return result > 0
        if cmpop == '=':
            return result == 0
        if cmpop == '<':
            return result < 0
        if cmpop == '<=':
            return result <= 0

    def get_ipv6_linklocal(self):
        "Get LinkLocal Addresses from interfaces"

        linklocal = []

        ifaces = self.cmd('ip -6 address')
        # Fix newlines (make them all the same)
        ifaces = ('\n'.join(ifaces.splitlines()) + '\n').splitlines()
        interface = ""
        ll_per_if_count = 0
        for line in ifaces:
            m = re.search('[0-9]+: ([^:@]+)[@if0-9:]+ <', line)
            if m:
                interface = m.group(1)
                ll_per_if_count = 0
            m = re.search('inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link', line)
            if m:
                local = m.group(1)
                ll_per_if_count += 1
                if (ll_per_if_count > 1):
                    linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
                else:
                    linklocal += [[interface, local]]
        return linklocal

    def daemon_available(self, daemon):
        "Check if specified daemon is installed (and for ldp if kernel supports MPLS)"

        daemon_path = os.path.join(self.daemondir, daemon)
        if not os.path.isfile(daemon_path):
            return False
        if (daemon == 'ldpd'):
            if version_cmp(platform.release(), '4.5') < 0:
                return False
            if not module_present('mpls-router', load=False):
                return False
            if not module_present('mpls-iptunnel', load=False):
                return False

        return True

    def get_routertype(self):
        "Return the type of Router (frr or quagga)"

        return self.routertype

    def report_memory_leaks(self, filename_prefix, testscript):
        "Report Memory Leaks to file prefixed with given string"

        leakfound = False
        filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
        for daemon in self.daemons:
            if (self.daemons[daemon] == 1):
                log = self.getStdErr(daemon)
                if "memstats" in log:
                    # Found memory leak
                    logger.info('\nRouter {} {} StdErr Log:\n{}'.format(
                        self.name, daemon, log))
                    if not leakfound:
                        leakfound = True
                        # Check if file already exists
                        fileexists = os.path.isfile(filename)
                        leakfile = open(filename, "a")
                        if not fileexists:
                            # New file - add header
                            leakfile.write("# Memory Leak Detection for topotest %s\n\n" % testscript)
                    leakfile.write("## Router %s\n" % self.name)
                    leakfile.write("### Process %s\n" % daemon)
                    log = re.sub("core_handler: ", "", log)
                    log = re.sub(r"(showing active allocations in memory group [a-zA-Z0-9]+)", r"\n#### \1\n", log)
                    log = re.sub("memstats:  ", "    ", log)
                    leakfile.write(log)
                    leakfile.write("\n")
        if leakfound:
            leakfile.close()

class LegacySwitch(OVSSwitch):
    "A Legacy Switch without OpenFlow"

    def __init__(self, name, **params):
        OVSSwitch.__init__(self, name, failMode='standalone', **params)
        self.switchIP = None