2017-01-30 22:50:48 +01:00
#!/usr/bin/env python
#
# topotest.py
# Library of helper functions for NetDEF Topology Tests
#
# Copyright (c) 2016 by
# Network Device Education Foundation, Inc. ("NetDEF")
#
# Permission to use, copy, modify, and/or distribute this software
# for any purpose with or without fee is hereby granted, provided
# that the above copyright notice and this permission notice appear
# in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NETDEF DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NETDEF BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
#
import os
2017-04-27 04:54:25 +02:00
import errno
2017-01-30 22:50:48 +01:00
import re
import sys
import glob
import StringIO
import subprocess
2017-06-15 05:25:54 +02:00
import tempfile
2017-01-30 22:50:48 +01:00
import platform
2017-05-31 04:39:21 +02:00
import difflib
2017-07-24 16:53:19 +02:00
import time
2017-01-30 22:50:48 +01:00
2017-07-07 14:57:07 +02:00
from lib . topolog import logger
2017-01-30 22:50:48 +01:00
from mininet . topo import Topo
from mininet . net import Mininet
from mininet . node import Node , OVSSwitch , Host
from mininet . log import setLogLevel , info
from mininet . cli import CLI
from mininet . link import Intf
2017-06-29 17:18:46 +02:00
class json_cmp_result(object):
    "json_cmp result class for better assertion messages"

    def __init__(self):
        # Accumulated human-readable error messages.
        self.errors = []

    def add_error(self, error):
        "Append error message to the result"
        self.errors.append(error)

    def has_errors(self):
        "Returns True if there were errors, otherwise False."
        return len(self.errors) > 0


def json_cmp(d1, d2, reason=False):
    """
    JSON compare function. Receives two parameters:
    * `d1`: json value (the data we actually have)
    * `d2`: json subset which we expect

    Returns `None` when all keys that `d1` has matches `d2`,
    otherwise a `json_cmp_result` describing what failed.

    Note: key absence can be tested by adding a key with value `None`.
    """
    # Breadth-first work queue of (actual, expected, human-readable path).
    # Appending to the list while iterating it is the intended traversal.
    squeue = [(d1, d2, 'json')]
    result = json_cmp_result()
    for s in squeue:
        nd1, nd2, parent = s
        s1, s2 = set(nd1), set(nd2)

        # Expect all required fields to exist (None-valued keys are
        # "must not exist" markers, not requirements).
        s2_req = set([key for key in nd2 if nd2[key] is not None])
        diff = s2_req - s1
        if diff != set({}):
            result.add_error('expected key(s) {} in {} (have {})'.format(
                str(list(diff)), parent, str(list(s1))))

        for key in s2.intersection(s1):
            # Test for non existence of key in d2
            if nd2[key] is None:
                result.add_error('"{}" should not exist in {} (have {})'.format(
                    key, parent, str(s1)))
                continue
            # If nd1 key is a dict, we have to recurse in it later.
            if isinstance(nd2[key], type({})):
                if not isinstance(nd1[key], type({})):
                    result.add_error(
                        '{}["{}"] has different type than expected '.format(parent, key) +
                        '(have {}, expected {})'.format(type(nd1[key]), type(nd2[key])))
                    continue
                nparent = '{}["{}"]'.format(parent, key)
                squeue.append((nd1[key], nd2[key], nparent))
                continue
            # Check list items
            if isinstance(nd2[key], type([])):
                if not isinstance(nd1[key], type([])):
                    result.add_error(
                        '{}["{}"] has different type than expected '.format(parent, key) +
                        '(have {}, expected {})'.format(type(nd1[key]), type(nd2[key])))
                    continue
                # Check list size
                if len(nd2[key]) > len(nd1[key]):
                    result.add_error(
                        '{}["{}"] too few items '.format(parent, key) +
                        '(have ({}) "{}", expected ({}) "{}")'.format(
                            len(nd1[key]), str(nd1[key]), len(nd2[key]), str(nd2[key])))
                    continue
                # List all unmatched items errors.
                # BUGFIX: the loop previously aborted ("if matched: break")
                # as soon as the FIRST expected item matched, so the
                # remaining expected items were never checked and mismatching
                # lists wrongly passed the comparison. Every expected item
                # must find a match somewhere in the actual list.
                unmatched = []
                for expected in nd2[key]:
                    matched = False
                    for value in nd1[key]:
                        if json_cmp({'json': value}, {'json': expected}) is None:
                            matched = True
                            break
                    if not matched:
                        unmatched.append(expected)
                # If there are unmatched items, error out.
                if unmatched:
                    result.add_error(
                        '{}["{}"] value is different (have "{}", expected "{}")'.format(
                            parent, key, str(nd1[key]), str(nd2[key])))
                continue
            # Compare JSON values
            if nd1[key] != nd2[key]:
                result.add_error(
                    '{}["{}"] value is different (have "{}", expected "{}")'.format(
                        parent, key, str(nd1[key]), str(nd2[key])))
                continue

    if result.has_errors():
        return result

    return None
2017-06-15 05:25:54 +02:00
def run_and_expect(func, what, count=20, wait=3):
    """
    Run `func` and compare the result with `what`. Do it for `count` times
    waiting `wait` seconds between tries. By default it tries 20 times with
    3 seconds delay between tries.

    Returns (True, func-return) on success or
    (False, func-return) on failure.
    """
    # Poll until the expected value shows up or the attempts are exhausted.
    for _ in range(count):
        result = func()
        if result == what:
            return (True, result)
        time.sleep(wait)
    return (False, result)
2017-01-30 22:50:48 +01:00
def int2dpid(dpid):
    "Converting Integer to DPID"
    try:
        # Strip the '0x' prefix and left-pad with zeros to the
        # 16-character datapath ID width.
        hexstr = hex(dpid)[2:]
        return hexstr.zfill(16)
    except IndexError:
        raise Exception('Unable to derive default datapath ID - '
                        'please either specify a dpid or use a '
                        'canonical switch name such as s23.')
2017-04-27 04:54:25 +02:00
def pid_exists(pid):
    "Check whether pid exists in the current process table."
    if pid <= 0:
        return False
    try:
        # Signal 0 performs error checking only - no signal is delivered.
        os.kill(pid, 0)
        return True
    except OSError as err:
        if err.errno == errno.ESRCH:
            # ESRCH == No such process
            return False
        if err.errno == errno.EPERM:
            # EPERM clearly means there's a process to deny access to
            return True
        # According to "man 2 kill" possible error values are
        # (EINVAL, EPERM, ESRCH)
        raise
2017-05-31 04:39:21 +02:00
def get_textdiff(text1, text2, title1="", title2=""):
    "Returns empty string if same or formatted diff"
    raw = difflib.context_diff(text1, text2, fromfile=title1, tofile=title2)
    # Join the diff chunks and drop blank lines, normalizing line endings.
    lines = '\n'.join(raw).splitlines()
    return os.linesep.join([line for line in lines if line])

def difflines(text1, text2, title1='', title2=''):
    "Wrapper for get_textdiff to avoid string transformations."
    # Normalize trailing whitespace, then split keeping the line endings
    # (context_diff expects sequences of lines).
    normalized1 = ('\n'.join(text1.rstrip().splitlines()) + '\n').splitlines(1)
    normalized2 = ('\n'.join(text2.rstrip().splitlines()) + '\n').splitlines(1)
    return get_textdiff(normalized1, normalized2, title1, title2)
def get_file(content):
    """
    Generates a temporary file in '/tmp' with `content` and returns the file name.
    """
    # delete=False so the file survives the close and the caller can use it.
    tmpfile = tempfile.NamedTemporaryFile(mode='w', delete=False)
    tmpfile.write(content)
    tmpfile.close()
    return tmpfile.name
2017-06-30 21:09:07 +02:00
def normalize_text(text):
    """
    Strips formating spaces/tabs and carriage returns.
    """
    # Collapse runs of spaces/tabs to a single space, then drop CRs.
    collapsed = re.sub(r'[ \t]+', ' ', text)
    return re.sub(r'\r', '', collapsed)
2017-07-14 19:00:52 +02:00
def version_cmp(v1, v2):
    """
    Compare two version strings and returns:

    * `-1`: if `v1` is less than `v2`
    * `0`: if `v1` is equal to `v2`
    * `1`: if `v1` is greater than `v2`

    Raises `ValueError` if versions are not well formated.
    """
    vregex = r'(?P<whole>\d+(\.(\d+))*)'
    v1m = re.match(vregex, v1)
    v2m = re.match(vregex, v2)
    if v1m is None or v2m is None:
        raise ValueError("got a invalid version string")

    # Convert each dotted component to an integer.
    v1g = [int(part) for part in v1m.group('whole').split('.')]
    v2g = [int(part) for part in v2m.group('whole').split('.')]

    # Zero-pad the shorter version so missing components compare as 0
    # (e.g. '1.0' == '1'), then compare component-wise.
    length = max(len(v1g), len(v2g))
    v1g += [0] * (length - len(v1g))
    v2g += [0] * (length - len(v2g))

    if v1g > v2g:
        return 1
    if v1g < v2g:
        return -1
    return 0
2017-07-03 20:57:20 +02:00
def ip4_route(node):
    """
    Gets a structured return of the command 'ip route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '10.0.1.0/24': {
            'dev': 'eth0',
            'via': '172.16.0.1',
            'proto': '188',
        },
        '10.0.2.0/24': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(node.run('ip route')).splitlines()
    result = {}
    for line in output:
        columns = line.split(' ')
        # First column is the route prefix; it keys the result dict.
        route = result[columns[0]] = {}
        # Remaining columns come in "keyword value" pairs.
        for keyword, value in zip(columns, columns[1:]):
            if keyword in ('dev', 'via', 'proto', 'metric', 'scope'):
                route[keyword] = value
    return result
def ip6_route(node):
    """
    Gets a structured return of the command 'ip -6 route'. It can be used in
    conjunction with json_cmp() to provide accurate assert explanations.

    Return example:
    {
        '2001:db8:1::/64': {
            'dev': 'eth0',
            'proto': '188',
        },
        '2001:db8:2::/64': {
            'dev': 'eth1',
            'proto': 'kernel',
        }
    }
    """
    output = normalize_text(node.run('ip -6 route')).splitlines()
    result = {}
    for line in output:
        columns = line.split(' ')
        # First column is the route prefix; it keys the result dict.
        route = result[columns[0]] = {}
        # Remaining columns come in "keyword value" pairs.
        for keyword, value in zip(columns, columns[1:]):
            if keyword in ('dev', 'via', 'proto', 'metric', 'pref'):
                route[keyword] = value
    return result
2017-07-24 16:53:19 +02:00
def sleep(amount, reason=None):
    """
    Sleep wrapper that registers in the log the amount of sleep
    """
    # Build the log line first, then do the actual wait.
    if reason is None:
        message = 'Sleeping for {} seconds'.format(amount)
    else:
        message = reason + ' ({} seconds)'.format(amount)
    logger.info(message)
    time.sleep(amount)
2017-05-20 11:24:11 +02:00
def checkAddressSanitizerError(output, router, component):
    "Checks for AddressSanitizer in output. If found, then logs it and returns true, false otherwise"

    # ASan reports start with "==<pid>==ERROR: AddressSanitizer: <kind>";
    # group(1) captures the "==<pid>==" marker, group(2) the error kind.
    addressSantizerError = re.search('(==[0-9]+==)ERROR: AddressSanitizer: ([^\s]*)', output)
    if addressSantizerError:
        sys.stderr.write("%s: %s triggered an exception by AddressSanitizer\n" % (router, component))
        # Sanitizer Error found in log
        pidMark = addressSantizerError.group(1)
        # The full report is delimited by two identical "==<pid>==" markers.
        addressSantizerLog = re.search('%s(.*)%s' % (pidMark, pidMark), output, re.DOTALL)
        if addressSantizerLog:
            # NOTE(review): walking frames to find the calling test file is
            # fragile - relies on sys._current_frames().values() being
            # indexable (Python 2 only) and on a fixed call depth; verify
            # before reusing elsewhere.
            callingTest = os.path.basename(sys._current_frames().values()[0].f_back.f_back.f_globals['__file__'])
            callingProc = sys._getframe(2).f_code.co_name
            with open("/tmp/AddressSanitzer.txt", "a") as addrSanFile:
                # Echo the report to stderr and append a markdown-formatted
                # copy (indented code block) to the shared ASan report file.
                sys.stderr.write('\n'.join(addressSantizerLog.group(1).splitlines()) + '\n')
                addrSanFile.write("## Error: %s\n\n" % addressSantizerError.group(2))
                addrSanFile.write("### AddressSanitizer error in topotest `%s`, test `%s`, router `%s`\n\n" % (callingTest, callingProc, router))
                addrSanFile.write("    " + "\n    ".join(addressSantizerLog.group(1).splitlines()) + "\n")
                addrSanFile.write("\n---------------\n")
        return True
    return False
2017-05-20 11:24:11 +02:00
2017-01-30 22:50:48 +01:00
def addRouter(topo, name):
    "Adding a FRRouter (or Quagga) to Topology"
    # Private mount points so each router node gets its own config,
    # runtime and log directories.
    private_dirs = ['/etc/frr',
                    '/etc/quagga',
                    '/var/run/frr',
                    '/var/run/quagga',
                    '/var/log']
    return topo.addNode(name, cls=Router, privateDirs=private_dirs)
2017-06-14 15:30:10 +02:00
def set_sysctl(node, sysctl, value):
    "Set a sysctl value and return None on success or an error string"
    valuestr = '{}'.format(value)
    output = node.cmd("sysctl {0}={1}".format(sysctl, valuestr))
    # On success sysctl echoes back "<name> = <value>"; anything else
    # (no match, wrong key, wrong value) is reported as an error.
    matches = re.search(r'([^ ]+) = ([^\s]+)', output)
    if (matches is not None
            and matches.group(1) == sysctl
            and matches.group(2) == valuestr):
        return None
    return output
def assert_sysctl(node, sysctl, value):
    "Set and assert that the sysctl is set with the specified value."
    # set_sysctl() returns None on success, the raw command output on error.
    result = set_sysctl(node, sysctl, value)
    assert result is None
2017-01-30 22:50:48 +01:00
class LinuxRouter(Node):
    "A Node with IPv4/IPv6 forwarding enabled."

    def config(self, **params):
        # Let mininet configure interfaces/addresses first, then turn the
        # node into a router by enabling kernel forwarding.
        super(LinuxRouter, self).config(**params)
        # Enable forwarding on the router
        assert_sysctl(self, 'net.ipv4.ip_forward', 1)
        assert_sysctl(self, 'net.ipv6.conf.all.forwarding', 1)

    def terminate(self):
        """
        Terminate generic LinuxRouter Mininet instance
        """
        # Best-effort disable of forwarding on teardown (errors ignored,
        # hence set_sysctl rather than assert_sysctl here).
        set_sysctl(self, 'net.ipv4.ip_forward', 0)
        set_sysctl(self, 'net.ipv6.conf.all.forwarding', 0)
        super(LinuxRouter, self).terminate()
class Router(Node):
    "A Node with IPv4/IPv6 forwarding enabled and Quagga as Routing Engine"

    def __init__(self, name, **params):
        super(Router, self).__init__(name, **params)
        # Directory where daemon stdout/stderr and core files are written.
        self.logdir = params.get('logdir', '/tmp')
        self.daemondir = None
        self.routertype = 'frr'
        # Daemon name -> enabled flag (set to 1 by loadConf()).
        self.daemons = {'zebra': 0, 'ripd': 0, 'ripngd': 0, 'ospfd': 0,
                        'ospf6d': 0, 'isisd': 0, 'bgpd': 0, 'pimd': 0,
                        'ldpd': 0}

    def _config_frr(self, **params):
        "Configure FRR binaries"
        self.daemondir = params.get('frrdir')
        if self.daemondir is None:
            self.daemondir = '/usr/lib/frr'

        zebra_path = os.path.join(self.daemondir, 'zebra')
        if not os.path.isfile(zebra_path):
            raise Exception("FRR zebra binary doesn't exist at {}".format(zebra_path))

    def _config_quagga(self, **params):
        "Configure Quagga binaries"
        self.daemondir = params.get('quaggadir')
        if self.daemondir is None:
            self.daemondir = '/usr/lib/quagga'

        zebra_path = os.path.join(self.daemondir, 'zebra')
        if not os.path.isfile(zebra_path):
            raise Exception("Quagga zebra binary doesn't exist at {}".format(zebra_path))

    # pylint: disable=W0221
    # Some params are only meaningful for the parent class.
    def config(self, **params):
        super(Router, self).config(**params)

        # User did not specify the daemons directory, try to autodetect it.
        self.daemondir = params.get('daemondir')
        if self.daemondir is None:
            self.routertype = params.get('routertype', 'frr')
            if self.routertype == 'quagga':
                self._config_quagga(**params)
            else:
                self._config_frr(**params)
        else:
            # Test the provided path
            zpath = os.path.join(self.daemondir, 'zebra')
            if not os.path.isfile(zpath):
                raise Exception('No zebra binary found in {}'.format(zpath))
            # Allow user to specify routertype when the path was specified.
            # BUGFIX: read from the `params` argument (the dict tested on the
            # line above) instead of `self.params`, so an explicitly passed
            # routertype is actually honored.
            if params.get('routertype') is not None:
                self.routertype = params.get('routertype')

        # Enable forwarding on the router
        assert_sysctl(self, 'net.ipv4.ip_forward', 1)
        assert_sysctl(self, 'net.ipv6.conf.all.forwarding', 1)
        # Enable coredumps
        assert_sysctl(self, 'kernel.core_uses_pid', 1)
        assert_sysctl(self, 'fs.suid_dumpable', 2)
        # Core files land in the log directory, named after the router.
        corefile = '{}/{}_%e_core-sig_%s-pid_%p.dmp'.format(self.logdir, self.name)
        assert_sysctl(self, 'kernel.core_pattern', corefile)
        self.cmd('ulimit -c unlimited')
        # Set ownership of config files
        self.cmd('chown {0}:{0}vty /etc/{0}'.format(self.routertype))

    def terminate(self):
        # Delete Running Quagga or FRR Daemons
        self.stopRouter()
        # Disable forwarding
        set_sysctl(self, 'net.ipv4.ip_forward', 0)
        set_sysctl(self, 'net.ipv6.conf.all.forwarding', 0)
        super(Router, self).terminate()

    def stopRouter(self):
        # Stop Running Quagga or FRR Daemons by killing the PIDs recorded
        # in /var/run/<routertype>/*.pid.
        rundaemons = self.cmd('ls -1 /var/run/%s/*.pid' % self.routertype)
        if rundaemons is not None:
            for d in StringIO.StringIO(rundaemons):
                daemonpid = self.cmd('cat %s' % d.rstrip()).rstrip()
                # Only kill PIDs that are numeric and still alive.
                if daemonpid.isdigit() and pid_exists(int(daemonpid)):
                    self.cmd('kill -7 %s' % daemonpid)
                    self.waitOutput()

    def removeIPs(self):
        "Remove all configured addresses from every interface."
        for interface in self.intfNames():
            self.cmd('ip address flush', interface)

    def loadConf(self, daemon, source=None):
        """
        Enable `daemon` and install its configuration file.

        When `source` is None an empty config is created, otherwise
        `source` is copied into place. Ownership and permissions are set
        for the routing daemon user.
        """
        if daemon in self.daemons.keys():
            self.daemons[daemon] = 1
            if source is None:
                self.cmd('touch /etc/%s/%s.conf' % (self.routertype, daemon))
                self.waitOutput()
            else:
                self.cmd('cp %s /etc/%s/%s.conf' % (source, self.routertype, daemon))
                self.waitOutput()
            self.cmd('chmod 640 /etc/%s/%s.conf' % (self.routertype, daemon))
            self.waitOutput()
            self.cmd('chown %s:%s /etc/%s/%s.conf' % (self.routertype, self.routertype, self.routertype, daemon))
            self.waitOutput()
        else:
            logger.warning('No daemon {} known'.format(daemon))

    def startRouter(self):
        """
        Prepare the node and start all enabled daemons.

        Returns an empty string on success, or a skip-reason message when
        LDP prerequisites (binary, kernel >= 4.5, MPLS modules) are missing.
        """
        # Disable integrated-vtysh-config
        self.cmd('echo "no service integrated-vtysh-config" >> /etc/%s/vtysh.conf' % self.routertype)
        self.cmd('chown %s:%svty /etc/%s/vtysh.conf' % (self.routertype, self.routertype, self.routertype))
        # TODO remove the following lines after all tests are migrated to Topogen.
        # Try to find relevant old logfiles in /tmp and delete them
        # (explicit loops instead of map() so the side effect also happens
        # under Python 3, where map() is lazy).
        for oldlog in glob.glob('/tmp/*%s*.log' % self.name):
            os.remove(oldlog)
        # Remove old core files
        for oldcore in glob.glob('/tmp/%s*.dmp' % self.name):
            os.remove(oldcore)
        # Remove IP addresses from OS first - we have them in zebra.conf
        self.removeIPs()
        # If ldp is used, check for LDP to be compiled and Linux Kernel to be 4.5 or higher
        # No error - but return message and skip all the tests
        if self.daemons['ldpd'] == 1:
            ldpd_path = os.path.join(self.daemondir, 'ldpd')
            if not os.path.isfile(ldpd_path):
                logger.warning("LDP Test, but no ldpd compiled or installed")
                return "LDP Test, but no ldpd compiled or installed"

            if version_cmp(platform.release(), '4.5') < 0:
                logger.warning("LDP Test need Linux Kernel 4.5 minimum")
                return "LDP Test need Linux Kernel 4.5 minimum"

            # Check if required kernel modules are available with a dryrun modprobe
            # Silent accept of modprobe command assumes ok status
            if self.cmd('/sbin/modprobe -n mpls-router') != "":
                logger.warning("LDP Test needs mpls-router kernel module")
                return "LDP Test needs mpls-router kernel module"
            if self.cmd('/sbin/modprobe -n mpls-iptunnel') != "":
                logger.warning("LDP Test needs mpls-iptunnel kernel module")
                # BUGFIX: report the module that is actually missing
                # (previously this returned the mpls-router message).
                return "LDP Test needs mpls-iptunnel kernel module"

            self.cmd('/sbin/modprobe mpls-router')
            self.cmd('/sbin/modprobe mpls-iptunnel')
            self.cmd('echo 100000 > /proc/sys/net/mpls/platform_labels')

        # Init done - now restarting daemons
        self.restartRouter()
        return ""

    def restartRouter(self):
        # Starts actual daemons without init (ie restart)
        # Start Zebra first
        if self.daemons['zebra'] == 1:
            zebra_path = os.path.join(self.daemondir, 'zebra')
            self.cmd('{0} > {1}/{2}-zebra.out 2> {1}/{2}-zebra.err &'.format(
                zebra_path, self.logdir, self.name
            ))
            self.waitOutput()
            logger.debug('{}: {} zebra started'.format(self, self.routertype))
            time.sleep(1)
        # Fix Link-Local Addresses
        # Somehow (on Mininet only), Zebra removes the IPv6 Link-Local addresses on start. Fix this
        self.cmd('for i in `ls /sys/class/net/` ; do mac=`cat /sys/class/net/$i/address`; IFS=\':\'; set $mac; unset IFS; ip address add dev $i scope link fe80::$(printf %02x $((0x$1 ^ 2)))$2:${3}ff:fe$4:$5$6/64; done')
        # Now start all the other daemons
        for daemon in self.daemons:
            # Skip disabled daemons and zebra
            if self.daemons[daemon] == 0 or daemon == 'zebra':
                continue
            daemon_path = os.path.join(self.daemondir, daemon)
            self.cmd('{0} > {1}/{2}-{3}.out 2> {1}/{2}-{3}.err &'.format(
                daemon_path, self.logdir, self.name, daemon
            ))
            self.waitOutput()
            logger.debug('{}: {} {} started'.format(self, self.routertype, daemon))

    def getStdErr(self, daemon):
        "Return the captured stderr log of `daemon`."
        return self.getLog('err', daemon)

    def getStdOut(self, daemon):
        "Return the captured stdout log of `daemon`."
        return self.getLog('out', daemon)

    def getLog(self, log, daemon):
        "Read a daemon log file ('out' or 'err') from the log directory."
        return self.cmd('cat {}/{}-{}.{}'.format(self.logdir, self.name, daemon, log))

    def checkRouterRunning(self):
        "Check if router daemons are running and collect crashinfo they don't run"

        # NOTE(review): fatal_error is never assigned in this method; the
        # declaration appears vestigial - confirm against callers.
        global fatal_error

        daemonsRunning = self.cmd('vtysh -c "show log" | grep "Logging configuration for"')
        # Look for AddressSanitizer Errors in vtysh output and append to /tmp/AddressSanitzer.txt if found
        if checkAddressSanitizerError(daemonsRunning, self.name, "vtysh"):
            return "%s: vtysh killed by AddressSanitizer" % (self.name)

        for daemon in self.daemons:
            if (self.daemons[daemon] == 1) and not (daemon in daemonsRunning):
                sys.stderr.write("%s: Daemon %s not running\n" % (self.name, daemon))
                # Look for core file
                corefiles = glob.glob('{}/{}_{}_core*.dmp'.format(
                    self.logdir, self.name, daemon))
                if (len(corefiles) > 0):
                    daemon_path = os.path.join(self.daemondir, daemon)
                    backtrace = subprocess.check_output([
                        "gdb {} {} --batch -ex bt 2> /dev/null".format(daemon_path, corefiles[0])
                    ], shell=True)
                    sys.stderr.write("\n%s: %s crashed. Core file found - Backtrace follows:\n" % (self.name, daemon))
                    sys.stderr.write("%s\n" % backtrace)
                else:
                    # No core found - If we find matching logfile in /tmp, then print last 20 lines from it.
                    if os.path.isfile('{}/{}-{}.log'.format(self.logdir, self.name, daemon)):
                        log_tail = subprocess.check_output([
                            "tail -n20 {}/{}-{}.log 2> /dev/null".format(
                                self.logdir, self.name, daemon)
                        ], shell=True)
                        sys.stderr.write("\nFrom %s %s %s log file:\n" % (self.routertype, self.name, daemon))
                        sys.stderr.write("%s\n" % log_tail)

                # Look for AddressSanitizer Errors and append to /tmp/AddressSanitzer.txt if found
                if checkAddressSanitizerError(self.getStdErr(daemon), self.name, daemon):
                    return "%s: Daemon %s not running - killed by AddressSanitizer" % (self.name, daemon)

                return "%s: Daemon %s not running" % (self.name, daemon)
        return ""

    def get_ipv6_linklocal(self):
        "Get LinkLocal Addresses from interfaces"
        linklocal = []

        ifaces = self.cmd('ip -6 address')
        # Fix newlines (make them all the same)
        ifaces = ('\n'.join(ifaces.splitlines()) + '\n').splitlines()
        interface = ""
        ll_per_if_count = 0
        for line in ifaces:
            # Interface header line, e.g. "2: r1-eth0@if3: <BROADCAST,...>"
            m = re.search('[0-9]+: ([^:@]+)[@if0-9:]+ <', line)
            if m:
                interface = m.group(1)
                ll_per_if_count = 0
            # Link-local address line; disambiguate multiple LLs on the
            # same interface by suffixing a counter to the interface name.
            m = re.search('inet6 (fe80::[0-9a-f]+:[0-9a-f]+:[0-9a-f]+:[0-9a-f]+)[/0-9]* scope link', line)
            if m:
                local = m.group(1)
                ll_per_if_count += 1
                if (ll_per_if_count > 1):
                    linklocal += [["%s-%s" % (interface, ll_per_if_count), local]]
                else:
                    linklocal += [[interface, local]]
        return linklocal

    def daemon_available(self, daemon):
        "Check if specified daemon is installed (and for ldp if kernel supports MPLS)"
        daemon_path = os.path.join(self.daemondir, daemon)
        if not os.path.isfile(daemon_path):
            return False
        if (daemon == 'ldpd'):
            # LDP requires MPLS support, available from Linux 4.5 on.
            kernel_version = re.search(r'([0-9]+)\.([0-9]+).*', platform.release())
            if kernel_version:
                if (float(kernel_version.group(1)) < 4 or
                        (float(kernel_version.group(1)) == 4 and float(kernel_version.group(2)) < 5)):
                    return False
            else:
                return False
        return True

    def get_routertype(self):
        "Return the type of Router (frr or quagga)"
        return self.routertype

    def report_memory_leaks(self, filename_prefix, testscript):
        "Report Memory Leaks to file prefixed with given string"
        leakfound = False
        filename = filename_prefix + re.sub(r"\.py", "", testscript) + ".txt"
        for daemon in self.daemons:
            if (self.daemons[daemon] == 1):
                log = self.getStdErr(daemon)
                if "memstats" in log:
                    # Found memory leak
                    logger.info('\nRouter {} {} StdErr Log:\n{}'.format(
                        self.name, daemon, log))
                    if not leakfound:
                        leakfound = True
                        # Check if file already exists
                        fileexists = os.path.isfile(filename)
                        leakfile = open(filename, "a")
                        if not fileexists:
                            # New file - add header
                            leakfile.write("# Memory Leak Detection for topotest %s\n\n" % testscript)
                        leakfile.write("## Router %s\n" % self.name)
                    leakfile.write("### Process %s\n" % daemon)
                    # Reformat the raw memstats output as markdown.
                    log = re.sub("core_handler: ", "", log)
                    log = re.sub(r"(showing active allocations in memory group [a-zA-Z0-9]+)", r"\n#### \1\n", log)
                    log = re.sub("memstats:  ", "    ", log)
                    leakfile.write(log)
                    leakfile.write("\n")
        if leakfound:
            leakfile.close()
2017-04-08 12:40:51 +02:00
2017-01-30 22:50:48 +01:00
class LegacySwitch(OVSSwitch):
    "A Legacy Switch without OpenFlow"

    def __init__(self, name, **params):
        # failMode='standalone' makes OVS act as a plain learning switch
        # when no OpenFlow controller is connected.
        OVSSwitch.__init__(self, name, failMode='standalone', **params)
        # Management IP; stays unset until explicitly configured.
        self.switchIP = None