python: apply black formatting
The python/ directory hasn't been shoved into black yet (unlike topotests,
where most FRR python code is.)  Run black over it.

Signed-off-by: David Lamparter <equinox@opensourcerouting.org>
parent 3727be24a0
commit 00f0c39903
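The hunks below are mechanical black output: string quotes normalized to double quotes, over-long lines and trailing-comma collections exploded one element per line, and blank lines around top-level definitions normalized. A minimal sketch of the effect, using one dict entry from the diff in an illustrative snippet that is not itself part of the FRR tree (the usual invocation would be something like `black python/` from the repository root):

```python
# Hypothetical before/after illustration (not taken from the FRR sources).
# The single-quoted, single-line form ...
extra_info_before = {("mq_add_handler", "work_queue_add"): ["meta_queue_process",]}

# ... is rewritten by black into double quotes with the trailing-comma list
# exploded one element per line:
extra_info_after = {
    ("mq_add_handler", "work_queue_add"): [
        "meta_queue_process",
    ],
}

# formatting only -- the data is unchanged
assert extra_info_before == extra_info_after
```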
@@ -321,15 +321,31 @@ extra_info = {
         "lsp_processq_complete",
     ],
     # zebra - main WQ
-    ("mq_add_handler", "work_queue_add"): ["meta_queue_process",],
-    ("meta_queue_process", "work_queue_add"): ["meta_queue_process",],
+    ("mq_add_handler", "work_queue_add"): [
+        "meta_queue_process",
+    ],
+    ("meta_queue_process", "work_queue_add"): [
+        "meta_queue_process",
+    ],
     # bgpd - label pool WQ
-    ("bgp_lp_get", "work_queue_add"): ["lp_cbq_docallback",],
-    ("bgp_lp_event_chunk", "work_queue_add"): ["lp_cbq_docallback",],
-    ("bgp_lp_event_zebra_up", "work_queue_add"): ["lp_cbq_docallback",],
+    ("bgp_lp_get", "work_queue_add"): [
+        "lp_cbq_docallback",
+    ],
+    ("bgp_lp_event_chunk", "work_queue_add"): [
+        "lp_cbq_docallback",
+    ],
+    ("bgp_lp_event_zebra_up", "work_queue_add"): [
+        "lp_cbq_docallback",
+    ],
     # bgpd - main WQ
-    ("bgp_process", "work_queue_add"): ["bgp_process_wq", "bgp_processq_del",],
-    ("bgp_add_eoiu_mark", "work_queue_add"): ["bgp_process_wq", "bgp_processq_del",],
+    ("bgp_process", "work_queue_add"): [
+        "bgp_process_wq",
+        "bgp_processq_del",
+    ],
+    ("bgp_add_eoiu_mark", "work_queue_add"): [
+        "bgp_process_wq",
+        "bgp_processq_del",
+    ],
     # clear node WQ
     ("bgp_clear_route_table", "work_queue_add"): [
         "bgp_clear_route_node",
@@ -337,7 +353,9 @@ extra_info = {
         "bgp_clear_node_complete",
     ],
     # rfapi WQs
-    ("rfapi_close", "work_queue_add"): ["rfapi_deferred_close_workfunc",],
+    ("rfapi_close", "work_queue_add"): [
+        "rfapi_deferred_close_workfunc",
+    ],
     ("rfapiRibUpdatePendingNode", "work_queue_add"): [
         "rfapiRibDoQueuedCallback",
         "rfapiRibQueueItemDelete",
@@ -36,7 +36,10 @@ from _clippy import (
 )

-frr_top_src = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+frr_top_src = os.path.dirname(
+    os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+)


 def graph_iterate(graph):
     """iterator yielding all nodes of a graph
@@ -16,7 +16,7 @@
 # with this program; see the file COPYING; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

-'''
+"""
 Wrapping layer and additional utility around _clippy.ELFFile.

 Essentially, the C bits have the low-level ELF access bits that should be
@@ -28,7 +28,7 @@ across architecture, word size and even endianness boundaries. Both the C
 module (through GElf_*) and this code (cf. struct.unpack format mangling
 in ELFDissectStruct) will take appropriate measures to flip and resize
 fields as needed.
-'''
+"""

 import struct
 from collections import OrderedDict
@@ -40,16 +40,18 @@ from _clippy import ELFFile, ELFAccessError
 # data access
 #

+
 class ELFNull(object):
-    '''
+    """
     NULL pointer, returned instead of ELFData
-    '''
+    """
+
     def __init__(self):
         self.symname = None
         self._dstsect = None

     def __repr__(self):
-        return '<ptr: NULL>'
+        return "<ptr: NULL>"

     def __hash__(self):
         return hash(None)
@@ -57,33 +59,37 @@ class ELFNull(object):
     def get_string(self):
         return None

+
 class ELFUnresolved(object):
-    '''
+    """
     Reference to an unresolved external symbol, returned instead of ELFData

     :param symname: name of the referenced symbol
     :param addend: offset added to the symbol, normally zero
-    '''
+    """
+
     def __init__(self, symname, addend):
         self.addend = addend
         self.symname = symname
         self._dstsect = None

     def __repr__(self):
-        return '<unresolved: %s+%d>' % (self.symname, self.addend)
+        return "<unresolved: %s+%d>" % (self.symname, self.addend)

     def __hash__(self):
         return hash((self.symname, self.addend))

+
 class ELFData(object):
-    '''
+    """
     Actual data somewhere in the ELF file.

     :type dstsect: ELFSubset
     :param dstsect: container data area (section or entire file)
     :param dstoffs: byte offset into dstsect
     :param dstlen: byte size of object, or None if unknown, open-ended or string
-    '''
+    """
+
     def __init__(self, dstsect, dstoffs, dstlen):
         self._dstsect = dstsect
         self._dstoffs = dstoffs
@@ -91,62 +97,74 @@ class ELFData(object):
         self.symname = None

     def __repr__(self):
-        return '<ptr: %s+0x%05x/%d>' % (self._dstsect.name, self._dstoffs, self._dstlen or -1)
+        return "<ptr: %s+0x%05x/%d>" % (
+            self._dstsect.name,
+            self._dstoffs,
+            self._dstlen or -1,
+        )

     def __hash__(self):
         return hash((self._dstsect, self._dstoffs))

     def get_string(self):
-        '''
+        """
         Interpret as C string / null terminated UTF-8 and get the actual text.
-        '''
+        """
         try:
-            return self._dstsect[self._dstoffs:str].decode('UTF-8')
+            return self._dstsect[self._dstoffs : str].decode("UTF-8")
         except:
-            import pdb; pdb.set_trace()
+            import pdb
+
+            pdb.set_trace()

     def get_data(self, reflen):
-        '''
+        """
         Interpret as some structure (and check vs. expected length)

         :param reflen: expected size of the object, compared against actual
            size (which is only known in rare cases, mostly when directly
            accessing a symbol since symbols have their destination object
            size recorded)
-        '''
+        """
         if self._dstlen is not None and self._dstlen != reflen:
-            raise ValueError('symbol size mismatch (got %d, expected %d)' % (self._dstlen, reflen))
-        return self._dstsect[self._dstoffs:self._dstoffs+reflen]
+            raise ValueError(
+                "symbol size mismatch (got %d, expected %d)" % (self._dstlen, reflen)
+            )
+        return self._dstsect[self._dstoffs : self._dstoffs + reflen]

     def offset(self, offs, within_symbol=False):
-        '''
+        """
         Get another ELFData at an offset

         :param offs: byte offset, can be negative (e.g. in container_of)
         :param within_symbol: retain length information
-        '''
+        """
         if self._dstlen is None or not within_symbol:
             return ELFData(self._dstsect, self._dstoffs + offs, None)
         else:
             return ELFData(self._dstsect, self._dstoffs + offs, self._dstlen - offs)


 #
 # dissection data items
 #

+
 class ELFDissectData(object):
-    '''
+    """
     Common bits for ELFDissectStruct and ELFDissectUnion
-    '''
+    """
+
     def __len__(self):
-        '''
+        """
         Used for boolean evaluation, e.g. "if struct: ..."
-        '''
-        return not (isinstance(self._data, ELFNull) or isinstance(self._data, ELFUnresolved))
+        """
+        return not (
+            isinstance(self._data, ELFNull) or isinstance(self._data, ELFUnresolved)
+        )

     def container_of(self, parent, fieldname):
-        '''
+        """
         Assume this struct is embedded in a larger struct and get at the larger

         Python ``self.container_of(a, b)`` = C ``container_of(self, a, b)``
@@ -154,25 +172,26 @@ class ELFDissectData(object):
         :param parent: class (not instance) of the larger struct
         :param fieldname: fieldname that refers back to this
         :returns: instance of parent, with fieldname set to this object
-        '''
+        """
         offset = 0
-        if not hasattr(parent, '_efields'):
+        if not hasattr(parent, "_efields"):
             parent._setup_efields()

         for field in parent._efields[self.elfclass]:
             if field[0] == fieldname:
                 break
             spec = field[1]
-            if spec == 'P':
-                spec = 'I' if self.elfclass == 32 else 'Q'
+            if spec == "P":
+                spec = "I" if self.elfclass == 32 else "Q"
             offset += struct.calcsize(spec)
         else:
-            raise AttributeError('%r not found in %r.fields' % (fieldname, parent))
+            raise AttributeError("%r not found in %r.fields" % (fieldname, parent))

-        return parent(self._data.offset(-offset), replace = {fieldname: self})
+        return parent(self._data.offset(-offset), replace={fieldname: self})
+

 class ELFDissectStruct(ELFDissectData):
-    '''
+    """
     Decode and provide access to a struct somewhere in the ELF file

     Handles pointers and strings somewhat nicely. Create a subclass for each
@@ -205,30 +224,31 @@ class ELFDissectStruct(ELFDissectData):
     .. attribute:: fieldrename

         Dictionary to rename fields, useful if fields comes from tiabwarfo.py.
-    '''
+    """
+
     class Pointer(object):
-        '''
+        """
         Quick wrapper for pointers to further structs

         This is just here to avoid going into infinite loops when loading
         structs that have pointers to each other (e.g. struct xref <-->
         struct xrefdata.) The pointer destination is only instantiated when
         actually accessed.
-        '''
+        """

         def __init__(self, cls, ptr):
             self.cls = cls
             self.ptr = ptr

         def __repr__(self):
-            return '<Pointer:%s %r>' % (self.cls.__name__, self.ptr)
+            return "<Pointer:%s %r>" % (self.cls.__name__, self.ptr)

         def __call__(self):
            if isinstance(self.ptr, ELFNull):
                 return None
             return self.cls(self.ptr)

-    def __new__(cls, dataptr, parent = None, replace = None):
+    def __new__(cls, dataptr, parent=None, replace=None):
         if dataptr._dstsect is None:
             return super().__new__(cls)

@@ -239,19 +259,19 @@ class ELFDissectStruct(ELFDissectData):
             dataptr._dstsect._pointers[(cls, dataptr)] = obj
         return obj

-    replacements = 'lLnN'
+    replacements = "lLnN"

     @classmethod
     def _preproc_structspec(cls, elfclass, spec):
         elfbits = elfclass

-        if hasattr(spec, 'calcsize'):
-            spec = '%ds' % (spec.calcsize(elfclass),)
+        if hasattr(spec, "calcsize"):
+            spec = "%ds" % (spec.calcsize(elfclass),)

         if elfbits == 32:
-            repl = ['i', 'I']
+            repl = ["i", "I"]
         else:
-            repl = ['q', 'Q']
+            repl = ["q", "Q"]
         for c in cls.replacements:
             spec = spec.replace(c, repl[int(c.isupper())])
         return spec
@@ -269,8 +289,8 @@ class ELFDissectStruct(ELFDissectData):
             size += struct.calcsize(newf[1])
         cls._esize[elfclass] = size

-    def __init__(self, dataptr, parent = None, replace = None):
-        if not hasattr(self.__class__, '_efields'):
+    def __init__(self, dataptr, parent=None, replace=None):
+        if not hasattr(self.__class__, "_efields"):
             self._setup_efields()

         self._fdata = None
@@ -290,12 +310,12 @@ class ELFDissectStruct(ELFDissectData):
         # need to correlate output from struct.unpack with extra metadata
         # about the particular fields, so note down byte offsets (in locs)
         # and tuple indices of pointers (in ptrs)
-        pspec = ''
+        pspec = ""
         locs = {}
         ptrs = set()

         for idx, spec in enumerate(pspecl):
-            if spec == 'P':
+            if spec == "P":
                 ptrs.add(idx)
                 spec = self._elfsect.ptrtype

@@ -326,7 +346,9 @@ class ELFDissectStruct(ELFDissectData):
                 self._fdata[name] = replace[name]
                 continue

-            if isinstance(self.fields[i][1], type) and issubclass(self.fields[i][1], ELFDissectData):
+            if isinstance(self.fields[i][1], type) and issubclass(
+                self.fields[i][1], ELFDissectData
+            ):
                 dataobj = self.fields[i][1](dataptr.offset(locs[i]), self)
                 self._fdata[name] = dataobj
                 continue
@@ -353,35 +375,39 @@ class ELFDissectStruct(ELFDissectData):

     def __repr__(self):
         if not isinstance(self._data, ELFData):
-            return '<%s: %r>' % (self.__class__.__name__, self._data)
-        return '<%s: %s>' % (self.__class__.__name__,
-                ', '.join(['%s=%r' % t for t in self._fdata.items()]))
+            return "<%s: %r>" % (self.__class__.__name__, self._data)
+        return "<%s: %s>" % (
+            self.__class__.__name__,
+            ", ".join(["%s=%r" % t for t in self._fdata.items()]),
+        )

     @classmethod
     def calcsize(cls, elfclass):
-        '''
+        """
         Sum up byte size of this struct

         Wraps struct.calcsize with some extra features.
-        '''
-        if not hasattr(cls, '_efields'):
+        """
+        if not hasattr(cls, "_efields"):
             cls._setup_efields()

-        pspec = ''.join([f[1] for f in cls._efields[elfclass]])
+        pspec = "".join([f[1] for f in cls._efields[elfclass]])

-        ptrtype = 'I' if elfclass == 32 else 'Q'
-        pspec = pspec.replace('P', ptrtype)
+        ptrtype = "I" if elfclass == 32 else "Q"
+        pspec = pspec.replace("P", ptrtype)

         return struct.calcsize(pspec)

+
 class ELFDissectUnion(ELFDissectData):
-    '''
+    """
     Decode multiple structs in the same place.

     Not currently used (and hence not tested.) Worked at some point but not
     needed anymore and may be borked now. Remove this comment when using.
-    '''
-    def __init__(self, dataptr, parent = None):
+    """
+
+    def __init__(self, dataptr, parent=None):
         self._dataptr = dataptr
         self._parent = parent
         self.members = []
@@ -391,20 +417,25 @@ class ELFDissectUnion(ELFDissectData):
             setattr(self, name, item)

     def __repr__(self):
-        return '<%s: %s>' % (self.__class__.__name__, ', '.join([repr(i) for i in self.members]))
+        return "<%s: %s>" % (
+            self.__class__.__name__,
+            ", ".join([repr(i) for i in self.members]),
+        )

     @classmethod
     def calcsize(cls, elfclass):
         return max([member.calcsize(elfclass) for name, member in cls.members])


 #
 # wrappers for spans of ELF data
 #

+
 class ELFSubset(object):
-    '''
+    """
     Common abstract base for section-level and file-level access.
-    '''
+    """
+
     def __init__(self):
         super().__init__()
@@ -415,7 +446,7 @@ class ELFSubset(object):
         return hash(self.name)

     def __getitem__(self, k):
-        '''
+        """
         Read data from slice

         Subscript **must** be a slice; a simple index will not return a byte
@@ -425,22 +456,22 @@ class ELFSubset(object):
         - `this[123:456]` - extract specific range
         - `this[123:str]` - extract until null byte. The slice stop value is
           the `str` type (or, technically, `unicode`.)
-        '''
+        """
         return self._obj[k]

     def getreloc(self, offset):
-        '''
+        """
         Check for a relocation record at the specified offset.
-        '''
+        """
         return self._obj.getreloc(offset)

-    def iter_data(self, scls, slice_ = slice(None)):
-        '''
+    def iter_data(self, scls, slice_=slice(None)):
+        """
         Assume an array of structs present at a particular slice and decode

         :param scls: ELFDissectData subclass for the struct
         :param slice_: optional range specification
-        '''
+        """
         size = scls.calcsize(self._elffile.elfclass)

         offset = slice_.start or 0
@@ -453,7 +484,7 @@ class ELFSubset(object):
             offset += size

     def pointer(self, offset):
-        '''
+        """
        Try to dereference a pointer value

        This checks whether there's a relocation at the given offset and
@@ -463,10 +494,12 @@ class ELFSubset(object):
        :param offset: byte offset from beginning of section,
           or virtual address in file
        :returns: ELFData wrapping pointed-to object
-        '''
+        """

        ptrsize = struct.calcsize(self.ptrtype)
-        data = struct.unpack(self.endian + self.ptrtype, self[offset:offset + ptrsize])[0]
+        data = struct.unpack(
+            self.endian + self.ptrtype, self[offset : offset + ptrsize]
+        )[0]

        reloc = self.getreloc(offset)
        dstsect = None
@@ -497,14 +530,15 @@ class ELFSubset(object):
        # wrap_data is different between file & section
        return self._wrap_data(data, dstsect)

+
 class ELFDissectSection(ELFSubset):
-    '''
+    """
    Access the contents of an ELF section like ``.text`` or ``.data``

    :param elfwrap: ELFDissectFile wrapper for the file
    :param idx: section index in section header table
    :param section: section object from C module
-    '''
+    """

    def __init__(self, elfwrap, idx, section):
        super().__init__()
@@ -524,8 +558,9 @@ class ELFDissectSection(ELFSubset):
            dstsect = self._elfwrap.get_section(dstsect.idx)
        return ELFData(dstsect, offs, None)

+
 class ELFDissectFile(ELFSubset):
-    '''
+    """
    Access the contents of an ELF file.

    Note that offsets for array subscript and relocation/pointer access are
@@ -537,7 +572,7 @@ class ELFDissectFile(ELFSubset):
    address like 0x400000 on x86.

    :param filename: ELF file to open
-    '''
+    """

    def __init__(self, filename):
        super().__init__()
@@ -546,8 +581,8 @@ class ELFDissectFile(ELFSubset):
        self._elffile = self._obj = ELFFile(filename)
        self._sections = {}

-        self.ptrtype = 'I' if self._elffile.elfclass == 32 else 'Q'
-        self.endian = '>' if self._elffile.bigendian else '<'
+        self.ptrtype = "I" if self._elffile.elfclass == 32 else "Q"
+        self.endian = ">" if self._elffile.bigendian else "<"

    @property
    def _elfwrap(self):
@@ -557,9 +592,9 @@ class ELFDissectFile(ELFSubset):
        return ELFData(self, data, None)

    def get_section(self, secname):
-        '''
+        """
        Look up section by name or index
-        '''
+        """
        if isinstance(secname, int):
            sh_idx = secname
            section = self._elffile.get_section_idx(secname)
@@ -19,13 +19,14 @@
 import struct
 from hashlib import sha256

-def bititer(data, bits, startbit = True):
-    '''
+
+def bititer(data, bits, startbit=True):
+    """
    just iterate the individual bits out from a bytes object

    if startbit is True, an '1' bit is inserted at the very beginning
    goes <bits> at a time, starts at LSB.
-    '''
+    """
    bitavail, v = 0, 0
    if startbit and len(data) > 0:
        v = data.pop(0)
@@ -41,31 +42,33 @@ def bititer(data, bits, startbit = True):
        bitavail -= bits
        v >>= bits

+
 def base32c(data):
-    '''
+    """
    Crockford base32 with extra dashes
-    '''
+    """
    chs = "0123456789ABCDEFGHJKMNPQRSTVWXYZ"
-    o = ''
+    o = ""
    if type(data) == str:
        data = [ord(v) for v in data]
    else:
        data = list(data)
    for i, bits in enumerate(bititer(data, 5)):
        if i == 5:
-            o = o + '-'
+            o = o + "-"
        elif i == 10:
            break
        o = o + chs[bits]
    return o

+
 def uidhash(filename, hashstr, hashu32a, hashu32b):
-    '''
+    """
    xref Unique ID hash used in FRRouting
-    '''
-    filename = '/'.join(filename.rsplit('/')[-2:])
+    """
+    filename = "/".join(filename.rsplit("/")[-2:])

-    hdata = filename.encode('UTF-8') + hashstr.encode('UTF-8')
-    hdata += struct.pack('>II', hashu32a, hashu32b)
+    hdata = filename.encode("UTF-8") + hashstr.encode("UTF-8")
+    hdata += struct.pack(">II", hashu32a, hashu32b)
    i = sha256(hdata).digest()
    return base32c(i)
@@ -161,7 +161,10 @@ for clippy_file in clippy_scan:
 # combine daemon .xref files into frr.xref
 out_lines.append("")
 xref_targets = [
-    target for target in xref_targets if target not in [
+    target
+    for target in xref_targets
+    if target
+    not in [
        "bgpd/rfp-example/rfptest/rfptest",
        "pimd/mtracebis",
        "tools/ssd",
@@ -5,9 +5,11 @@ import os
 try:
     import _clippy
 except ImportError:
-    sys.stderr.write('''these tests need to be run with the _clippy C extension
+    sys.stderr.write(
+        """these tests need to be run with the _clippy C extension
 module available. Try running "clippy runtests.py ...".
-''')
+"""
+    )
     sys.exit(1)

 os.chdir(os.path.dirname(os.path.abspath(__file__)))
@@ -22,20 +22,21 @@ import pytest
 from pprint import pprint

 root = os.path.dirname(os.path.dirname(__file__))
-sys.path.append(os.path.join(root, 'python'))
+sys.path.append(os.path.join(root, "python"))

 import xrelfo
 from clippy import elf, uidhash

+
 def test_uidhash():
-    assert uidhash.uidhash("lib/test_xref.c", "logging call", 3, 0) \
-            == 'H7KJB-67TBH'
+    assert uidhash.uidhash("lib/test_xref.c", "logging call", 3, 0) == "H7KJB-67TBH"
+

 def test_xrelfo_other():
     for data in [
         elf.ELFNull(),
-        elf.ELFUnresolved('somesym', 0),
+        elf.ELFUnresolved("somesym", 0),
     ]:

         dissect = xrelfo.XrefPtr(data)
         print(repr(dissect))
@@ -43,9 +44,10 @@ def test_xrelfo_other():
     with pytest.raises(AttributeError):
         dissect.xref

+
 def test_xrelfo_obj():
     xrelfo_ = xrelfo.Xrelfo()
-    edf = xrelfo_.load_elf(os.path.join(root, 'lib/.libs/zclient.o'), 'zclient.lo')
+    edf = xrelfo_.load_elf(os.path.join(root, "lib/.libs/zclient.o"), "zclient.lo")
     xrefs = xrelfo_._xrefs

     with pytest.raises(elf.ELFAccessError):
@@ -54,12 +56,13 @@ def test_xrelfo_obj():
     pprint(xrefs[0])
     pprint(xrefs[0]._data)

+
 def test_xrelfo_bin():
     xrelfo_ = xrelfo.Xrelfo()
-    edf = xrelfo_.load_elf(os.path.join(root, 'lib/.libs/libfrr.so'), 'libfrr.la')
+    edf = xrelfo_.load_elf(os.path.join(root, "lib/.libs/libfrr.so"), "libfrr.la")
     xrefs = xrelfo_._xrefs

-    assert edf[0:4] == b'\x7fELF'
+    assert edf[0:4] == b"\x7fELF"

     pprint(xrefs[0])
     pprint(xrefs[0]._data)
@@ -23,10 +23,19 @@ import re
 import argparse
 import json

-structs = ['xref', 'xref_logmsg', 'xref_threadsched', 'xref_install_element', 'xrefdata', 'xrefdata_logmsg', 'cmd_element']
+structs = [
+    "xref",
+    "xref_logmsg",
+    "xref_threadsched",
+    "xref_install_element",
+    "xrefdata",
+    "xrefdata_logmsg",
+    "cmd_element",
+]

-def extract(filename='lib/.libs/libfrr.so'):
-    '''
+
+def extract(filename="lib/.libs/libfrr.so"):
+    """
     Convert output from "pahole" to JSON.

     Example pahole output:
@@ -41,26 +50,30 @@ def extract(filename='lib/.libs/libfrr.so'):
        /* size: 32, cachelines: 1, members: 5 */
        /* last cacheline: 32 bytes */
     };
-    '''
-    pahole = subprocess.check_output(['pahole', '-C', ','.join(structs), filename]).decode('UTF-8')
+    """
+    pahole = subprocess.check_output(
+        ["pahole", "-C", ",".join(structs), filename]
+    ).decode("UTF-8")

-    struct_re = re.compile(r'^struct ([^ ]+) \{([^\}]+)};', flags=re.M | re.S)
-    field_re = re.compile(r'^\s*(?P<type>[^;\(]+)\s+(?P<name>[^;\[\]]+)(?:\[(?P<array>\d+)\])?;\s*\/\*(?P<comment>.*)\*\/\s*$')
-    comment_re = re.compile(r'^\s*\/\*.*\*\/\s*$')
+    struct_re = re.compile(r"^struct ([^ ]+) \{([^\}]+)};", flags=re.M | re.S)
+    field_re = re.compile(
+        r"^\s*(?P<type>[^;\(]+)\s+(?P<name>[^;\[\]]+)(?:\[(?P<array>\d+)\])?;\s*\/\*(?P<comment>.*)\*\/\s*$"
+    )
+    comment_re = re.compile(r"^\s*\/\*.*\*\/\s*$")

     pastructs = struct_re.findall(pahole)
     out = {}

     for sname, data in pastructs:
         this = out.setdefault(sname, {})
-        fields = this.setdefault('fields', [])
+        fields = this.setdefault("fields", [])

         lines = data.strip().splitlines()

         next_offs = 0

         for line in lines:
-            if line.strip() == '':
+            if line.strip() == "":
                 continue
             m = comment_re.match(line)
             if m is not None:
@@ -68,51 +81,55 @@ def extract(filename='lib/.libs/libfrr.so'):

             m = field_re.match(line)
             if m is not None:
-                offs, size = m.group('comment').strip().split()
+                offs, size = m.group("comment").strip().split()
                 offs = int(offs)
                 size = int(size)
-                typ_ = m.group('type').strip()
-                name = m.group('name')
+                typ_ = m.group("type").strip()
+                name = m.group("name")

-                if name.startswith('(*'):
+                if name.startswith("(*"):
                     # function pointer
-                    typ_ = typ_ + ' *'
-                    name = name[2:].split(')')[0]
+                    typ_ = typ_ + " *"
+                    name = name[2:].split(")")[0]

                 data = {
-                    'name': name,
-                    'type': typ_,
+                    "name": name,
+                    "type": typ_,
                     # 'offset': offs,
                     # 'size': size,
                 }
-                if m.group('array'):
-                    data['array'] = int(m.group('array'))
+                if m.group("array"):
+                    data["array"] = int(m.group("array"))

                 fields.append(data)
                 if offs != next_offs:
-                    raise ValueError('%d padding bytes before struct %s.%s' % (offs - next_offs, sname, name))
+                    raise ValueError(
+                        "%d padding bytes before struct %s.%s"
+                        % (offs - next_offs, sname, name)
+                    )
                 next_offs = offs + size
                 continue

-            raise ValueError('cannot process line: %s' % line)
+            raise ValueError("cannot process line: %s" % line)

     return out

+
 class FieldApplicator(object):
-    '''
+    """
     Fill ELFDissectStruct fields list from pahole/JSON

     Uses the JSON file created by the above code to fill in the struct fields
     in subclasses of ELFDissectStruct.
-    '''
+    """

     # only what we really need. add more as needed.
     packtypes = {
-        'int': 'i',
-        'uint8_t': 'B',
-        'uint16_t': 'H',
-        'uint32_t': 'I',
-        'char': 's',
+        "int": "i",
+        "uint8_t": "B",
+        "uint16_t": "H",
+        "uint32_t": "I",
+        "char": "s",
     }

     def __init__(self, data):
@@ -126,60 +143,65 @@ class FieldApplicator(object):

     def resolve(self, cls):
         out = []
-        #offset = 0
+        # offset = 0

-        fieldrename = getattr(cls, 'fieldrename', {})
+        fieldrename = getattr(cls, "fieldrename", {})
+
         def mkname(n):
             return (fieldrename.get(n, n),)

-        for field in self.data[cls.struct]['fields']:
-            typs = field['type'].split()
-            typs = [i for i in typs if i not in ['const']]
+        for field in self.data[cls.struct]["fields"]:
+            typs = field["type"].split()
+            typs = [i for i in typs if i not in ["const"]]

             # this will break reuse of xrefstructs.json across 32bit & 64bit
             # platforms

-            #if field['offset'] != offset:
+            # if field['offset'] != offset:
             #    assert offset < field['offset']
             #    out.append(('_pad', '%ds' % (field['offset'] - offset,)))

             # pretty hacky C types handling, but covers what we need

             ptrlevel = 0
-            while typs[-1] == '*':
+            while typs[-1] == "*":
                 typs.pop(-1)
                 ptrlevel += 1

             if ptrlevel > 0:
-                packtype = ('P', None)
+                packtype = ("P", None)
                 if ptrlevel == 1:
-                    if typs[0] == 'char':
-                        packtype = ('P', str)
-                    elif typs[0] == 'struct' and typs[1] in self.clsmap:
-                        packtype = ('P', self.clsmap[typs[1]])
-            elif typs[0] == 'enum':
-                packtype = ('I',)
+                    if typs[0] == "char":
+                        packtype = ("P", str)
+                    elif typs[0] == "struct" and typs[1] in self.clsmap:
+                        packtype = ("P", self.clsmap[typs[1]])
+            elif typs[0] == "enum":
+                packtype = ("I",)
             elif typs[0] in self.packtypes:
                 packtype = (self.packtypes[typs[0]],)
-            elif typs[0] == 'struct':
+            elif typs[0] == "struct":
                 if typs[1] in self.clsmap:
                     packtype = (self.clsmap[typs[1]],)
                 else:
-                    raise ValueError('embedded struct %s not in extracted data' % (typs[1],))
+                    raise ValueError(
+                        "embedded struct %s not in extracted data" % (typs[1],)
+                    )
             else:
-                raise ValueError('cannot decode field %s in struct %s (%s)' % (
-                        cls.struct, field['name'], field['type']))
+                raise ValueError(
+                    "cannot decode field %s in struct %s (%s)"
+                    % (cls.struct, field["name"], field["type"])
+                )

-            if 'array' in field and typs[0] == 'char':
-                packtype = ('%ds' % field['array'],)
-                out.append(mkname(field['name']) + packtype)
-            elif 'array' in field:
-                for i in range(0, field['array']):
-                    out.append(mkname('%s_%d' % (field['name'], i)) + packtype)
+            if "array" in field and typs[0] == "char":
+                packtype = ("%ds" % field["array"],)
+                out.append(mkname(field["name"]) + packtype)
+            elif "array" in field:
+                for i in range(0, field["array"]):
+                    out.append(mkname("%s_%d" % (field["name"], i)) + packtype)
             else:
-                out.append(mkname(field['name']) + packtype)
+                out.append(mkname(field["name"]) + packtype)

-            #offset = field['offset'] + field['size']
+            # offset = field['offset'] + field['size']

         cls.fields = out

@@ -187,16 +209,30 @@ class FieldApplicator(object):
         for cls in self.classes:
             self.resolve(cls)

+
 def main():
-    argp = argparse.ArgumentParser(description = 'FRR DWARF structure extractor')
-    argp.add_argument('-o', dest='output', type=str, help='write JSON output', default='python/xrefstructs.json')
-    argp.add_argument('-i', dest='input', type=str, help='ELF file to read', default='lib/.libs/libfrr.so')
+    argp = argparse.ArgumentParser(description="FRR DWARF structure extractor")
+    argp.add_argument(
+        "-o",
+        dest="output",
+        type=str,
+        help="write JSON output",
+        default="python/xrefstructs.json",
+    )
+    argp.add_argument(
+        "-i",
+        dest="input",
+        type=str,
+        help="ELF file to read",
+        default="lib/.libs/libfrr.so",
+    )
     args = argp.parse_args()

     out = extract(args.input)
-    with open(args.output + '.tmp', 'w') as fd:
+    with open(args.output + ".tmp", "w") as fd:
         json.dump(out, fd, indent=2, sort_keys=True)
-    os.rename(args.output + '.tmp', args.output)
+    os.rename(args.output + ".tmp", args.output)

-if __name__ == '__main__':
+
+if __name__ == "__main__":
     main()
python/xrelfo.py (325 lines changed)
@@ -40,13 +40,15 @@ from tiabwarfo import FieldApplicator
 from xref2vtysh import CommandEntry

 try:
-    with open(os.path.join(frr_top_src, 'python', 'xrefstructs.json'), 'r') as fd:
+    with open(os.path.join(frr_top_src, "python", "xrefstructs.json"), "r") as fd:
         xrefstructs = json.load(fd)
 except FileNotFoundError:
-    sys.stderr.write('''
+    sys.stderr.write(
+        """
 The "xrefstructs.json" file (created by running tiabwarfo.py with the pahole
 tool available) could not be found. It should be included with the sources.
-''')
+"""
+    )
     sys.exit(1)

 # constants, need to be kept in sync manually...
@@ -58,7 +60,7 @@ XREFT_INSTALL_ELEMENT = 0x301

 # LOG_*
 priovals = {}
-prios = ['0', '1', '2', 'E', 'W', 'N', 'I', 'D']
+prios = ["0", "1", "2", "E", "W", "N", "I", "D"]


 class XrelfoJson(object):
@@ -71,9 +73,10 @@ class XrelfoJson(object):
     def to_dict(self, refs):
         pass

+
 class Xref(ELFDissectStruct, XrelfoJson):
-    struct = 'xref'
-    fieldrename = {'type': 'typ'}
+    struct = "xref"
+    fieldrename = {"type": "typ"}
     containers = {}

     def __init__(self, *args, **kwargs):
@@ -86,7 +89,7 @@ class Xref(ELFDissectStruct, XrelfoJson):
     def container(self):
         if self._container is None:
             if self.typ in self.containers:
-                self._container = self.container_of(self.containers[self.typ], 'xref')
+                self._container = self.container_of(self.containers[self.typ], "xref")
         return self._container

     def check(self, *args, **kwargs):
@@ -95,10 +98,10 @@ class Xref(ELFDissectStruct, XrelfoJson):


 class Xrefdata(ELFDissectStruct):
-    struct = 'xrefdata'
+    struct = "xrefdata"

     # uid is all zeroes in the data loaded from ELF
-    fieldrename = {'uid': '_uid'}
+    fieldrename = {"uid": "_uid"}

     def ref_from(self, xref, typ):
         self.xref = xref
@@ -109,38 +112,83 @@ class Xrefdata(ELFDissectStruct):
             return None
         return uidhash(self.xref.file, self.hashstr, self.hashu32_0, self.hashu32_1)

+
 class XrefPtr(ELFDissectStruct):
     fields = [
-        ('xref', 'P', Xref),
+        ("xref", "P", Xref),
     ]

+
 class XrefThreadSched(ELFDissectStruct, XrelfoJson):
-    struct = 'xref_threadsched'
+    struct = "xref_threadsched"

+
 Xref.containers[XREFT_THREADSCHED] = XrefThreadSched

+
 class XrefLogmsg(ELFDissectStruct, XrelfoJson):
-    struct = 'xref_logmsg'
+    struct = "xref_logmsg"

     def _warn_fmt(self, text):
-        lines = text.split('\n')
-        yield ((self.xref.file, self.xref.line), '%s:%d: %s (in %s())%s\n' % (self.xref.file, self.xref.line, lines[0], self.xref.func, ''.join(['\n' + l for l in lines[1:]])))
+        lines = text.split("\n")
+        yield (
+            (self.xref.file, self.xref.line),
+            "%s:%d: %s (in %s())%s\n"
+            % (
+                self.xref.file,
+                self.xref.line,
+                lines[0],
+                self.xref.func,
+                "".join(["\n" + l for l in lines[1:]]),
+            ),
+        )

     fmt_regexes = [
-        (re.compile(r'([\n\t]+)'), 'error: log message contains tab or newline'),
+        (re.compile(r"([\n\t]+)"), "error: log message contains tab or newline"),
         # (re.compile(r'^(\s+)'), 'warning: log message starts with whitespace'),
-        (re.compile(r'^((?:warn(?:ing)?|error):\s*)', re.I), 'warning: log message starts with severity'),
+        (
+            re.compile(r"^((?:warn(?:ing)?|error):\s*)", re.I),
+            "warning: log message starts with severity",
+        ),
     ]
     arg_regexes = [
         # the (?<![\?:] ) avoids warning for x ? inet_ntop(...) : "(bla)"
-        (re.compile(r'((?<![\?:] )inet_ntop\s*\(\s*(?:[AP]F_INET|2)\s*,)'), 'cleanup: replace inet_ntop(AF_INET, ...) with %pI4', lambda s: True),
-        (re.compile(r'((?<![\?:] )inet_ntop\s*\(\s*(?:[AP]F_INET6|10)\s*,)'), 'cleanup: replace inet_ntop(AF_INET6, ...) with %pI6', lambda s: True),
-        (re.compile(r'((?<![\?:] )inet_ntoa)'), 'cleanup: replace inet_ntoa(...) with %pI4', lambda s: True),
-        (re.compile(r'((?<![\?:] )ipaddr2str)'), 'cleanup: replace ipaddr2str(...) with %pIA', lambda s: True),
-        (re.compile(r'((?<![\?:] )prefix2str)'), 'cleanup: replace prefix2str(...) with %pFX', lambda s: True),
-        (re.compile(r'((?<![\?:] )prefix_mac2str)'), 'cleanup: replace prefix_mac2str(...) with %pEA', lambda s: True),
-        (re.compile(r'((?<![\?:] )sockunion2str)'), 'cleanup: replace sockunion2str(...) with %pSU', lambda s: True),
-        # (re.compile(r'^(\s*__(?:func|FUNCTION|PRETTY_FUNCTION)__\s*)'), 'error: debug message starts with __func__', lambda s: (s.priority & 7 == 7) ),
+        (
+            re.compile(r"((?<![\?:] )inet_ntop\s*\(\s*(?:[AP]F_INET|2)\s*,)"),
+            "cleanup: replace inet_ntop(AF_INET, ...) with %pI4",
+            lambda s: True,
+        ),
+        (
+            re.compile(r"((?<![\?:] )inet_ntop\s*\(\s*(?:[AP]F_INET6|10)\s*,)"),
+            "cleanup: replace inet_ntop(AF_INET6, ...) with %pI6",
+            lambda s: True,
+        ),
+        (
+            re.compile(r"((?<![\?:] )inet_ntoa)"),
+            "cleanup: replace inet_ntoa(...) with %pI4",
+            lambda s: True,
+        ),
+        (
+            re.compile(r"((?<![\?:] )ipaddr2str)"),
+            "cleanup: replace ipaddr2str(...) with %pIA",
+            lambda s: True,
+        ),
+        (
+            re.compile(r"((?<![\?:] )prefix2str)"),
+            "cleanup: replace prefix2str(...) with %pFX",
+            lambda s: True,
+        ),
+        (
+            re.compile(r"((?<![\?:] )prefix_mac2str)"),
+            "cleanup: replace prefix_mac2str(...) with %pEA",
+            lambda s: True,
+        ),
+        (
+            re.compile(r"((?<![\?:] )sockunion2str)"),
+            "cleanup: replace sockunion2str(...) with %pSU",
+            lambda s: True,
+        ),
+        # (re.compile(r'^(\s*__(?:func|FUNCTION|PRETTY_FUNCTION)__\s*)'), 'error: debug message starts with __func__', lambda s: (s.priority & 7 == 7) ),
     ]

     def check(self, wopt):
@@ -150,11 +198,11 @@ class XrefLogmsg(ELFDissectStruct, XrelfoJson):
            out = []
            for i, text in enumerate(items):
                if (i % 2) == 1:
-                    out.append('\033[41;37;1m%s\033[m' % repr(text)[1:-1])
+                    out.append("\033[41;37;1m%s\033[m" % repr(text)[1:-1])
                else:
                    out.append(repr(text)[1:-1])

-            excerpt = ''.join(out)
+            excerpt = "".join(out)
        else:
            excerpt = repr(itext)[1:-1]
        return excerpt
@ -175,70 +223,99 @@ class XrefLogmsg(ELFDissectStruct, XrelfoJson):
|
||||||
continue
|
continue
|
||||||
|
|
||||||
excerpt = fmt_msg(rex, self.args)
|
excerpt = fmt_msg(rex, self.args)
|
||||||
yield from self._warn_fmt('%s:\n\t"%s",\n\t%s' % (msg, repr(self.fmtstring)[1:-1], excerpt))
|
yield from self._warn_fmt(
|
||||||
|
'%s:\n\t"%s",\n\t%s' % (msg, repr(self.fmtstring)[1:-1], excerpt)
|
||||||
|
)
|
||||||
|
|
||||||
def dump(self):
|
def dump(self):
|
||||||
print('%-60s %s%s %-25s [EC %d] %s' % (
|
print(
|
||||||
'%s:%d %s()' % (self.xref.file, self.xref.line, self.xref.func),
|
"%-60s %s%s %-25s [EC %d] %s"
|
||||||
prios[self.priority & 7],
|
% (
|
||||||
priovals.get(self.priority & 0x30, ' '),
|
"%s:%d %s()" % (self.xref.file, self.xref.line, self.xref.func),
|
||||||
self.xref.xrefdata.uid, self.ec, self.fmtstring))
|
prios[self.priority & 7],
|
||||||
|
priovals.get(self.priority & 0x30, " "),
|
||||||
|
self.xref.xrefdata.uid,
|
||||||
|
self.ec,
|
||||||
|
self.fmtstring,
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
def to_dict(self, xrelfo):
|
def to_dict(self, xrelfo):
|
||||||
jsobj = dict([(i, getattr(self.xref, i)) for i in ['file', 'line', 'func']])
|
jsobj = dict([(i, getattr(self.xref, i)) for i in ["file", "line", "func"]])
|
||||||
if self.ec != 0:
|
if self.ec != 0:
|
||||||
jsobj['ec'] = self.ec
|
jsobj["ec"] = self.ec
|
||||||
jsobj['fmtstring'] = self.fmtstring
|
jsobj["fmtstring"] = self.fmtstring
|
||||||
jsobj['args'] = self.args
|
jsobj["args"] = self.args
|
||||||
jsobj['priority'] = self.priority & 7
|
jsobj["priority"] = self.priority & 7
|
||||||
jsobj['type'] = 'logmsg'
|
jsobj["type"] = "logmsg"
|
||||||
jsobj['binary'] = self._elfsect._elfwrap.orig_filename
|
jsobj["binary"] = self._elfsect._elfwrap.orig_filename
|
||||||
|
|
||||||
if self.priority & 0x10:
|
if self.priority & 0x10:
|
||||||
jsobj.setdefault('flags', []).append('errno')
|
jsobj.setdefault("flags", []).append("errno")
|
||||||
if self.priority & 0x20:
|
if self.priority & 0x20:
|
||||||
jsobj.setdefault('flags', []).append('getaddrinfo')
|
jsobj.setdefault("flags", []).append("getaddrinfo")
|
||||||
|
|
||||||
|
xrelfo["refs"].setdefault(self.xref.xrefdata.uid, []).append(jsobj)
|
||||||
|
|
||||||
xrelfo['refs'].setdefault(self.xref.xrefdata.uid, []).append(jsobj)
|
|
||||||
|
|
||||||
Xref.containers[XREFT_LOGMSG] = XrefLogmsg
|
Xref.containers[XREFT_LOGMSG] = XrefLogmsg
|
||||||
|
|
||||||
|
|
||||||
class CmdElement(ELFDissectStruct, XrelfoJson):
|
class CmdElement(ELFDissectStruct, XrelfoJson):
|
||||||
struct = 'cmd_element'
|
struct = "cmd_element"
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
super().__init__(*args, **kwargs)
|
super().__init__(*args, **kwargs)
|
||||||
|
|
||||||
def to_dict(self, xrelfo):
|
def to_dict(self, xrelfo):
|
||||||
jsobj = xrelfo['cli'].setdefault(self.name, {}).setdefault(self._elfsect._elfwrap.orig_filename, {})
|
jsobj = (
|
||||||
|
xrelfo["cli"]
|
||||||
|
.setdefault(self.name, {})
|
||||||
|
.setdefault(self._elfsect._elfwrap.orig_filename, {})
|
||||||
|
)
|
||||||
|
|
||||||
jsobj.update({
|
jsobj.update(
|
||||||
'string': self.string,
|
{
|
||||||
'doc': self.doc,
|
"string": self.string,
|
||||||
})
|
"doc": self.doc,
|
||||||
|
}
|
||||||
|
)
|
||||||
if self.attr:
|
if self.attr:
|
||||||
jsobj['attr'] = attr = self.attr
|
jsobj["attr"] = attr = self.attr
|
||||||
for attrname in CmdAttr.__members__:
|
for attrname in CmdAttr.__members__:
|
||||||
val = CmdAttr[attrname]
|
val = CmdAttr[attrname]
|
||||||
if attr & val:
|
if attr & val:
|
||||||
jsobj.setdefault('attrs', []).append(attrname.lower())
|
jsobj.setdefault("attrs", []).append(attrname.lower())
|
||||||
attr &= ~val
|
attr &= ~val
|
||||||
|
|
||||||
jsobj['defun'] = dict([(i, getattr(self.xref, i)) for i in ['file', 'line', 'func']])
|
jsobj["defun"] = dict(
|
||||||
|
[(i, getattr(self.xref, i)) for i in ["file", "line", "func"]]
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
Xref.containers[XREFT_DEFUN] = CmdElement
|
Xref.containers[XREFT_DEFUN] = CmdElement
|
||||||
|
|
||||||
|
|
||||||
class XrefInstallElement(ELFDissectStruct, XrelfoJson):
|
class XrefInstallElement(ELFDissectStruct, XrelfoJson):
|
||||||
struct = 'xref_install_element'
|
struct = "xref_install_element"
|
||||||
|
|
||||||
def to_dict(self, xrelfo):
|
def to_dict(self, xrelfo):
|
||||||
jsobj = xrelfo['cli'].setdefault(self.cmd_element.name, {}).setdefault(self._elfsect._elfwrap.orig_filename, {})
|
jsobj = (
|
||||||
nodes = jsobj.setdefault('nodes', [])
|
xrelfo["cli"]
|
||||||
|
.setdefault(self.cmd_element.name, {})
|
||||||
|
.setdefault(self._elfsect._elfwrap.orig_filename, {})
|
||||||
|
)
|
||||||
|
nodes = jsobj.setdefault("nodes", [])
|
||||||
|
|
||||||
|
nodes.append(
|
||||||
|
{
|
||||||
|
"node": self.node_type,
|
||||||
|
"install": dict(
|
||||||
|
[(i, getattr(self.xref, i)) for i in ["file", "line", "func"]]
|
||||||
|
),
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
nodes.append({
|
|
||||||
'node': self.node_type,
|
|
||||||
'install': dict([(i, getattr(self.xref, i)) for i in ['file', 'line', 'func']]),
|
|
||||||
})
|
|
||||||
|
|
||||||
Xref.containers[XREFT_INSTALL_ELEMENT] = XrefInstallElement
|
Xref.containers[XREFT_INSTALL_ELEMENT] = XrefInstallElement
|
||||||
|
|
||||||
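The reflowed call chains and dict literals in this hunk are plain black output rather than hand formatting. As a rough illustration (not part of this commit), the same layout can be reproduced with black's format_str; the snippet below assumes the black package is importable, and the SRC string is a made-up one-liner modeled on the chained setdefault() calls above.

import black

# Made-up over-long line similar to the chained setdefault() calls above.
SRC = (
    "jsobj = xrelfo['cli'].setdefault(self.name, {})"
    ".setdefault(self._elfsect._elfwrap.orig_filename, {})\n"
)

# format_str() applies the same rules that running "black python/" applies
# to whole files; FileMode() selects the defaults (88-column line length).
print(black.format_str(SRC, mode=black.FileMode()))

Running it prints the line wrapped into the parenthesized, one-call-per-line form seen in the new side of the diff.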
@@ -255,86 +332,90 @@ fieldapply()

class Xrelfo(dict):
def __init__(self):
-super().__init__({
-'refs': {},
-'cli': {},
-})
+super().__init__(
+{
+"refs": {},
+"cli": {},
+}
+)
self._xrefs = []

def load_file(self, filename):
orig_filename = filename
-if filename.endswith('.la') or filename.endswith('.lo'):
+if filename.endswith(".la") or filename.endswith(".lo"):
-with open(filename, 'r') as fd:
+with open(filename, "r") as fd:
for line in fd:
line = line.strip()
-if line.startswith('#') or line == '' or '=' not in line:
+if line.startswith("#") or line == "" or "=" not in line:
continue

-var, val = line.split('=', 1)
+var, val = line.split("=", 1)
-if var not in ['library_names', 'pic_object']:
+if var not in ["library_names", "pic_object"]:
continue
if val.startswith("'") or val.startswith('"'):
val = val[1:-1]

-if var == 'pic_object':
+if var == "pic_object":
filename = os.path.join(os.path.dirname(filename), val)
break

val = val.strip().split()[0]
-filename = os.path.join(os.path.dirname(filename), '.libs', val)
+filename = os.path.join(os.path.dirname(filename), ".libs", val)
break
else:
-raise ValueError('could not process libtool file "%s"' % orig_filename)
+raise ValueError(
+'could not process libtool file "%s"' % orig_filename
+)

while True:
-with open(filename, 'rb') as fd:
+with open(filename, "rb") as fd:
hdr = fd.read(4)

-if hdr == b'\x7fELF':
+if hdr == b"\x7fELF":
self.load_elf(filename, orig_filename)
return

-if hdr[:2] == b'#!':
+if hdr[:2] == b"#!":
path, name = os.path.split(filename)
-filename = os.path.join(path, '.libs', name)
+filename = os.path.join(path, ".libs", name)
continue

-if hdr[:1] == b'{':
+if hdr[:1] == b"{":
-with open(filename, 'r') as fd:
+with open(filename, "r") as fd:
self.load_json(fd)
return

-raise ValueError('cannot determine file type for %s' % (filename))
+raise ValueError("cannot determine file type for %s" % (filename))

def load_elf(self, filename, orig_filename):
edf = ELFDissectFile(filename)
edf.orig_filename = orig_filename

-note = edf._elffile.find_note('FRRouting', 'XREF')
+note = edf._elffile.find_note("FRRouting", "XREF")
if note is not None:
-endian = '>' if edf._elffile.bigendian else '<'
+endian = ">" if edf._elffile.bigendian else "<"
mem = edf._elffile[note]
if edf._elffile.elfclass == 64:
-start, end = struct.unpack(endian + 'QQ', mem)
+start, end = struct.unpack(endian + "QQ", mem)
start += note.start
end += note.start + 8
else:
-start, end = struct.unpack(endian + 'II', mem)
+start, end = struct.unpack(endian + "II", mem)
start += note.start
end += note.start + 4

ptrs = edf.iter_data(XrefPtr, slice(start, end))

else:
-xrefarray = edf.get_section('xref_array')
+xrefarray = edf.get_section("xref_array")
if xrefarray is None:
-raise ValueError('file has neither xref note nor xref_array section')
+raise ValueError("file has neither xref note nor xref_array section")

ptrs = xrefarray.iter_data(XrefPtr)

for ptr in ptrs:
if ptr.xref is None:
-print('NULL xref')
+print("NULL xref")
continue
self._xrefs.append(ptr.xref)

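Beyond the quote changes, this hunk is a compact summary of how load_file() decides what it was handed: a libtool .la/.lo file, an ELF object, a libtool wrapper script (retry under .libs/), or a previously dumped JSON file. The following is a condensed, self-contained sketch of just the magic-byte dispatch; the function name and return values are illustrative and not taken from the source.

import os

def sniff(filename):
    # Mirrors the checks in load_file() above: four bytes are enough to
    # tell the supported input types apart.
    with open(filename, "rb") as fd:
        hdr = fd.read(4)
    if hdr == b"\x7fELF":
        return "elf"
    if hdr[:2] == b"#!":
        # libtool wrapper script; the real binary sits in .libs/
        path, name = os.path.split(filename)
        return ("retry", os.path.join(path, ".libs", name))
    if hdr[:1] == b"{":
        return "json"
    raise ValueError("cannot determine file type for %s" % filename)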
@@ -347,15 +428,15 @@ class Xrelfo(dict):

def load_json(self, fd):
data = json.load(fd)
-for uid, items in data['refs'].items():
+for uid, items in data["refs"].items():
-myitems = self['refs'].setdefault(uid, [])
+myitems = self["refs"].setdefault(uid, [])
for item in items:
if item in myitems:
continue
myitems.append(item)

-for cmd, items in data['cli'].items():
+for cmd, items in data["cli"].items():
-self['cli'].setdefault(cmd, {}).update(items)
+self["cli"].setdefault(cmd, {}).update(items)

return data

@@ -363,24 +444,33 @@ class Xrelfo(dict):
for xref in self._xrefs:
yield from xref.check(checks)


def main():
-argp = argparse.ArgumentParser(description = 'FRR xref ELF extractor')
+argp = argparse.ArgumentParser(description="FRR xref ELF extractor")
-argp.add_argument('-o', dest='output', type=str, help='write JSON output')
+argp.add_argument("-o", dest="output", type=str, help="write JSON output")
-argp.add_argument('--out-by-file', type=str, help='write by-file JSON output')
+argp.add_argument("--out-by-file", type=str, help="write by-file JSON output")
-argp.add_argument('-c', dest='vtysh_cmds', type=str, help='write vtysh_cmd.c')
+argp.add_argument("-c", dest="vtysh_cmds", type=str, help="write vtysh_cmd.c")
-argp.add_argument('-Wlog-format', action='store_const', const=True)
+argp.add_argument("-Wlog-format", action="store_const", const=True)
-argp.add_argument('-Wlog-args', action='store_const', const=True)
+argp.add_argument("-Wlog-args", action="store_const", const=True)
-argp.add_argument('-Werror', action='store_const', const=True)
+argp.add_argument("-Werror", action="store_const", const=True)
-argp.add_argument('--profile', action='store_const', const=True)
+argp.add_argument("--profile", action="store_const", const=True)
-argp.add_argument('binaries', metavar='BINARY', nargs='+', type=str, help='files to read (ELF files or libtool objects)')
+argp.add_argument(
+"binaries",
+metavar="BINARY",
+nargs="+",
+type=str,
+help="files to read (ELF files or libtool objects)",
+)
args = argp.parse_args()

if args.profile:
import cProfile
-cProfile.runctx('_main(args)', globals(), {'args': args}, sort='cumtime')
+cProfile.runctx("_main(args)", globals(), {"args": args}, sort="cumtime")
else:
_main(args)


def _main(args):
errors = 0
xrelfo = Xrelfo()
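main() only changes in formatting here: it still gates profiling behind --profile and otherwise calls _main() directly. A minimal stand-alone sketch of that pattern, where work() and the hard-coded flag are placeholders for _main(args) and the parsed option:

import cProfile

def work():
    # stand-in for _main(args)
    sum(range(100000))

profile = False  # would come from argparse ("--profile") in the real script
if profile:
    # sort="cumtime" matches the call in the hunk above
    cProfile.runctx("work()", globals(), {}, sort="cumtime")
else:
    work()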
@@ -390,60 +480,59 @@ def _main(args):
xrelfo.load_file(fn)
except:
errors += 1
-sys.stderr.write('while processing %s:\n' % (fn))
+sys.stderr.write("while processing %s:\n" % (fn))
traceback.print_exc()

for option in dir(args):
-if option.startswith('W') and option != 'Werror':
+if option.startswith("W") and option != "Werror":
checks = sorted(xrelfo.check(args))
-sys.stderr.write(''.join([c[-1] for c in checks]))
+sys.stderr.write("".join([c[-1] for c in checks]))

if args.Werror and len(checks) > 0:
errors += 1
break

-refs = xrelfo['refs']
+refs = xrelfo["refs"]

counts = {}
for k, v in refs.items():
-strs = set([i['fmtstring'] for i in v])
+strs = set([i["fmtstring"] for i in v])
if len(strs) != 1:
-print('\033[31;1m%s\033[m' % k)
+print("\033[31;1m%s\033[m" % k)
counts[k] = len(v)

out = xrelfo
outbyfile = {}
for uid, locs in refs.items():
for loc in locs:
-filearray = outbyfile.setdefault(loc['file'], [])
+filearray = outbyfile.setdefault(loc["file"], [])
loc = dict(loc)
-del loc['file']
+del loc["file"]
filearray.append(loc)

for k in outbyfile.keys():
-outbyfile[k] = sorted(outbyfile[k], key=lambda x: x['line'])
+outbyfile[k] = sorted(outbyfile[k], key=lambda x: x["line"])

if errors:
sys.exit(1)

if args.output:
-with open(args.output + '.tmp', 'w') as fd:
+with open(args.output + ".tmp", "w") as fd:
json.dump(out, fd, indent=2, sort_keys=True, **json_dump_args)
-os.rename(args.output + '.tmp', args.output)
+os.rename(args.output + ".tmp", args.output)

if args.out_by_file:
-with open(args.out_by_file + '.tmp', 'w') as fd:
+with open(args.out_by_file + ".tmp", "w") as fd:
json.dump(outbyfile, fd, indent=2, sort_keys=True, **json_dump_args)
-os.rename(args.out_by_file + '.tmp', args.out_by_file)
+os.rename(args.out_by_file + ".tmp", args.out_by_file)

if args.vtysh_cmds:
-with open(args.vtysh_cmds + '.tmp', 'w') as fd:
+with open(args.vtysh_cmds + ".tmp", "w") as fd:
CommandEntry.run(out, fd)
-os.rename(args.vtysh_cmds + '.tmp', args.vtysh_cmds)
+os.rename(args.vtysh_cmds + ".tmp", args.vtysh_cmds)
if args.Werror and CommandEntry.warn_counter:
sys.exit(1)


-if __name__ == '__main__':
+if __name__ == "__main__":
main()
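The three output paths at the end of _main() all use the same write-to-temp-then-rename idiom, so a partially written file never replaces a good one. A small sketch of that idiom in isolation; the path and payload are placeholders, and the **json_dump_args extras are left out.

import json
import os

def write_json_atomically(path, payload):
    tmp = path + ".tmp"
    with open(tmp, "w") as fd:
        json.dump(payload, fd, indent=2, sort_keys=True)
    # rename() is atomic on POSIX when both paths are on the same filesystem
    os.rename(tmp, path)

write_json_atomically("frr.xref", {"refs": {}, "cli": {}})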