diff --git a/python/callgraph-dot.py b/python/callgraph-dot.py index f80766a080..7d9825bd10 100644 --- a/python/callgraph-dot.py +++ b/python/callgraph-dot.py @@ -321,15 +321,31 @@ extra_info = { "lsp_processq_complete", ], # zebra - main WQ - ("mq_add_handler", "work_queue_add"): ["meta_queue_process",], - ("meta_queue_process", "work_queue_add"): ["meta_queue_process",], + ("mq_add_handler", "work_queue_add"): [ + "meta_queue_process", + ], + ("meta_queue_process", "work_queue_add"): [ + "meta_queue_process", + ], # bgpd - label pool WQ - ("bgp_lp_get", "work_queue_add"): ["lp_cbq_docallback",], - ("bgp_lp_event_chunk", "work_queue_add"): ["lp_cbq_docallback",], - ("bgp_lp_event_zebra_up", "work_queue_add"): ["lp_cbq_docallback",], + ("bgp_lp_get", "work_queue_add"): [ + "lp_cbq_docallback", + ], + ("bgp_lp_event_chunk", "work_queue_add"): [ + "lp_cbq_docallback", + ], + ("bgp_lp_event_zebra_up", "work_queue_add"): [ + "lp_cbq_docallback", + ], # bgpd - main WQ - ("bgp_process", "work_queue_add"): ["bgp_process_wq", "bgp_processq_del",], - ("bgp_add_eoiu_mark", "work_queue_add"): ["bgp_process_wq", "bgp_processq_del",], + ("bgp_process", "work_queue_add"): [ + "bgp_process_wq", + "bgp_processq_del", + ], + ("bgp_add_eoiu_mark", "work_queue_add"): [ + "bgp_process_wq", + "bgp_processq_del", + ], # clear node WQ ("bgp_clear_route_table", "work_queue_add"): [ "bgp_clear_route_node", @@ -337,7 +353,9 @@ extra_info = { "bgp_clear_node_complete", ], # rfapi WQs - ("rfapi_close", "work_queue_add"): ["rfapi_deferred_close_workfunc",], + ("rfapi_close", "work_queue_add"): [ + "rfapi_deferred_close_workfunc", + ], ("rfapiRibUpdatePendingNode", "work_queue_add"): [ "rfapiRibDoQueuedCallback", "rfapiRibQueueItemDelete", diff --git a/python/clippy/__init__.py b/python/clippy/__init__.py index 281e2bb3c6..7c73598e5d 100644 --- a/python/clippy/__init__.py +++ b/python/clippy/__init__.py @@ -36,7 +36,10 @@ from _clippy import ( ) -frr_top_src = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) +frr_top_src = os.path.dirname( + os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +) + def graph_iterate(graph): """iterator yielding all nodes of a graph diff --git a/python/clippy/elf.py b/python/clippy/elf.py index 02cb2e38b3..5aaba832a9 100644 --- a/python/clippy/elf.py +++ b/python/clippy/elf.py @@ -16,7 +16,7 @@ # with this program; see the file COPYING; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -''' +""" Wrapping layer and additional utility around _clippy.ELFFile. Essentially, the C bits have the low-level ELF access bits that should be @@ -28,7 +28,7 @@ across architecture, word size and even endianness boundaries. Both the C module (through GElf_*) and this code (cf. struct.unpack format mangling in ELFDissectStruct) will take appropriate measures to flip and resize fields as needed. 
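# (Aside, for readers of this wrapper: the "format mangling" mentioned above
#  boils down to choosing a pointer width and a byte-order prefix before the
#  raw bytes reach struct.unpack.  A minimal standalone sketch of that idea,
#  illustrative only, not clippy API:)
import struct

def unpack_pointer(raw, elfclass, bigendian):
    # 32-bit ELF stores pointers as 4-byte "I", 64-bit as 8-byte "Q";
    # ">" / "<" select big- or little-endian interpretation.
    ptrtype = "I" if elfclass == 32 else "Q"
    endian = ">" if bigendian else "<"
    return struct.unpack(endian + ptrtype, raw[: struct.calcsize(ptrtype)])[0]

# the same four bytes decode differently depending on declared endianness
assert unpack_pointer(b"\x01\x00\x00\x00", 32, bigendian=False) == 1
assert unpack_pointer(b"\x00\x00\x00\x01", 32, bigendian=True) == 1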
-''' +""" import struct from collections import OrderedDict @@ -40,16 +40,18 @@ from _clippy import ELFFile, ELFAccessError # data access # + class ELFNull(object): - ''' + """ NULL pointer, returned instead of ELFData - ''' + """ + def __init__(self): self.symname = None self._dstsect = None def __repr__(self): - return '' + return "" def __hash__(self): return hash(None) @@ -57,33 +59,37 @@ class ELFNull(object): def get_string(self): return None + class ELFUnresolved(object): - ''' + """ Reference to an unresolved external symbol, returned instead of ELFData :param symname: name of the referenced symbol :param addend: offset added to the symbol, normally zero - ''' + """ + def __init__(self, symname, addend): self.addend = addend self.symname = symname self._dstsect = None def __repr__(self): - return '' % (self.symname, self.addend) + return "" % (self.symname, self.addend) def __hash__(self): return hash((self.symname, self.addend)) + class ELFData(object): - ''' + """ Actual data somewhere in the ELF file. :type dstsect: ELFSubset :param dstsect: container data area (section or entire file) :param dstoffs: byte offset into dstsect :param dstlen: byte size of object, or None if unknown, open-ended or string - ''' + """ + def __init__(self, dstsect, dstoffs, dstlen): self._dstsect = dstsect self._dstoffs = dstoffs @@ -91,62 +97,74 @@ class ELFData(object): self.symname = None def __repr__(self): - return '' % (self._dstsect.name, self._dstoffs, self._dstlen or -1) + return "" % ( + self._dstsect.name, + self._dstoffs, + self._dstlen or -1, + ) def __hash__(self): return hash((self._dstsect, self._dstoffs)) def get_string(self): - ''' + """ Interpret as C string / null terminated UTF-8 and get the actual text. - ''' + """ try: - return self._dstsect[self._dstoffs:str].decode('UTF-8') + return self._dstsect[self._dstoffs : str].decode("UTF-8") except: - import pdb; pdb.set_trace() + import pdb + + pdb.set_trace() def get_data(self, reflen): - ''' + """ Interpret as some structure (and check vs. expected length) :param reflen: expected size of the object, compared against actual size (which is only known in rare cases, mostly when directly accessing a symbol since symbols have their destination object size recorded) - ''' + """ if self._dstlen is not None and self._dstlen != reflen: - raise ValueError('symbol size mismatch (got %d, expected %d)' % (self._dstlen, reflen)) - return self._dstsect[self._dstoffs:self._dstoffs+reflen] + raise ValueError( + "symbol size mismatch (got %d, expected %d)" % (self._dstlen, reflen) + ) + return self._dstsect[self._dstoffs : self._dstoffs + reflen] def offset(self, offs, within_symbol=False): - ''' + """ Get another ELFData at an offset :param offs: byte offset, can be negative (e.g. in container_of) :param within_symbol: retain length information - ''' + """ if self._dstlen is None or not within_symbol: return ELFData(self._dstsect, self._dstoffs + offs, None) else: return ELFData(self._dstsect, self._dstoffs + offs, self._dstlen - offs) + # # dissection data items # + class ELFDissectData(object): - ''' + """ Common bits for ELFDissectStruct and ELFDissectUnion - ''' + """ def __len__(self): - ''' + """ Used for boolean evaluation, e.g. "if struct: ..." 
- ''' - return not (isinstance(self._data, ELFNull) or isinstance(self._data, ELFUnresolved)) + """ + return not ( + isinstance(self._data, ELFNull) or isinstance(self._data, ELFUnresolved) + ) def container_of(self, parent, fieldname): - ''' + """ Assume this struct is embedded in a larger struct and get at the larger Python ``self.container_of(a, b)`` = C ``container_of(self, a, b)`` @@ -154,25 +172,26 @@ class ELFDissectData(object): :param parent: class (not instance) of the larger struct :param fieldname: fieldname that refers back to this :returns: instance of parent, with fieldname set to this object - ''' + """ offset = 0 - if not hasattr(parent, '_efields'): + if not hasattr(parent, "_efields"): parent._setup_efields() for field in parent._efields[self.elfclass]: if field[0] == fieldname: break spec = field[1] - if spec == 'P': - spec = 'I' if self.elfclass == 32 else 'Q' + if spec == "P": + spec = "I" if self.elfclass == 32 else "Q" offset += struct.calcsize(spec) else: - raise AttributeError('%r not found in %r.fields' % (fieldname, parent)) + raise AttributeError("%r not found in %r.fields" % (fieldname, parent)) + + return parent(self._data.offset(-offset), replace={fieldname: self}) - return parent(self._data.offset(-offset), replace = {fieldname: self}) class ELFDissectStruct(ELFDissectData): - ''' + """ Decode and provide access to a struct somewhere in the ELF file Handles pointers and strings somewhat nicely. Create a subclass for each @@ -205,30 +224,31 @@ class ELFDissectStruct(ELFDissectData): .. attribute:: fieldrename Dictionary to rename fields, useful if fields comes from tiabwarfo.py. - ''' + """ class Pointer(object): - ''' + """ Quick wrapper for pointers to further structs This is just here to avoid going into infinite loops when loading structs that have pointers to each other (e.g. struct xref <--> struct xrefdata.) The pointer destination is only instantiated when actually accessed. 
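# (Illustration of the container_of() arithmetic above, outside the ELF
#  machinery: the embedded field's offset is the sum of the sizes of the
#  fields preceding it, with pointer fields resized per ELF class, and the
#  parent is then decoded that many bytes earlier.  Hypothetical helper,
#  assuming per-field packing with no padding, as _setup_efields does:)
import struct

def embedded_field_offset(fields, fieldname, elfclass):
    offset = 0
    for name, spec in fields:
        if name == fieldname:
            return offset
        if spec == "P":  # pointers shrink/grow with the ELF class
            spec = "I" if elfclass == 32 else "Q"
        offset += struct.calcsize(spec)
    raise AttributeError("%r not found in fields" % (fieldname,))

# a struct laid out as [u32, pointer, u32]: the last field starts at
# offset 8 on 32-bit ELF and offset 12 on 64-bit ELF
layout = [("a", "I"), ("ptr", "P"), ("b", "I")]
assert embedded_field_offset(layout, "b", 32) == 8
assert embedded_field_offset(layout, "b", 64) == 12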
- ''' + """ + def __init__(self, cls, ptr): self.cls = cls self.ptr = ptr def __repr__(self): - return '' % (self.cls.__name__, self.ptr) + return "" % (self.cls.__name__, self.ptr) def __call__(self): if isinstance(self.ptr, ELFNull): return None return self.cls(self.ptr) - def __new__(cls, dataptr, parent = None, replace = None): + def __new__(cls, dataptr, parent=None, replace=None): if dataptr._dstsect is None: return super().__new__(cls) @@ -239,19 +259,19 @@ class ELFDissectStruct(ELFDissectData): dataptr._dstsect._pointers[(cls, dataptr)] = obj return obj - replacements = 'lLnN' + replacements = "lLnN" @classmethod def _preproc_structspec(cls, elfclass, spec): elfbits = elfclass - if hasattr(spec, 'calcsize'): - spec = '%ds' % (spec.calcsize(elfclass),) + if hasattr(spec, "calcsize"): + spec = "%ds" % (spec.calcsize(elfclass),) if elfbits == 32: - repl = ['i', 'I'] + repl = ["i", "I"] else: - repl = ['q', 'Q'] + repl = ["q", "Q"] for c in cls.replacements: spec = spec.replace(c, repl[int(c.isupper())]) return spec @@ -269,8 +289,8 @@ class ELFDissectStruct(ELFDissectData): size += struct.calcsize(newf[1]) cls._esize[elfclass] = size - def __init__(self, dataptr, parent = None, replace = None): - if not hasattr(self.__class__, '_efields'): + def __init__(self, dataptr, parent=None, replace=None): + if not hasattr(self.__class__, "_efields"): self._setup_efields() self._fdata = None @@ -290,12 +310,12 @@ class ELFDissectStruct(ELFDissectData): # need to correlate output from struct.unpack with extra metadata # about the particular fields, so note down byte offsets (in locs) # and tuple indices of pointers (in ptrs) - pspec = '' + pspec = "" locs = {} ptrs = set() for idx, spec in enumerate(pspecl): - if spec == 'P': + if spec == "P": ptrs.add(idx) spec = self._elfsect.ptrtype @@ -326,7 +346,9 @@ class ELFDissectStruct(ELFDissectData): self._fdata[name] = replace[name] continue - if isinstance(self.fields[i][1], type) and issubclass(self.fields[i][1], ELFDissectData): + if isinstance(self.fields[i][1], type) and issubclass( + self.fields[i][1], ELFDissectData + ): dataobj = self.fields[i][1](dataptr.offset(locs[i]), self) self._fdata[name] = dataobj continue @@ -353,35 +375,39 @@ class ELFDissectStruct(ELFDissectData): def __repr__(self): if not isinstance(self._data, ELFData): - return '<%s: %r>' % (self.__class__.__name__, self._data) - return '<%s: %s>' % (self.__class__.__name__, - ', '.join(['%s=%r' % t for t in self._fdata.items()])) + return "<%s: %r>" % (self.__class__.__name__, self._data) + return "<%s: %s>" % ( + self.__class__.__name__, + ", ".join(["%s=%r" % t for t in self._fdata.items()]), + ) @classmethod def calcsize(cls, elfclass): - ''' + """ Sum up byte size of this struct Wraps struct.calcsize with some extra features. - ''' - if not hasattr(cls, '_efields'): + """ + if not hasattr(cls, "_efields"): cls._setup_efields() - pspec = ''.join([f[1] for f in cls._efields[elfclass]]) + pspec = "".join([f[1] for f in cls._efields[elfclass]]) - ptrtype = 'I' if elfclass == 32 else 'Q' - pspec = pspec.replace('P', ptrtype) + ptrtype = "I" if elfclass == 32 else "Q" + pspec = pspec.replace("P", ptrtype) return struct.calcsize(pspec) + class ELFDissectUnion(ELFDissectData): - ''' + """ Decode multiple structs in the same place. Not currently used (and hence not tested.) Worked at some point but not needed anymore and may be borked now. Remove this comment when using. 
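# (Sketch of what the "lLnN" replacements plus "P" handling above amount to:
#  architecture-dependent codes are rewritten to fixed-width ones before
#  struct.calcsize / struct.unpack ever see them.  Simplified and hypothetical,
#  the field specs are made up; the "<" prefix only disables native padding so
#  the expected sizes come out exact:)
import struct

def resolve_spec(spec, elfclass):
    repl = ("i", "I") if elfclass == 32 else ("q", "Q")
    for c in "lLnN":
        spec = spec.replace(c, repl[int(c.isupper())])
    return spec.replace("P", repl[1])

assert struct.calcsize("<" + resolve_spec("PIl", 32)) == 4 + 4 + 4
assert struct.calcsize("<" + resolve_spec("PIl", 64)) == 8 + 4 + 8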
- ''' - def __init__(self, dataptr, parent = None): + """ + + def __init__(self, dataptr, parent=None): self._dataptr = dataptr self._parent = parent self.members = [] @@ -391,20 +417,25 @@ class ELFDissectUnion(ELFDissectData): setattr(self, name, item) def __repr__(self): - return '<%s: %s>' % (self.__class__.__name__, ', '.join([repr(i) for i in self.members])) + return "<%s: %s>" % ( + self.__class__.__name__, + ", ".join([repr(i) for i in self.members]), + ) @classmethod def calcsize(cls, elfclass): return max([member.calcsize(elfclass) for name, member in cls.members]) + # # wrappers for spans of ELF data # + class ELFSubset(object): - ''' + """ Common abstract base for section-level and file-level access. - ''' + """ def __init__(self): super().__init__() @@ -415,7 +446,7 @@ class ELFSubset(object): return hash(self.name) def __getitem__(self, k): - ''' + """ Read data from slice Subscript **must** be a slice; a simple index will not return a byte @@ -425,22 +456,22 @@ class ELFSubset(object): - `this[123:456]` - extract specific range - `this[123:str]` - extract until null byte. The slice stop value is the `str` type (or, technically, `unicode`.) - ''' + """ return self._obj[k] def getreloc(self, offset): - ''' + """ Check for a relocation record at the specified offset. - ''' + """ return self._obj.getreloc(offset) - def iter_data(self, scls, slice_ = slice(None)): - ''' + def iter_data(self, scls, slice_=slice(None)): + """ Assume an array of structs present at a particular slice and decode :param scls: ELFDissectData subclass for the struct :param slice_: optional range specification - ''' + """ size = scls.calcsize(self._elffile.elfclass) offset = slice_.start or 0 @@ -453,7 +484,7 @@ class ELFSubset(object): offset += size def pointer(self, offset): - ''' + """ Try to dereference a pointer value This checks whether there's a relocation at the given offset and @@ -463,10 +494,12 @@ class ELFSubset(object): :param offset: byte offset from beginning of section, or virtual address in file :returns: ELFData wrapping pointed-to object - ''' + """ ptrsize = struct.calcsize(self.ptrtype) - data = struct.unpack(self.endian + self.ptrtype, self[offset:offset + ptrsize])[0] + data = struct.unpack( + self.endian + self.ptrtype, self[offset : offset + ptrsize] + )[0] reloc = self.getreloc(offset) dstsect = None @@ -497,14 +530,15 @@ class ELFSubset(object): # wrap_data is different between file & section return self._wrap_data(data, dstsect) + class ELFDissectSection(ELFSubset): - ''' + """ Access the contents of an ELF section like ``.text`` or ``.data`` :param elfwrap: ELFDissectFile wrapper for the file :param idx: section index in section header table :param section: section object from C module - ''' + """ def __init__(self, elfwrap, idx, section): super().__init__() @@ -524,8 +558,9 @@ class ELFDissectSection(ELFSubset): dstsect = self._elfwrap.get_section(dstsect.idx) return ELFData(dstsect, offs, None) + class ELFDissectFile(ELFSubset): - ''' + """ Access the contents of an ELF file. Note that offsets for array subscript and relocation/pointer access are @@ -537,7 +572,7 @@ class ELFDissectFile(ELFSubset): address like 0x400000 on x86. 
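# (The "this[offset:str]" subscript convention documented in __getitem__ above
#  is unusual enough to deserve a tiny illustration.  Toy stand-in only: the
#  real ELFSubset defers to the C module and decodes UTF-8 in get_string().)
class FakeSection:
    def __init__(self, blob):
        self.blob = blob

    def __getitem__(self, k):
        # a slice whose stop is the str type means "read up to the NUL byte"
        if isinstance(k, slice) and k.stop is str:
            end = self.blob.index(b"\x00", k.start)
            return self.blob[k.start : end]
        return self.blob[k]

sect = FakeSection(b"zebra\x00bgpd\x00")
assert sect[0:str] == b"zebra"
assert sect[6:str] == b"bgpd"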
:param filename: ELF file to open - ''' + """ def __init__(self, filename): super().__init__() @@ -546,8 +581,8 @@ class ELFDissectFile(ELFSubset): self._elffile = self._obj = ELFFile(filename) self._sections = {} - self.ptrtype = 'I' if self._elffile.elfclass == 32 else 'Q' - self.endian = '>' if self._elffile.bigendian else '<' + self.ptrtype = "I" if self._elffile.elfclass == 32 else "Q" + self.endian = ">" if self._elffile.bigendian else "<" @property def _elfwrap(self): @@ -557,9 +592,9 @@ class ELFDissectFile(ELFSubset): return ELFData(self, data, None) def get_section(self, secname): - ''' + """ Look up section by name or index - ''' + """ if isinstance(secname, int): sh_idx = secname section = self._elffile.get_section_idx(secname) diff --git a/python/clippy/uidhash.py b/python/clippy/uidhash.py index bf994d389e..0fd886221a 100644 --- a/python/clippy/uidhash.py +++ b/python/clippy/uidhash.py @@ -19,13 +19,14 @@ import struct from hashlib import sha256 -def bititer(data, bits, startbit = True): - ''' + +def bititer(data, bits, startbit=True): + """ just iterate the individual bits out from a bytes object if startbit is True, an '1' bit is inserted at the very beginning goes at a time, starts at LSB. - ''' + """ bitavail, v = 0, 0 if startbit and len(data) > 0: v = data.pop(0) @@ -41,31 +42,33 @@ def bititer(data, bits, startbit = True): bitavail -= bits v >>= bits + def base32c(data): - ''' + """ Crockford base32 with extra dashes - ''' + """ chs = "0123456789ABCDEFGHJKMNPQRSTVWXYZ" - o = '' + o = "" if type(data) == str: data = [ord(v) for v in data] else: data = list(data) for i, bits in enumerate(bititer(data, 5)): if i == 5: - o = o + '-' + o = o + "-" elif i == 10: break o = o + chs[bits] return o -def uidhash(filename, hashstr, hashu32a, hashu32b): - ''' - xref Unique ID hash used in FRRouting - ''' - filename = '/'.join(filename.rsplit('/')[-2:]) - hdata = filename.encode('UTF-8') + hashstr.encode('UTF-8') - hdata += struct.pack('>II', hashu32a, hashu32b) +def uidhash(filename, hashstr, hashu32a, hashu32b): + """ + xref Unique ID hash used in FRRouting + """ + filename = "/".join(filename.rsplit("/")[-2:]) + + hdata = filename.encode("UTF-8") + hashstr.encode("UTF-8") + hdata += struct.pack(">II", hashu32a, hashu32b) i = sha256(hdata).digest() return base32c(i) diff --git a/python/makefile.py b/python/makefile.py index 7a682615ef..573871fb68 100644 --- a/python/makefile.py +++ b/python/makefile.py @@ -161,7 +161,10 @@ for clippy_file in clippy_scan: # combine daemon .xref files into frr.xref out_lines.append("") xref_targets = [ - target for target in xref_targets if target not in [ + target + for target in xref_targets + if target + not in [ "bgpd/rfp-example/rfptest/rfptest", "pimd/mtracebis", "tools/ssd", diff --git a/python/runtests.py b/python/runtests.py index bcf650b329..70deaa35d0 100644 --- a/python/runtests.py +++ b/python/runtests.py @@ -5,9 +5,11 @@ import os try: import _clippy except ImportError: - sys.stderr.write('''these tests need to be run with the _clippy C extension + sys.stderr.write( + """these tests need to be run with the _clippy C extension module available. Try running "clippy runtests.py ...". 
-''') +""" + ) sys.exit(1) os.chdir(os.path.dirname(os.path.abspath(__file__))) diff --git a/python/test_xrelfo.py b/python/test_xrelfo.py index 3ae24ea7b3..3379959dc1 100644 --- a/python/test_xrelfo.py +++ b/python/test_xrelfo.py @@ -22,20 +22,21 @@ import pytest from pprint import pprint root = os.path.dirname(os.path.dirname(__file__)) -sys.path.append(os.path.join(root, 'python')) +sys.path.append(os.path.join(root, "python")) import xrelfo from clippy import elf, uidhash + def test_uidhash(): - assert uidhash.uidhash("lib/test_xref.c", "logging call", 3, 0) \ - == 'H7KJB-67TBH' + assert uidhash.uidhash("lib/test_xref.c", "logging call", 3, 0) == "H7KJB-67TBH" + def test_xrelfo_other(): for data in [ - elf.ELFNull(), - elf.ELFUnresolved('somesym', 0), - ]: + elf.ELFNull(), + elf.ELFUnresolved("somesym", 0), + ]: dissect = xrelfo.XrefPtr(data) print(repr(dissect)) @@ -43,9 +44,10 @@ def test_xrelfo_other(): with pytest.raises(AttributeError): dissect.xref + def test_xrelfo_obj(): xrelfo_ = xrelfo.Xrelfo() - edf = xrelfo_.load_elf(os.path.join(root, 'lib/.libs/zclient.o'), 'zclient.lo') + edf = xrelfo_.load_elf(os.path.join(root, "lib/.libs/zclient.o"), "zclient.lo") xrefs = xrelfo_._xrefs with pytest.raises(elf.ELFAccessError): @@ -54,12 +56,13 @@ def test_xrelfo_obj(): pprint(xrefs[0]) pprint(xrefs[0]._data) + def test_xrelfo_bin(): xrelfo_ = xrelfo.Xrelfo() - edf = xrelfo_.load_elf(os.path.join(root, 'lib/.libs/libfrr.so'), 'libfrr.la') + edf = xrelfo_.load_elf(os.path.join(root, "lib/.libs/libfrr.so"), "libfrr.la") xrefs = xrelfo_._xrefs - assert edf[0:4] == b'\x7fELF' + assert edf[0:4] == b"\x7fELF" pprint(xrefs[0]) pprint(xrefs[0]._data) diff --git a/python/tiabwarfo.py b/python/tiabwarfo.py index 4a6cd6ad77..b19c756738 100644 --- a/python/tiabwarfo.py +++ b/python/tiabwarfo.py @@ -23,10 +23,19 @@ import re import argparse import json -structs = ['xref', 'xref_logmsg', 'xref_threadsched', 'xref_install_element', 'xrefdata', 'xrefdata_logmsg', 'cmd_element'] +structs = [ + "xref", + "xref_logmsg", + "xref_threadsched", + "xref_install_element", + "xrefdata", + "xrefdata_logmsg", + "cmd_element", +] -def extract(filename='lib/.libs/libfrr.so'): - ''' + +def extract(filename="lib/.libs/libfrr.so"): + """ Convert output from "pahole" to JSON. 
Example pahole output: @@ -41,26 +50,30 @@ def extract(filename='lib/.libs/libfrr.so'): /* size: 32, cachelines: 1, members: 5 */ /* last cacheline: 32 bytes */ }; - ''' - pahole = subprocess.check_output(['pahole', '-C', ','.join(structs), filename]).decode('UTF-8') + """ + pahole = subprocess.check_output( + ["pahole", "-C", ",".join(structs), filename] + ).decode("UTF-8") - struct_re = re.compile(r'^struct ([^ ]+) \{([^\}]+)};', flags=re.M | re.S) - field_re = re.compile(r'^\s*(?P[^;\(]+)\s+(?P[^;\[\]]+)(?:\[(?P\d+)\])?;\s*\/\*(?P.*)\*\/\s*$') - comment_re = re.compile(r'^\s*\/\*.*\*\/\s*$') + struct_re = re.compile(r"^struct ([^ ]+) \{([^\}]+)};", flags=re.M | re.S) + field_re = re.compile( + r"^\s*(?P[^;\(]+)\s+(?P[^;\[\]]+)(?:\[(?P\d+)\])?;\s*\/\*(?P.*)\*\/\s*$" + ) + comment_re = re.compile(r"^\s*\/\*.*\*\/\s*$") pastructs = struct_re.findall(pahole) out = {} for sname, data in pastructs: this = out.setdefault(sname, {}) - fields = this.setdefault('fields', []) + fields = this.setdefault("fields", []) lines = data.strip().splitlines() next_offs = 0 for line in lines: - if line.strip() == '': + if line.strip() == "": continue m = comment_re.match(line) if m is not None: @@ -68,51 +81,55 @@ def extract(filename='lib/.libs/libfrr.so'): m = field_re.match(line) if m is not None: - offs, size = m.group('comment').strip().split() + offs, size = m.group("comment").strip().split() offs = int(offs) size = int(size) - typ_ = m.group('type').strip() - name = m.group('name') + typ_ = m.group("type").strip() + name = m.group("name") - if name.startswith('(*'): + if name.startswith("(*"): # function pointer - typ_ = typ_ + ' *' - name = name[2:].split(')')[0] + typ_ = typ_ + " *" + name = name[2:].split(")")[0] data = { - 'name': name, - 'type': typ_, - # 'offset': offs, - # 'size': size, + "name": name, + "type": typ_, + # 'offset': offs, + # 'size': size, } - if m.group('array'): - data['array'] = int(m.group('array')) + if m.group("array"): + data["array"] = int(m.group("array")) fields.append(data) if offs != next_offs: - raise ValueError('%d padding bytes before struct %s.%s' % (offs - next_offs, sname, name)) + raise ValueError( + "%d padding bytes before struct %s.%s" + % (offs - next_offs, sname, name) + ) next_offs = offs + size continue - raise ValueError('cannot process line: %s' % line) + raise ValueError("cannot process line: %s" % line) return out + class FieldApplicator(object): - ''' + """ Fill ELFDissectStruct fields list from pahole/JSON Uses the JSON file created by the above code to fill in the struct fields in subclasses of ELFDissectStruct. - ''' + """ # only what we really need. add more as needed. 
packtypes = { - 'int': 'i', - 'uint8_t': 'B', - 'uint16_t': 'H', - 'uint32_t': 'I', - 'char': 's', + "int": "i", + "uint8_t": "B", + "uint16_t": "H", + "uint32_t": "I", + "char": "s", } def __init__(self, data): @@ -126,60 +143,65 @@ class FieldApplicator(object): def resolve(self, cls): out = [] - #offset = 0 + # offset = 0 + + fieldrename = getattr(cls, "fieldrename", {}) - fieldrename = getattr(cls, 'fieldrename', {}) def mkname(n): return (fieldrename.get(n, n),) - for field in self.data[cls.struct]['fields']: - typs = field['type'].split() - typs = [i for i in typs if i not in ['const']] + for field in self.data[cls.struct]["fields"]: + typs = field["type"].split() + typs = [i for i in typs if i not in ["const"]] # this will break reuse of xrefstructs.json across 32bit & 64bit # platforms - #if field['offset'] != offset: + # if field['offset'] != offset: # assert offset < field['offset'] # out.append(('_pad', '%ds' % (field['offset'] - offset,))) # pretty hacky C types handling, but covers what we need ptrlevel = 0 - while typs[-1] == '*': + while typs[-1] == "*": typs.pop(-1) ptrlevel += 1 if ptrlevel > 0: - packtype = ('P', None) + packtype = ("P", None) if ptrlevel == 1: - if typs[0] == 'char': - packtype = ('P', str) - elif typs[0] == 'struct' and typs[1] in self.clsmap: - packtype = ('P', self.clsmap[typs[1]]) - elif typs[0] == 'enum': - packtype = ('I',) + if typs[0] == "char": + packtype = ("P", str) + elif typs[0] == "struct" and typs[1] in self.clsmap: + packtype = ("P", self.clsmap[typs[1]]) + elif typs[0] == "enum": + packtype = ("I",) elif typs[0] in self.packtypes: packtype = (self.packtypes[typs[0]],) - elif typs[0] == 'struct': + elif typs[0] == "struct": if typs[1] in self.clsmap: packtype = (self.clsmap[typs[1]],) else: - raise ValueError('embedded struct %s not in extracted data' % (typs[1],)) + raise ValueError( + "embedded struct %s not in extracted data" % (typs[1],) + ) else: - raise ValueError('cannot decode field %s in struct %s (%s)' % ( - cls.struct, field['name'], field['type'])) + raise ValueError( + "cannot decode field %s in struct %s (%s)" + % (cls.struct, field["name"], field["type"]) + ) - if 'array' in field and typs[0] == 'char': - packtype = ('%ds' % field['array'],) - out.append(mkname(field['name']) + packtype) - elif 'array' in field: - for i in range(0, field['array']): - out.append(mkname('%s_%d' % (field['name'], i)) + packtype) + if "array" in field and typs[0] == "char": + packtype = ("%ds" % field["array"],) + out.append(mkname(field["name"]) + packtype) + elif "array" in field: + for i in range(0, field["array"]): + out.append(mkname("%s_%d" % (field["name"], i)) + packtype) else: - out.append(mkname(field['name']) + packtype) + out.append(mkname(field["name"]) + packtype) - #offset = field['offset'] + field['size'] + # offset = field['offset'] + field['size'] cls.fields = out @@ -187,16 +209,30 @@ class FieldApplicator(object): for cls in self.classes: self.resolve(cls) + def main(): - argp = argparse.ArgumentParser(description = 'FRR DWARF structure extractor') - argp.add_argument('-o', dest='output', type=str, help='write JSON output', default='python/xrefstructs.json') - argp.add_argument('-i', dest='input', type=str, help='ELF file to read', default='lib/.libs/libfrr.so') + argp = argparse.ArgumentParser(description="FRR DWARF structure extractor") + argp.add_argument( + "-o", + dest="output", + type=str, + help="write JSON output", + default="python/xrefstructs.json", + ) + argp.add_argument( + "-i", + dest="input", + type=str, + 
help="ELF file to read", + default="lib/.libs/libfrr.so", + ) args = argp.parse_args() out = extract(args.input) - with open(args.output + '.tmp', 'w') as fd: + with open(args.output + ".tmp", "w") as fd: json.dump(out, fd, indent=2, sort_keys=True) - os.rename(args.output + '.tmp', args.output) + os.rename(args.output + ".tmp", args.output) -if __name__ == '__main__': + +if __name__ == "__main__": main() diff --git a/python/xrelfo.py b/python/xrelfo.py index 739becd8ad..966ccdee9e 100644 --- a/python/xrelfo.py +++ b/python/xrelfo.py @@ -40,13 +40,15 @@ from tiabwarfo import FieldApplicator from xref2vtysh import CommandEntry try: - with open(os.path.join(frr_top_src, 'python', 'xrefstructs.json'), 'r') as fd: + with open(os.path.join(frr_top_src, "python", "xrefstructs.json"), "r") as fd: xrefstructs = json.load(fd) except FileNotFoundError: - sys.stderr.write(''' + sys.stderr.write( + """ The "xrefstructs.json" file (created by running tiabwarfo.py with the pahole tool available) could not be found. It should be included with the sources. -''') +""" + ) sys.exit(1) # constants, need to be kept in sync manually... @@ -58,7 +60,7 @@ XREFT_INSTALL_ELEMENT = 0x301 # LOG_* priovals = {} -prios = ['0', '1', '2', 'E', 'W', 'N', 'I', 'D'] +prios = ["0", "1", "2", "E", "W", "N", "I", "D"] class XrelfoJson(object): @@ -71,9 +73,10 @@ class XrelfoJson(object): def to_dict(self, refs): pass + class Xref(ELFDissectStruct, XrelfoJson): - struct = 'xref' - fieldrename = {'type': 'typ'} + struct = "xref" + fieldrename = {"type": "typ"} containers = {} def __init__(self, *args, **kwargs): @@ -86,7 +89,7 @@ class Xref(ELFDissectStruct, XrelfoJson): def container(self): if self._container is None: if self.typ in self.containers: - self._container = self.container_of(self.containers[self.typ], 'xref') + self._container = self.container_of(self.containers[self.typ], "xref") return self._container def check(self, *args, **kwargs): @@ -95,10 +98,10 @@ class Xref(ELFDissectStruct, XrelfoJson): class Xrefdata(ELFDissectStruct): - struct = 'xrefdata' + struct = "xrefdata" # uid is all zeroes in the data loaded from ELF - fieldrename = {'uid': '_uid'} + fieldrename = {"uid": "_uid"} def ref_from(self, xref, typ): self.xref = xref @@ -109,38 +112,83 @@ class Xrefdata(ELFDissectStruct): return None return uidhash(self.xref.file, self.hashstr, self.hashu32_0, self.hashu32_1) + class XrefPtr(ELFDissectStruct): fields = [ - ('xref', 'P', Xref), + ("xref", "P", Xref), ] + class XrefThreadSched(ELFDissectStruct, XrelfoJson): - struct = 'xref_threadsched' + struct = "xref_threadsched" + + Xref.containers[XREFT_THREADSCHED] = XrefThreadSched + class XrefLogmsg(ELFDissectStruct, XrelfoJson): - struct = 'xref_logmsg' + struct = "xref_logmsg" def _warn_fmt(self, text): - lines = text.split('\n') - yield ((self.xref.file, self.xref.line), '%s:%d: %s (in %s())%s\n' % (self.xref.file, self.xref.line, lines[0], self.xref.func, ''.join(['\n' + l for l in lines[1:]]))) + lines = text.split("\n") + yield ( + (self.xref.file, self.xref.line), + "%s:%d: %s (in %s())%s\n" + % ( + self.xref.file, + self.xref.line, + lines[0], + self.xref.func, + "".join(["\n" + l for l in lines[1:]]), + ), + ) fmt_regexes = [ - (re.compile(r'([\n\t]+)'), 'error: log message contains tab or newline'), - # (re.compile(r'^(\s+)'), 'warning: log message starts with whitespace'), - (re.compile(r'^((?:warn(?:ing)?|error):\s*)', re.I), 'warning: log message starts with severity'), + (re.compile(r"([\n\t]+)"), "error: log message contains tab or newline"), + # 
(re.compile(r'^(\s+)'), 'warning: log message starts with whitespace'), + ( + re.compile(r"^((?:warn(?:ing)?|error):\s*)", re.I), + "warning: log message starts with severity", + ), ] arg_regexes = [ - # the (?' if edf._elffile.bigendian else '<' + endian = ">" if edf._elffile.bigendian else "<" mem = edf._elffile[note] if edf._elffile.elfclass == 64: - start, end = struct.unpack(endian + 'QQ', mem) + start, end = struct.unpack(endian + "QQ", mem) start += note.start end += note.start + 8 else: - start, end = struct.unpack(endian + 'II', mem) + start, end = struct.unpack(endian + "II", mem) start += note.start end += note.start + 4 ptrs = edf.iter_data(XrefPtr, slice(start, end)) else: - xrefarray = edf.get_section('xref_array') + xrefarray = edf.get_section("xref_array") if xrefarray is None: - raise ValueError('file has neither xref note nor xref_array section') + raise ValueError("file has neither xref note nor xref_array section") ptrs = xrefarray.iter_data(XrefPtr) for ptr in ptrs: if ptr.xref is None: - print('NULL xref') + print("NULL xref") continue self._xrefs.append(ptr.xref) @@ -347,15 +428,15 @@ class Xrelfo(dict): def load_json(self, fd): data = json.load(fd) - for uid, items in data['refs'].items(): - myitems = self['refs'].setdefault(uid, []) + for uid, items in data["refs"].items(): + myitems = self["refs"].setdefault(uid, []) for item in items: if item in myitems: continue myitems.append(item) - for cmd, items in data['cli'].items(): - self['cli'].setdefault(cmd, {}).update(items) + for cmd, items in data["cli"].items(): + self["cli"].setdefault(cmd, {}).update(items) return data @@ -363,24 +444,33 @@ class Xrelfo(dict): for xref in self._xrefs: yield from xref.check(checks) + def main(): - argp = argparse.ArgumentParser(description = 'FRR xref ELF extractor') - argp.add_argument('-o', dest='output', type=str, help='write JSON output') - argp.add_argument('--out-by-file', type=str, help='write by-file JSON output') - argp.add_argument('-c', dest='vtysh_cmds', type=str, help='write vtysh_cmd.c') - argp.add_argument('-Wlog-format', action='store_const', const=True) - argp.add_argument('-Wlog-args', action='store_const', const=True) - argp.add_argument('-Werror', action='store_const', const=True) - argp.add_argument('--profile', action='store_const', const=True) - argp.add_argument('binaries', metavar='BINARY', nargs='+', type=str, help='files to read (ELF files or libtool objects)') + argp = argparse.ArgumentParser(description="FRR xref ELF extractor") + argp.add_argument("-o", dest="output", type=str, help="write JSON output") + argp.add_argument("--out-by-file", type=str, help="write by-file JSON output") + argp.add_argument("-c", dest="vtysh_cmds", type=str, help="write vtysh_cmd.c") + argp.add_argument("-Wlog-format", action="store_const", const=True) + argp.add_argument("-Wlog-args", action="store_const", const=True) + argp.add_argument("-Werror", action="store_const", const=True) + argp.add_argument("--profile", action="store_const", const=True) + argp.add_argument( + "binaries", + metavar="BINARY", + nargs="+", + type=str, + help="files to read (ELF files or libtool objects)", + ) args = argp.parse_args() if args.profile: import cProfile - cProfile.runctx('_main(args)', globals(), {'args': args}, sort='cumtime') + + cProfile.runctx("_main(args)", globals(), {"args": args}, sort="cumtime") else: _main(args) + def _main(args): errors = 0 xrelfo = Xrelfo() @@ -390,60 +480,59 @@ def _main(args): xrelfo.load_file(fn) except: errors += 1 - sys.stderr.write('while processing 
%s:\n' % (fn)) + sys.stderr.write("while processing %s:\n" % (fn)) traceback.print_exc() for option in dir(args): - if option.startswith('W') and option != 'Werror': + if option.startswith("W") and option != "Werror": checks = sorted(xrelfo.check(args)) - sys.stderr.write(''.join([c[-1] for c in checks])) + sys.stderr.write("".join([c[-1] for c in checks])) if args.Werror and len(checks) > 0: errors += 1 break - - refs = xrelfo['refs'] + refs = xrelfo["refs"] counts = {} for k, v in refs.items(): - strs = set([i['fmtstring'] for i in v]) + strs = set([i["fmtstring"] for i in v]) if len(strs) != 1: - print('\033[31;1m%s\033[m' % k) + print("\033[31;1m%s\033[m" % k) counts[k] = len(v) out = xrelfo outbyfile = {} for uid, locs in refs.items(): for loc in locs: - filearray = outbyfile.setdefault(loc['file'], []) + filearray = outbyfile.setdefault(loc["file"], []) loc = dict(loc) - del loc['file'] + del loc["file"] filearray.append(loc) for k in outbyfile.keys(): - outbyfile[k] = sorted(outbyfile[k], key=lambda x: x['line']) + outbyfile[k] = sorted(outbyfile[k], key=lambda x: x["line"]) if errors: sys.exit(1) if args.output: - with open(args.output + '.tmp', 'w') as fd: + with open(args.output + ".tmp", "w") as fd: json.dump(out, fd, indent=2, sort_keys=True, **json_dump_args) - os.rename(args.output + '.tmp', args.output) + os.rename(args.output + ".tmp", args.output) if args.out_by_file: - with open(args.out_by_file + '.tmp', 'w') as fd: + with open(args.out_by_file + ".tmp", "w") as fd: json.dump(outbyfile, fd, indent=2, sort_keys=True, **json_dump_args) - os.rename(args.out_by_file + '.tmp', args.out_by_file) + os.rename(args.out_by_file + ".tmp", args.out_by_file) if args.vtysh_cmds: - with open(args.vtysh_cmds + '.tmp', 'w') as fd: + with open(args.vtysh_cmds + ".tmp", "w") as fd: CommandEntry.run(out, fd) - os.rename(args.vtysh_cmds + '.tmp', args.vtysh_cmds) + os.rename(args.vtysh_cmds + ".tmp", args.vtysh_cmds) if args.Werror and CommandEntry.warn_counter: sys.exit(1) -if __name__ == '__main__': +if __name__ == "__main__": main()
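For context, a usage sketch modeled on test_xrelfo.py and runtests.py above (an assumption-laden example, not part of this change: it must run under the clippy interpreter so the _clippy C extension is available, and the referenced build artifacts must exist):

import sys

sys.path.append("python")  # as the tests do, so "import xrelfo" resolves
import xrelfo

xr = xrelfo.Xrelfo()
# example path taken from test_xrelfo.py; any built FRR ELF object works
xr.load_elf("lib/.libs/libfrr.so", "libfrr.la")
print("loaded %d xrefs" % len(xr._xrefs))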