From 196a94e199d76d74c29f3833e41b2caaac1cbb67 Mon Sep 17 00:00:00 2001
From: Romain Goyet
Date: Sun, 12 Apr 2020 22:14:06 -0400
Subject: [PATCH] [ci] Report .text, .rodata, .bss and .data sections

Note that .text and .rodata are cumulative (internal+external)
---
 .github/workflows/metrics-workflow.yml |  2 +-
 build/metrics/binary_size.py           | 27 ++++++++++++--------------
 2 files changed, 13 insertions(+), 16 deletions(-)

diff --git a/.github/workflows/metrics-workflow.yml b/.github/workflows/metrics-workflow.yml
index bc22fcd4b..5341444b4 100644
--- a/.github/workflows/metrics-workflow.yml
+++ b/.github/workflows/metrics-workflow.yml
@@ -25,7 +25,7 @@ jobs:
         run: make -j2 -C head epsilon.elf
       - name: Retrieve binary size analysis
         id: binary_size
-        run: echo "::set-output name=table::$(python3 head/build/metrics/binary_size.py base/output/release/device/n0110/epsilon.elf head/output/release/device/n0110/epsilon.elf --labels Base Head --escape)"
+        run: echo "::set-output name=table::$(python3 head/build/metrics/binary_size.py base/output/release/device/n0110/epsilon.elf head/output/release/device/n0110/epsilon.elf --labels Base Head --sections .text .rodata .bss .data --escape)"
       - name: Prepare comment auth
         run: echo "::set-env name=GITHUB_TOKEN::$(echo ZGExNWM1YzNlMjVkMWU5ZGFmOWQyY2UxMmRhYjJiN2ZhMWM4ODVhMA== | base64 --decode)"
       - name: Add comment
diff --git a/build/metrics/binary_size.py b/build/metrics/binary_size.py
index c9c07937c..867661070 100644
--- a/build/metrics/binary_size.py
+++ b/build/metrics/binary_size.py
@@ -8,8 +8,8 @@ import urllib.parse

 # ELF analysis

-def loadable_sections(elf_file, address_prefix = ""):
-  objdump_section_headers_pattern = re.compile("^\s+\d+\s+(\.[\w\.]+)\s+([0-9a-f]+)\s+([0-9a-f]+)\s+("+address_prefix+"[0-9a-f]+)\s+([0-9a-f]+).*LOAD", flags=re.MULTILINE)
+def loadable_sections(elf_file):
+  objdump_section_headers_pattern = re.compile("^\s+\d+\s+(\.[\w\.]+)\s+([0-9a-f]+)\s+([0-9a-f]+)\s+([0-9a-f]+)\s+([0-9a-f]+)", flags=re.MULTILINE)
   objdump_output = subprocess.check_output(["arm-none-eabi-objdump", "-h", "-w", elf_file]).decode('utf-8')
   sections = []
   for (name, size, vma, lma, offset) in re.findall(objdump_section_headers_pattern, objdump_output):
@@ -21,19 +21,16 @@ def loadable_sections(elf_file, address_prefix = ""):

 # Data filtering

-def biggest_sections(sections, n):
-  sorted_sections = sorted(sections, key=lambda s: s['size'], reverse=True)
-  return sorted_sections[:n]
-
-def total_size(sections):
-  return sum(map(lambda s: s['size'], sections))
-
-def row_for_elf(elf, columns):
+def row_for_elf(elf, requested_section_prefixes):
   sections = loadable_sections(elf)
   result = {}
-  for s in biggest_sections(sections, columns):
-    result[s['name']] = s['size']
-  result['Total'] = total_size(sections)
+  for prefix in requested_section_prefixes:
+    for s in sections:
+      section_name = s['name']
+      if s['name'].startswith(prefix):
+        if not prefix in result:
+          result[prefix] = 0
+        result[prefix] += s['size']
   return result


@@ -119,7 +116,7 @@ def format_table(table):
 parser = argparse.ArgumentParser(description='Compute binary size metrics')
 parser.add_argument('files', type=str, nargs='+', help='an ELF file')
 parser.add_argument('--labels', type=str, nargs='+', help='label for ELF file')
-parser.add_argument('--number-of-sections', type=int, default=2, help='Number of detailed sections')
+parser.add_argument('--sections', type=str, nargs='+', help='Section (prefix) to list')
 parser.add_argument('--escape', action='store_true', help='Escape the output')

 args = parser.parse_args()
@@ -131,7 +128,7 @@ for i,filename in enumerate(args.files):
   label = os.path.basename(filename)
   if args.labels and i < len(args.labels):
     label = args.labels[i]
-  table.append({'label': label, 'values': row_for_elf(filename, args.number_of_sections)})
+  table.append({'label': label, 'values': row_for_elf(filename, args.sections)})

 formatted_table = format_table(table)
 if args.escape:
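
For reference, here is a minimal standalone sketch of the prefix aggregation that the new row_for_elf performs. The section names and sizes are invented for illustration (the real values come from parsing arm-none-eabi-objdump -h -w epsilon.elf), and sizes_by_prefix is a hypothetical helper, not part of the patch:

# Illustrative only: a fake section list standing in for the parsed objdump output.
sections = [
  {'name': '.text', 'size': 0x1000},          # e.g. internal flash code
  {'name': '.text.external', 'size': 0x2000}, # e.g. external flash code
  {'name': '.rodata', 'size': 0x300},
  {'name': '.bss', 'size': 0x80},
  {'name': '.data', 'size': 0x40},
]

def sizes_by_prefix(sections, prefixes):
  # Sum every section whose name starts with a requested prefix, so that
  # .text and .text.external are both counted under .text (hence "cumulative").
  result = {}
  for prefix in prefixes:
    for s in sections:
      if s['name'].startswith(prefix):
        result[prefix] = result.get(prefix, 0) + s['size']
  return result

print(sizes_by_prefix(sections, ['.text', '.rodata', '.bss', '.data']))
# {'.text': 12288, '.rodata': 768, '.bss': 128, '.data': 64}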