scripts/make_fit: Compress dtbs in parallel

When there are 1500 device tree files, it takes quite a while to compress
them. Do it in parallel.

Signed-off-by: Simon Glass <sjg@chromium.org>
Link: https://patch.msgid.link/20260106162738.2605574-7-sjg@chromium.org
Signed-off-by: Nathan Chancellor <nathan@kernel.org>
Simon Glass, 2026-01-06 09:27:36 -07:00, committed by Nathan Chancellor
parent fcdcf22a34
commit c7c88b20cd

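The change follows a map-then-collect pattern: gather the unique DTB paths, hand (filename, algorithm) tuples to a multiprocessing.Pool via starmap(), and turn the results into a dict keyed by filename. Below is a minimal standalone sketch of that pattern, not the script itself: it uses Python's gzip module as a stand-in for make_fit's compress_data() helper, and the file names and the compress_one() function are hypothetical.

    import glob
    import gzip
    import multiprocessing


    def compress_one(fname, level=9):
        # Read one file and return (fname, compressed bytes), like compress_dtb()
        with open(fname, 'rb') as inf:
            return fname, gzip.compress(inf.read(), level)


    if __name__ == '__main__':
        # Hypothetical inputs; the real script first collects unique .dtb paths
        todo = sorted(glob.glob('*.dtb'))

        with multiprocessing.Pool() as pool:
            # starmap() unpacks each (fname, level) tuple into compress_one()
            results = pool.starmap(compress_one, [(fn, 9) for fn in todo])

        cache = dict(results)  # fname -> compressed bytes
        for fn in todo:
            print(f'{fn}: {len(cache[fn])} bytes compressed')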

@@ -37,6 +37,7 @@ as U-Boot, Linuxboot, Tianocore, etc.
 import argparse
 import collections
+import multiprocessing
 import os
 import subprocess
 import sys
@@ -225,15 +226,31 @@ def compress_data(inf, compress):
     return comp_data
 
 
-def output_dtb(fsw, seq, fname, arch, compress):
+def compress_dtb(fname, compress):
+    """Compress a single DTB file
+
+    Args:
+        fname (str): Filename containing the DTB
+        compress (str): Compression algorithm, e.g. 'gzip'
+
+    Returns:
+        tuple: (str: fname, bytes: compressed_data)
+    """
+    with open(fname, 'rb') as inf:
+        compressed = compress_data(inf, compress)
+    return fname, compressed
+
+
+def output_dtb(fsw, seq, fname, arch, compress, data=None):
     """Write out a single devicetree to the FIT
 
     Args:
         fsw (libfdt.FdtSw): Object to use for writing
         seq (int): Sequence number (1 for first)
         fname (str): Filename containing the DTB
-        arch: FIT architecture, e.g. 'arm64'
+        arch (str): FIT architecture, e.g. 'arm64'
         compress (str): Compressed algorithm, e.g. 'gzip'
+        data (bytes): Pre-compressed data (optional)
     """
     with fsw.add_node(f'fdt-{seq}'):
         fsw.property_string('description', os.path.basename(fname))
@@ -241,9 +258,10 @@ def output_dtb(fsw, seq, fname, arch, compress):
         fsw.property_string('arch', arch)
         fsw.property_string('compression', compress)
 
-        with open(fname, 'rb') as inf:
-            compressed = compress_data(inf, compress)
-        fsw.property('data', compressed)
+        if data is None:
+            with open(fname, 'rb') as inf:
+                data = compress_data(inf, compress)
+        fsw.property('data', data)
 
 
 def process_dtb(fname, args):
@@ -300,6 +318,11 @@ def _process_dtbs(args, fsw, entries, fdts):
     """
     seq = 0
     size = 0
+
+    # First figure out the unique DTB files that need compression
+    todo = []
+    file_info = []  # List of (fname, model, compat, files) tuples
+
     for fname in args.dtbs:
         # Ignore non-DTB (*.dtb) files
         if os.path.splitext(fname)[1] != '.dtb':
@@ -311,11 +334,32 @@ def _process_dtbs(args, fsw, entries, fdts):
             sys.stderr.write(f'Error processing {fname}:\n')
             raise e
 
+        file_info.append((fname, model, compat, files))
+        for fn in files:
+            if fn not in fdts and fn not in todo:
+                todo.append(fn)
+
+    # Compress all DTBs in parallel
+    cache = {}
+    if todo and args.compress != 'none':
+        if args.verbose:
+            print(f'Compressing {len(todo)} DTBs...')
+        with multiprocessing.Pool() as pool:
+            compress_args = [(fn, args.compress) for fn in todo]
+            # unpacks each tuple, calls compress_dtb(fn, compress) in parallel
+            results = pool.starmap(compress_dtb, compress_args)
+        cache = dict(results)
+
+    # Now write all DTBs to the FIT using pre-compressed data
+    for fname, model, compat, files in file_info:
         for fn in files:
             if fn not in fdts:
                 seq += 1
                 size += os.path.getsize(fn)
-                output_dtb(fsw, seq, fn, args.arch, args.compress)
+                output_dtb(fsw, seq, fn, args.arch, args.compress,
+                           cache.get(fn))
                 fdts[fn] = seq
 
         files_seq = [fdts[fn] for fn in files]
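As in the sketch above, the cache is keyed by filename, so each unique DTB is compressed exactly once even when several entries reference it. The cache is only populated when args.compress is not 'none'; otherwise cache.get(fn) returns None and output_dtb() falls back to reading and encoding the file itself, preserving the previous behaviour.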