Example #1
"""
Device Tree Blob dump

@author: Neil 'superna' Armstrong <*****@*****.**>
"""

import argparse
from pyfdt.pyfdt import FdtBlobParse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Device Tree Blob dump')
    parser.add_argument('--format', dest='format', help="output format (dts, json or dtb), default to dts", default="dts")
    parser.add_argument('in_filename', help="input filename")
    parser.add_argument('out_filename', help="output filename")
    args = parser.parse_args()

    if args.format not in ('dts', 'json', 'dtb'):
        raise Exception('Invalid Output Format')
    
    with open(args.in_filename, 'rb') as infile:
        dtb = FdtBlobParse(infile)
    
    fdt = dtb.to_fdt()
    
    if args.format == "dts":
        with open(args.out_filename, 'wb') as outfile:
            outfile.write(fdt.to_dts().encode('ascii'))
    elif args.format == "dtb":
        with open(args.out_filename, 'wb') as outfile:
            outfile.write(fdt.to_dtb())
    elif args.format == "json":
        with open(args.out_filename, 'wb') as outfile:
            outfile.write(fdt.to_json().encode('ascii'))
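
A minimal programmatic sketch of the same conversion without the command-line
wrapper, assuming placeholder filenames 'input.dtb' and 'output.dts':

from pyfdt.pyfdt import FdtBlobParse

# Parse the blob in binary mode, then serialise it back out as device tree
# source, mirroring the "dts" branch above.
with open('input.dtb', 'rb') as infile:
    fdt = FdtBlobParse(infile).to_fdt()
with open('output.dts', 'wb') as outfile:
    outfile.write(fdt.to_dts().encode('ascii'))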
Example #2
    parser.add_argument(
        "--format1", dest="format1", help="input format (dtb, fs or json), default to dtb", default="dtb"
    )
    parser.add_argument(
        "--format2", dest="format2", help="input format (dtb, fs or json), default to dtb", default="dtb"
    )
    parser.add_argument("in_dtb1", help="input filename 1")
    parser.add_argument("in_dtb2", help="input filename 2")
    args = parser.parse_args()

    if args.format1 not in ("fs", "dtb", "json"):
        raise Exception("Invalid Format1")
    if args.format2 not in ("fs", "dtb", "json"):
        raise Exception("Invalid Format2")

    if args.format1 == "dtb":
        with open(args.in_dtb1, "rb") as infile:
            dtb1 = FdtBlobParse(infile)
        fdt1 = dtb1.to_fdt()
    elif args.format1 == "json":
        with open(args.in_dtb1) as infile:
            fdt1 = FdtJsonParse(infile.read())
    else:
        fdt1 = FdtFsParse(args.in_dtb1)

    if args.format2 == "dtb":
        with open(args.in_dtb2, "rb") as infile:
            dtb2 = FdtBlobParse(infile)
        fdt2 = dtb2.to_fdt()
    elif args.format2 == "json":
        with open(args.in_dtb2, "r") as infile:
            fdt2 = FdtJsonParse(infile.read())
    else:
        fdt2 = FdtFsParse(args.in_dtb2)
Example #3
def read_binary_file(fname):
    # Parse a flattened device tree blob and return the resulting Fdt object.
    with open(fname, "rb") as f:
        dtb = FdtBlobParse(f)
        fdt = dtb.to_fdt()
    return fdt
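
Hypothetical usage of the helper above ('platform.dtb' is a placeholder path):

fdt = read_binary_file("platform.dtb")
print(fdt.to_dts())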
Example #4
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
DTB to JSON

@author: Neil 'superna' Armstrong <*****@*****.**>
"""

import argparse
from pyfdt.pyfdt import FdtBlobParse

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Device Tree Blob dump')
    parser.add_argument('in_filename', help="input filename")
    args = parser.parse_args()

    with open(args.in_filename, 'rb') as infile:
        dtb = FdtBlobParse(infile)

    fdt = dtb.to_fdt()

    print(fdt.to_json())
Example #5
    parser.add_argument('--format1', dest='format1', help="input format1 (dtb, fs or json), default to dtb", default="dtb")
    parser.add_argument('--format2', dest='format2', help="input format2 (dtb, fs or json), default to dtb", default="dtb")
    parser.add_argument('--outformat', dest='outformat', help="output format (dtb, dts or json), default to dtb", default="dtb")
    parser.add_argument('in_dtb1', help="input filename 1")
    parser.add_argument('in_dtb2', help="input filename 2")
    parser.add_argument('out_filename', help="output filename")
    args = parser.parse_args()

    if args.format1 not in ('fs', 'dtb', 'json'):
        raise Exception('Invalid Format1')
    if args.format2 not in ('fs', 'dtb', 'json'):
        raise Exception('Invalid Format2')

    if args.format1 == 'dtb':
        with open(args.in_dtb1, 'rb') as infile:
            dtb1 = FdtBlobParse(infile)
        fdt1 = dtb1.to_fdt()
    elif args.format1 == 'json':
        with open(args.in_dtb1) as infile:
            fdt1 = FdtJsonParse(infile.read())
    else:
        fdt1 = FdtFsParse(args.in_dtb1)

    if args.format2 == 'dtb':
        with open(args.in_dtb2, 'rb') as infile:
            dtb2 = FdtBlobParse(infile)
        fdt2 = dtb2.to_fdt()
    elif args.format2 == 'json':
        with open(args.in_dtb2) as infile:
            fdt2 = FdtJsonParse(infile.read())
    else:
        fdt2 = FdtFsParse(args.in_dtb2)
Example #6
from pyfdt.pyfdt import FdtBlobParse
import sys

NODE_HYPERVISOR = b'hypervisor'
DIRECTION_LEFT = -1
DIRECTION_RIGHT = 1


def shift_node_by_n_bytes(dtb, node, nnn):
    # Start of the region to move: two bytes before the node name.
    node_offset = dtb.find(node) - 2
    node_length = len(node)
    # Re-assemble the blob with the node name (plus the 76 bytes following it)
    # shifted by `nnn` bytes; the NUL padding keeps the total length unchanged,
    # which only works for a leftward (negative) shift.
    return (dtb[:node_offset + nnn]
            + dtb[node_offset:node_offset + node_length + 76]
            + (-1 * nnn) * b'\000'
            + dtb[node_offset + node_length + 76:])


with open(sys.argv[1], 'rb') as input_device_tree_binary_file:
    input_device_tree_binary = FdtBlobParse(
        input_device_tree_binary_file).to_fdt().to_dtb()
output_device_tree_binary = shift_node_by_n_bytes(input_device_tree_binary,
                                                  NODE_HYPERVISOR,
                                                  DIRECTION_LEFT * 2)

with open(sys.argv[2], 'wb') as output_device_tree_binary_file:
    output_device_tree_binary_file.write(output_device_tree_binary)
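
A toy illustration of the shift (not a real DTB): a two-byte leftward shift
drops the two bytes immediately preceding the node name and NUL-pads the blob
back to its original length. The 'demo' buffer below is made up purely for
demonstration:

demo = b'AABB' + NODE_HYPERVISOR + b'X' * 76 + b'TAIL'
out = shift_node_by_n_bytes(demo, NODE_HYPERVISOR, DIRECTION_LEFT * 2)
assert len(out) == len(demo)
assert out.find(NODE_HYPERVISOR) == demo.find(NODE_HYPERVISOR) - 2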
Example #7
def main(argv, out, err):

    # We need a UTF-8 locale, so bail out if we don't have one. More
    # specifically, things like the version() computation traverse the file
    # system and, if they hit a UTF-8 filename, they try to decode it into your
    # preferred encoding and trigger an exception.
    encoding = locale.getpreferredencoding().lower()
    if encoding not in ('utf-8', 'utf8'):
        err.write('CAmkES uses UTF-8 encoding, but your locale\'s preferred '
            'encoding is %s. You can override your locale with the LANG '
            'environment variable.\n' % encoding)
        return -1

    options = parse_args(argv, out, err)

    # Ensure we were supplied an equal number of items and outfiles.
    if len(options.outfile) != len(options.item):
        err.write('Different number of items and outfiles. One outfile '
            'location is required per item requested.\n')
        return -1

    # No duplicates in items or outfiles
    if len(set(options.item)) != len(options.item):
        err.write('Duplicate items requested through --item.\n')
        return -1
    if len(set(options.outfile)) != len(options.outfile):
        err.write('Duplicate outfiles requested through --outfile.\n')
        return -1

    # Save us having to pass debugging everywhere.
    die = functools.partial(_die, options)

    log.set_verbosity(options.verbosity)

    cwd = os.getcwd()

    # Build a list of item/outfile pairs that we have yet to match and process
    all_items = set(zip(options.item, options.outfile))
    done_items = set()

    # Construct the compilation caches if requested.
    cachea = None
    cacheb = None
    if options.cache:

        # Construct a modified version of the command line arguments that we'll
        # use in the keys to the caches. Essentially we elide --outfile and its
        # parameter under the assumption that this value is never used in code
        # generation. The purpose of this is to allow us to successfully cache
        # ancillary outputs that we generate along the way to the current
        # output. If we were to include --outfile in the key, future attempts
        # to generate these ancillary outputs would unnecessarily miss the
        # entries generated by this execution.
        args = []
        skip = False
        for arg in argv[1:]:
            if skip:
                skip = False
                continue
            if arg in ('--outfile', '-O'):
                skip = True
                continue
            args.append(arg)

        cachea = LevelACache(os.path.join(options.cache_dir, version(), 'cachea'))
        cacheb = LevelBCache(os.path.join(options.cache_dir, version(), 'cacheb'))

    def done(s, file, item):
        ret = 0
        if s:
            file.write(s)
            file.close()
        if cachea is not None:
            try:
                cachea.flush()
            except sqlite3.OperationalError as e:
                # The following suppresses two spurious errors:
                #  1. The database is locked. In a large, parallel build, writes
                #     to the level A cache are heavily contended and this error
                #     can occur.
                #  2. The database structure is unexpected. If the CAmkES
                #     sources have changed *while* the runner was executing,
                #     the level A cache can be looking in a different place to
                #     where the cache was created.
                # Both of these are non-critical (will just result in a
                # potential future cache miss) so there's no need to alarm the
                # user.
                if re.search(r'database is locked', str(e)) is not None or \
                   re.search(r'no such table', str(e)) is not None:
                    log.debug('failed to flush level A cache: %s' % str(e))
                else:
                    raise
        if cacheb is not None:
            try:
                cacheb.flush()
            except sqlite3.OperationalError as e:
                # As above for the level B cache.
                if re.search(r'database is locked', str(e)):
                    log.debug('failed to flush level B cache: %s' % str(e))
                else:
                    raise

        done_items.add((item, file))
        if len(all_items - done_items) == 0:
            sys.exit(ret)

    # Try to find this output in the level A cache if possible. This check will
    # 'hit' if the source files representing the input spec are identical to
    # some previously observed execution.
    if cachea is not None:
        assert 'args' in locals()
        assert len(options.outfile) == 1, 'level A cache only supported when requesting ' \
            'single items'
        output = cachea.load(args, cwd)
        if output is not None:
            log.debug('Retrieved %(platform)s/%(item)s from level A cache' %
                options.__dict__)
            done(output, options.outfile[0], options.item[0])

    filename = os.path.abspath(options.file.name)

    try:
        # Build the parser options
        parse_options = ParserOptions(options.cpp, options.cpp_flag, options.import_path, options.verbosity, options.allow_forward_references)
        ast, read = parse_file_cached(filename, options.data_structure_cache_dir, parse_options)
    except (ASTError, ParseError) as e:
        die(e.args)

    # Locate the assembly.
    assembly = ast.assembly
    if assembly is None:
        die('No assembly found')

    if options.dtb:
        dtb = FdtBlobParse(options.dtb).to_fdt()

    # Do some extra checks if the user asked for verbose output.
    if options.verbosity >= 2:

        # Try to catch type mismatches in attribute settings. Note that it is
        # not possible to conclusively evaluate type correctness because the
        # attributes' type system is (deliberately) too loose. That is, the
        # type of an attribute can be an uninterpreted C type the user will
        # provide post hoc.
        for i in assembly.composition.instances:
            for a in i.type.attributes:
                value = assembly.configuration[i.name].get(a.name)
                if value is not None:
                    if a.type == 'string' and not \
                            isinstance(value, six.string_types):
                        log.warning('attribute %s.%s has type string but is '
                            'set to a value that is not a string' % (i.name,
                            a.name))
                    elif a.type == 'int' and not \
                            isinstance(value, numbers.Number):
                        log.warning('attribute %s.%s has type int but is set '
                            'to a value that is not an integer' % (i.name,
                                a.name))

    obj_space = ObjectAllocator()
    obj_space.spec.arch = options.architecture
    cspaces = {}
    pds = {}
    conf = assembly.configuration
    shmem = collections.defaultdict(ShmemFactory())
    kept_symbols = {}
    fill_frames = {}

    templates = Templates(options.platform)
    for t in options.templates:
        templates.add_root(t)
    try:
        r = Renderer(templates, options.cache, options.cache_dir)
    except jinja2.exceptions.TemplateSyntaxError as e:
        die('template syntax error: %s' % e)

    # The user may have provided their own connector definitions (with
    # associated templates), in which case they won't be in the built-in lookup
    # dictionary. Let's add them now. Note, definitions here that conflict with
    # existing lookup entries will overwrite the existing entries. Note that
    # the extra check that the connector has some templates is just an
    # optimisation; the templates module handles connectors without templates
    # just fine.
    extra_templates = set()
    for c in (x for x in ast.items if isinstance(x, Connector) and
            (x.from_template is not None or x.to_template is not None)):
        try:
            # Find a connection that uses this type.
            connection = next(x for x in ast if isinstance(x, Connection) and
                x.type == c)
            # Add the custom templates and update our collection of read
            # inputs. It is necessary to update the read set here to avoid
            # false compilation cache hits when the source of a custom template
            # has changed.
            extra_templates |= templates.add(c, connection)
        except TemplateError as e:
            die('while adding connector %s: %s' % (c.name, e))
        except StopIteration:
            # No connections use this type. There's no point adding it to the
            # template lookup dictionary.
            pass

    # Check if our current target is in the level B cache. The level A cache
    # will 'miss' and this one will 'hit' when the input spec is identical to
    # some previously observed execution modulo a semantically irrelevant
    # element (e.g. an introduced comment).
    ast_hash = None
    if cacheb is not None:
        ast_hash = level_b_prime(ast)
        assert 'args' in locals()
        assert len(options.item) == 1, 'level B cache only supported when requesting ' \
            'single items'
        output = cacheb.load(ast_hash, args, set(options.elf) | extra_templates)
        if output is not None:
            log.debug('Retrieved %(platform)s/%(item)s from level B cache' %
                options.__dict__)
            done(output, options.outfile[0], options.item[0])

    # Add custom templates.
    read |= extra_templates

    # Add the CAmkES sources themselves to the accumulated list of inputs.
    read |= set(path for path, _ in sources())

    # Add any ELF files we were passed as inputs.
    read |= set(options.elf)

    # Write a Makefile dependency rule if requested.
    if options.makefile_dependencies is not None:
        options.makefile_dependencies.write('%s: \\\n  %s\n' %
            (filename, ' \\\n  '.join(sorted(read))))

    # If we have a cache, allow outputs to be saved to it.
    if options.cache:

        assert cachea is not None, 'level A cache not available, though the ' \
            'cache is enabled (bug in runner?)'
        # The logic of this cache currently only works when a single item is requested
        # on the command line
        assert len(options.item) == 1, 'level A cache only supported when requesting ' \
            'single items'

        # Calculate the input files to the level A cache.
        inputs = level_a_prime(read)

        # Work out the position of the --item argument in the command line
        # parameters. We will use this to cache not only outputs for this
        # execution, but also outputs for ones with a different target.
        item_index = None
        assert 'args' in locals()
        for index, arg in enumerate(args[:-1]):
            if arg in ('--item', '-T'):
                item_index = index + 1
                break
        assert item_index is not None, 'failed to find required argument ' \
            '--item (bug in runner?)'

        # We should already have the necessary inputs for the level B cache.
        assert cacheb is not None, 'level B cache not available, though the ' \
            'cache is enabled (bug in runner?)'
        assert ast_hash is not None, 'AST hash not pre-computed (bug in ' \
            'runner?)'

        def save(item, value):
            # Juggle the command line arguments to cache the predicted
            # arguments for a call that would generate this item.
            new_args = args[:item_index] + [item] + args[item_index + 1:]

            # Save entries in both caches.
            cachea.save(new_args, cwd, value, inputs)
            if item != 'Makefile' and item != 'camkes-gen.cmake':
                # We avoid caching the generated Makefile because it is not
                # safe. The inputs to generation of the Makefile are not only
                # the AST, but also the file names (`inputs`). If we cache it in
                # the level B cache we risk the following scenario:
                #
                #   1. Generate the Makefile, caching it in the level B cache;
                #   2. Modify the spec to import a file containing only white
                #      space and/or comments; then
                #   3. Generate the Makefile, missing the level A cache, but
                #      hitting the level B cache.
                #
                # At this point, the generated Makefile is incorrect because it
                # does not capture any dependencies on the imported file. We can
                # now introduce something semantically relevant into this file
                # (e.g. an Assembly block) and it will not be seen by the build
                # system.
                cacheb.save(ast_hash, new_args,
                    set(options.elf) | extra_templates, value)
    else:
        def save(item, value):
            pass

    def apply_capdl_filters():
        # Derive a set of usable ELF objects from the filenames we were passed.
        elfs = {}
        for e in options.elf:
            try:
                name = os.path.basename(e)
                if name in elfs:
                    raise Exception('duplicate ELF files of name \'%s\' encountered' % name)
                elf = ELF(e, name, options.architecture)
                p = Perspective(phase=RUNNER, elf_name=name)
                group = p['group']
                # Avoid inferring a TCB as we've already created our own.
                elf_spec = elf.get_spec(infer_tcb=False, infer_asid=False,
                    pd=pds[group], use_large_frames=options.largeframe)
                obj_space.merge(elf_spec, label=group)
                elfs[name] = (e, elf)
            except Exception as inst:
                die('While opening \'%s\': %s' % (e, inst))

        filteroptions = FilterOptions(options.architecture, options.realtime, options.largeframe,
            options.largeframe_dma, options.default_priority, options.default_max_priority,
            options.default_affinity, options.default_period, options.default_budget,
            options.default_data, options.default_size_bits,
            options.debug_fault_handlers, options.fprovide_tcb_caps)
        for f in CAPDL_FILTERS:
            try:
                # Pass everything as named arguments to allow filters to
                # easily ignore what they don't want.
                f(ast=ast, obj_space=obj_space, cspaces=cspaces, elfs=elfs,
                    options=filteroptions, shmem=shmem, fill_frames=fill_frames)
            except Exception as inst:
                die('While forming CapDL spec: %s' % inst)

    renderoptions = RenderOptions(options.file, options.verbosity, options.frpc_lock_elision,
        options.fspecialise_syscall_stubs, options.fprovide_tcb_caps, options.fsupport_init,
        options.largeframe, options.largeframe_dma, options.architecture, options.debug_fault_handlers,
        options.realtime)

    def instantiate_misc_template():
        for (item, outfile) in (all_items - done_items):
            try:
                template = templates.lookup(item)
                if template:
                    g = r.render(
                        assembly, assembly, template, obj_space, None,
                        shmem, kept_symbols, fill_frames, outfile_name=outfile.name,
                        imported=read, options=renderoptions)
                    save(item, g)
                    done(g, outfile, item)
            except TemplateError as inst:
                die(rendering_error(item, inst))

    if options.item[0] in ('capdl', 'label-mapping') and options.data_structure_cache_dir is not None \
            and len(options.outfile) == 1:
        # It's possible that data structures required to instantiate the capdl spec
        # were saved during a previous invocation of this script in the current build.
        cache_path = os.path.realpath(options.data_structure_cache_dir)
        pickle_path = os.path.join(cache_path, CAPDL_STATE_PICKLE)

        if os.path.isfile(pickle_path):
            with open(pickle_path, 'rb') as pickle_file:
                # Found a cached version of the necessary data structures
                obj_space, shmem, cspaces, pds, kept_symbols, fill_frames = pickle.load(pickle_file)
                apply_capdl_filters()
                instantiate_misc_template()

                # If a template wasn't instantiated, something went wrong, and we can't recover
                raise CAmkESError("No template instantiated on capdl generation fastpath")

    # We're now ready to instantiate the template the user requested, but there
    # are a few wrinkles in the process. Namely,
    #  1. Template instantiation needs to be done in a deterministic order. The
    #     runner is invoked multiple times and template code needs to be
    #     allocated identical cap slots in each run.
    #  2. Components and connections need to be instantiated before any other
    #     templates, regardless of whether they are the ones we are after. Some
    #     other templates, such as the Makefile depend on the obj_space and
    #     cspaces.
    #  3. All actual code templates, up to the template that was requested,
    #     need to be instantiated. This is related to (1) in that the cap slots
    #     allocated are dependent on what allocations have been done prior to a
    #     given allocation call.

    # Instantiate the per-component source and header files.
    for i in assembly.composition.instances:
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue

        if i.address_space not in cspaces:
            p = Perspective(phase=RUNNER, instance=i.name,
                group=i.address_space)
            cnode = obj_space.alloc(seL4_CapTableObject,
                name=p['cnode'], label=i.address_space)
            cspaces[i.address_space] = CSpaceAllocator(cnode)
            pd = obj_space.alloc(lookup_architecture(options.architecture).vspace().object, name=p['pd'],
                label=i.address_space)
            pds[i.address_space] = pd

        for t in ('%s/source' % i.name, '%s/header' % i.name,
                '%s/c_environment_source' % i.name,
                '%s/cakeml_start_source' % i.name, '%s/cakeml_end_source' % i.name,
                '%s/linker' % i.name):
            try:
                template = templates.lookup(t, i)
                g = ''
                if template:
                    g = r.render(i, assembly, template, obj_space, cspaces[i.address_space],
                        shmem, kept_symbols, fill_frames, outfile_name=None,
                        options=renderoptions, my_pd=pds[i.address_space])
                save(t, g)
                for (item, outfile) in (all_items - done_items):
                    if item == t:
                        if not template:
                            log.warning('Warning: no template for %s' % item)
                        done(g, outfile, item)
                        break
            except TemplateError as inst:
                die(rendering_error(i.name, inst))

    # Instantiate the per-connection files.
    for c in assembly.composition.connections:

        for t in (('%s/from/source' % c.name, c.from_ends),
                  ('%s/from/header' % c.name, c.from_ends),
                  ('%s/to/source' % c.name, c.to_ends),
                  ('%s/to/header' % c.name, c.to_ends),
                  ('%s/to/cakeml' % c.name, c.to_ends)):

            template = templates.lookup(t[0], c)

            if template is not None:
                for id, e in enumerate(t[1]):
                    item = '%s/%d' % (t[0], id)
                    g = ''
                    try:
                        g = r.render(e, assembly, template, obj_space,
                            cspaces[e.instance.address_space], shmem, kept_symbols,
                            fill_frames, outfile_name=None,
                            options=renderoptions, my_pd=pds[e.instance.address_space])
                    except TemplateError as inst:
                        die(rendering_error(item, inst))
                    except jinja2.exceptions.TemplateNotFound:
                        die('While rendering %s: missing template for %s' %
                            (item, c.type.name))
                    save(item, g)
                    for (target, outfile) in (all_items - done_items):
                        if target == item:
                            if not template:
                                log.warning('Warning: no template for %s' % item)
                            done(g, outfile, item)
                            break

        # The following block handles instantiations of per-connection
        # templates that are neither a 'source' nor a 'header', as handled
        # above. We assume that none of these need instantiation unless we are
        # actually currently looking for them (== options.item). That is, we
        # assume that following templates, like the CapDL spec, do not require
        # these templates to be rendered prior to themselves.
        # FIXME: This is a pretty ugly way of handling this. It would be nicer
        # for the runner to have a more general notion of per-'thing' templates
        # where the per-component templates, the per-connection template loop
        # above, and this loop could all be done in a single unified control
        # flow.
        for (item, outfile) in (all_items - done_items):
            for t in (('%s/from/' % c.name, c.from_ends),
                    ('%s/to/' % c.name, c.to_ends)):

                if not item.startswith(t[0]):
                    # This is not the item we're looking for.
                    continue

                # If we've reached here then this is the exact item we're after.
                template = templates.lookup(item, c)
                if template is None:
                    die('no registered template for %s' % item)

                for e in t[1]:
                    try:
                        g = r.render(e, assembly, template, obj_space,
                            cspaces[e.instance.address_space], shmem, kept_symbols,
                            fill_frames, outfile_name=None,
                            options=renderoptions, my_pd=pds[e.instance.address_space])
                        save(item, g)
                        done(g, outfile, item)
                    except TemplateError as inst:
                        die(rendering_error(item, inst))

    # Perform any per-component special generation. This needs to happen last,
    # as these templates need to run after all other capabilities have been
    # allocated.
    for i in assembly.composition.instances:
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue
        assert i.address_space in cspaces
        SPECIAL_TEMPLATES = [('debug', 'debug'), ('simple', 'simple'), ('rump_config', 'rumprun')]
        for special in [bl for bl in SPECIAL_TEMPLATES if conf[i.name].get(bl[0])]:
            for t in ('%s/%s' % (i.name, special[1]),):
                try:
                    template = templates.lookup(t, i)
                    g = ''
                    if template:
                        g = r.render(i, assembly, template, obj_space, cspaces[i.address_space],
                            shmem, kept_symbols, fill_frames, outfile_name=None,
                            options=renderoptions, my_pd=pds[i.address_space])
                    save(t, g)
                    for (item, outfile) in (all_items - done_items):
                        if item == t:
                            if not template:
                                log.warning('Warning: no template for %s' % item)
                            done(g, outfile, item)
                except TemplateError as inst:
                    die(rendering_error(i.name, inst))

    if options.data_structure_cache_dir is not None:
        # At this point the capdl database is in the state required for applying capdl
        # filters and generating the capdl spec. In case the capdl spec isn't the current
        # target, we pickle the database here, so when the capdl spec is built, these
        # data structures don't need to be regenerated.
        cache_path = os.path.realpath(options.data_structure_cache_dir)
        pickle_path = os.path.join(cache_path, CAPDL_STATE_PICKLE)
        with open(pickle_path, 'wb') as pickle_file:
            pickle.dump((obj_space, shmem, cspaces, pds, kept_symbols, fill_frames), pickle_file)

    for (item, outfile) in (all_items - done_items):
        if item in ('capdl', 'label-mapping'):
            apply_capdl_filters()

    # Instantiate any other, miscellaneous template. If we've reached this
    # point, we know the user did not request a code template.
    instantiate_misc_template()

    # Check if there are any remaining items
    not_done = all_items - done_items
    if len(not_done) > 0:
        for (item, outfile) in not_done:
            err.write('No valid element matching --item %s.\n' % item)
        return -1
    return 0
Example #8
import collections
import pathlib
import sys

from pyfdt.pyfdt import FdtBlobParse

if __name__ == '__main__':
    if len(sys.argv) != 3:
        print('Usage: dump-kernel-from-itb.py <tree.itb> <kernel.gz>')
        sys.exit(-1)

    in_path = pathlib.Path(sys.argv[1]).expanduser().resolve()
    out_path = pathlib.Path(sys.argv[2]).expanduser().resolve()

    # Load the ITB.
    print('[-] Attempting to load ITB from {0}'.format(in_path))
    with open(in_path, 'rb') as fin:
        itb = FdtBlobParse(fin).to_fdt()

    # Locate the Kernel, and process it.
    print('[-] Looking for kernel@1 image in ITB')
    kernel = itb.resolve_path(path='/images/kernel@1')
    kernel_image = None
    kernel_description = None
    kernel_compression = None
    kernel_checksum_type = None
    kernel_checksum_value = None

    for node in kernel:
        # Find the checksum, and type.
        if node.name == 'hash@1':
            for entry in node:
                if entry.name == 'value':