Example no. 1
0
def final_spec(cspaces, obj_space, addr_spaces, elf_files, architecture):
    """
    Generates a final CapDL spec file that can be given to a capdl loader application
    """
    arch = lookup_architecture(architecture)

    # elf_files is a list of lists; walk every contained path.
    for path in (p for group in elf_files for p in group):
        name = os.path.basename(path)
        elf = ELF(path, name, architecture)
        cspace = cspaces[name]
        addr_space = addr_spaces[name]

        # Avoid inferring a TCB as we've already created our own.
        obj_space.merge(elf.get_spec(infer_tcb=False,
                                     infer_asid=False,
                                     pd=addr_space.vspace_root,
                                     addr_space=addr_space))
        cspace.cnode.finalise_size(arch)

        # Fill in TCB object information.
        # TODO: This should be generalised with what is in the Camkes filters
        tcb = obj_space["tcb_%s" % name]
        progname_vaddr = elf.get_symbol_vaddr("progname")
        vsyscall_vaddr = elf.get_symbol_vaddr("sel4_vsyscall")
        tcb.init = [0, 0, 0, 0, 2, progname_vaddr, 1, 0, 0, 32,
                    vsyscall_vaddr, 0, 0]
        tcb.addr = elf.get_symbol_vaddr("mainIpcBuffer")
        tcb.sp = elf.get_symbol_vaddr("stack") + elf.get_symbol_size("stack")
        tcb.ip = elf.get_entry_point()

    return obj_space
Example no. 2
0
def final_spec(args, obj_space, cspaces, addr_spaces, targets, architecture):
    """
    Generates a final CapDL spec file that can be given to a capdl loader application
    """
    arch = lookup_architecture(architecture)

    # `targets` yields (elf_path, cspace_key) pairs; each ELF is merged into
    # the object space under its key's label.
    for e, key in targets:
        name = os.path.basename(e)
        elf = ELF(e, name, architecture)
        cspace = cspaces[key]

        # Avoid inferring a TCB as we've already created our own.
        elf_spec = elf.get_spec(infer_tcb=False, infer_asid=False,
                                pd=addr_spaces[key].vspace_root, addr_space=addr_spaces[key])
        obj_space.merge(elf_spec, label=key)
        for slot, v in cspace.cnode.slots.items():
            if v is not None and isinstance(v.referent, TCB):
                tcb = v.referent
                if 'cspace' in tcb and tcb['cspace'] and tcb['cspace'].referent is not cspace.cnode:
                    # We exclude TCBs that refer to a different CSpace
                    continue
                # Resolve the TCB's symbolic register expressions (e.g.
                # "get_vaddr('sym') + N") against this ELF's symbol table.
                # NOTE(review): blanking __builtins__ is NOT a real eval
                # sandbox; safe only because these strings are generated by
                # our own templates, never untrusted input. A simple_eval
                # style evaluator would be safer — confirm against the rest
                # of the codebase.
                funcs = {"get_vaddr": lambda x: elf.get_symbol_vaddr(x)}
                tcb.ip = eval(str(tcb.ip), {"__builtins__": None}, funcs)
                tcb.sp = eval(str(tcb.sp), {"__builtins__": None}, funcs)
                tcb.addr = eval(str(tcb.addr), {"__builtins__": None}, funcs)
                tcb.init = eval(str(tcb.init), {"__builtins__": None}, funcs)
                if not args.fprovide_tcb_caps:
                    # NOTE(review): this deletes from cspace.cnode while
                    # iterating .slots.items(); it relies on CNode's
                    # __delitem__ not removing dict keys mid-iteration —
                    # confirm CNode's implementation.
                    del cspace.cnode[slot]
        cspace.cnode.finalise_size(arch=arch)
    return obj_space
 def start_elf(self, name):
     """Begin a new ELF: allocate its CNode and vspace root, and reset the
     per-ELF allocators and symbol lists."""
     arch_name = self.objects.spec.arch.capdl_name()
     new_cnode = self.objects.alloc(ObjectType.seL4_CapTableObject,
                                    "cnode_%s" % name)
     new_pd = self.objects.alloc(lookup_architecture(arch_name).vspace().object,
                                 "vspace_%s" % name)
     self.current_cspace = CSpaceAllocator(new_cnode)
     self.current_addr_space = AddressSpaceAllocator(None, new_pd)
     self.current_cap_symbols = []
     self.current_region_symbols = []
Example no. 4
0
 def __init__(self, name, interface, intra_index, stack_size):
     """Record the thread's identity and precompute the symbolic expressions
     for its stack pointer and IPC buffer address (resolved later via a
     get_vaddr helper)."""
     self.name = name
     self.interface = interface
     self.intra_index = intra_index
     self.stack_size = stack_size
     self.stack_symbol = "_camkes_stack_%s" % name
     self.ipc_symbol = "_camkes_ipc_buffer_%s" % name
     # Stack grows down from the top of its region (plus one guard page).
     sp_offset = self.stack_size + PAGE_SIZE
     self.sp = "get_vaddr('%s') + %d" % (self.stack_symbol, sp_offset)
     ipc_offset = (2 * PAGE_SIZE
                   - lookup_architecture(options.architecture).ipc_buffer_size())
     self.addr = "get_vaddr('%s') + %d" % (self.ipc_symbol, ipc_offset)
Example no. 5
0
 def __init__(self, name, interface, intra_index, stack_size):
     """Capture thread metadata and build the symbolic stack/IPC-buffer
     address strings that a later pass evaluates with get_vaddr."""
     ipc_buffer_size = lookup_architecture(options.architecture).ipc_buffer_size()
     self.name = name
     self.interface = interface
     self.intra_index = intra_index
     self.stack_symbol = "_camkes_stack_%s" % name
     self.stack_size = stack_size
     self.ipc_symbol = "_camkes_ipc_buffer_%s" % name
     self.sp = "get_vaddr('%s') + %d" % (self.stack_symbol,
                                         stack_size + PAGE_SIZE)
     self.addr = "get_vaddr('%s') + %d" % (self.ipc_symbol,
                                           2 * PAGE_SIZE - ipc_buffer_size)
Example no. 6
0
def guard_cnode_caps(cspaces, options, **_):
    '''If the templates have allocated any caps to CNodes, they will not have
    the correct guards. This is due to the CNodes' sizes being automatically
    calculated (during set_tcb_caps above). Correct them here.'''

    arch = lookup_architecture(options.architecture)
    # Loop-invariant: the machine word size is fixed for this architecture.
    word_size = arch.word_size_bits()
    for space in cspaces.values():
        # A plain loop (not a side-effecting list comprehension) — we are
        # mutating caps, not building a collection.
        for cap in space.cnode.slots.values():
            if cap is not None and isinstance(cap.referent, CNode):
                # Guard bits + the CNode's radix must cover a full word.
                cap.set_guard_size(word_size - cap.referent.size_bits)
def set_tcb_caps(ast, obj_space, cspaces, elfs, options, **_):
    """Wire each TCB's cspace and vspace caps, applying CNode size overrides.

    Walks every CSpace, finds the TCB caps held in its CNode, finalises the
    CNode's size (optionally overridden via the 'cnode_size_bits' attribute),
    and installs a guarded CNode cap plus — when the matching PD exists in
    the object space — a vspace root cap into each TCB.
    """
    arch = lookup_architecture(options.architecture)
    assembly = ast.assembly

    for group, space in cspaces.items():
        cnode = space.cnode
        # Snapshot the TCB-bearing slots up front so later mutation of the
        # CNode cannot disturb iteration.
        for index, tcb in [(k, v.referent) for (k, v) in cnode.slots.items()
                           if v is not None and isinstance(v.referent, TCB)]:

            perspective = Perspective(tcb=tcb.name, group=group)

            # Finalise the CNode so that we know what its absolute size will
            # be. Note that we are assuming no further caps will be added to
            # the CNode after this point.
            cnode.finalise_size()

            # Allow the user to override CNode sizes with the 'cnode_size_bits'
            # attribute.
            cnode_size = assembly.configuration[group].get('cnode_size_bits')
            if cnode_size is not None:
                try:
                    # The attribute may arrive as a string (parsed in any
                    # base via int(x, 0)) or as a number already.
                    if isinstance(cnode_size, six.string_types):
                        size = int(cnode_size, 0)
                    else:
                        size = cnode_size
                except ValueError:
                    raise Exception('illegal value for CNode size for %s' %
                                    group)
                if size < cnode.size_bits:
                    # An override may only grow the CNode, never shrink it
                    # below what the allocated caps require.
                    raise Exception('%d-bit CNode specified for %s, but this '
                                    'CSpace needs to be at least %d bits' %
                                    (size, group, cnode.size_bits))
                cnode.size_bits = size

            # Guard bits + CNode radix together span a full machine word.
            cspace = Cap(cnode)
            cspace.set_guard_size(arch.word_size_bits() - cnode.size_bits)
            tcb['cspace'] = cspace

            pd = None
            pd_name = perspective['pd']
            pds = [x for x in obj_space.spec.objs if x.name == pd_name]
            if len(pds) > 1:
                raise Exception('Multiple PDs found for %s' % group)
            elif len(pds) == 1:
                pd, = pds
                tcb['vspace'] = Cap(pd)
            # If no PD was found we were probably just not passed any ELF files
            # in this pass.

            if perspective['pool']:
                # This TCB is part of the (cap allocator's) TCB pool.
                continue
Example no. 8
0
def describe_fill_frames(ast, obj_space, elfs, fill_frames, options, **_):
    """Attach fill data to the frames backing registered fill symbols.

    For each instance name in `fill_frames`, locates the instance's ELF
    image and vspace root, then for every (symbol, fill) pair records the
    fill on the frame mapped at the symbol's (page-aligned) vaddr.
    """
    if not elfs:
        # If we haven't been passed any ELF files this step is not relevant yet.
        return

    arch = lookup_architecture(options.architecture)
    assembly = ast.assembly

    for name in fill_frames:
        # Find the matching instance in assembly.composition.instances.
        instances = [
            x for x in assembly.composition.instances if x.name == name
        ]
        assert len(instances) == 1, 'Found registered fill frame with no associated ' \
            'instance (template bug?)'
        instance, = instances
        perspective = Perspective(instance=name, group=instance.address_space)

        elf_name = perspective['elf_name']
        assert elf_name in elfs, 'Failed to find binary image for instance %s' % name
        elf = elfs[elf_name]

        # Find the vspace root
        root_name = perspective['pd']
        roots = [x for x in obj_space.spec.objs if x.name == root_name]
        assert len(roots) == 1, 'No vspace found for instance %s' % name
        root, = roots

        # Go over all the fill symbols (iterating the container directly;
        # the former iter() wrapper was redundant).
        for symbol, fill in fill_frames[name]:
            base = get_symbol_vaddr(elf, symbol)
            assert base is not None, 'Registered fill symbol not found in elf image (template bug?)'
            # Ensure this symbol is correctly aligned
            assert base % PAGE_SIZE == 0, 'Fill symbol in elf image is not correctly aligned (template bug?)'

            (cap, frame) = frame_for_vaddr(arch, root, base, PAGE_SIZE)
            assert frame is not None, 'Failed to find frame for symbol at %x (CAmkES bug?)' % base
            frame.set_fill(fill)
Example no. 9
0
 def apply_capdl_filters(renderoptions):
     """Merge ELF-derived specs into the render state, then resolve each
     TCB's symbolic register expressions against its ELF's symbols.

     NOTE(review): `elfs.get(tcb.elf)` below may return None if a TCB names
     an ELF we were not passed; the subsequent `elf[1]` access would then
     raise — confirm callers guarantee every TCB's ELF is present.
     """
     # Derive a set of usable ELF objects from the filenames we were passed.
     render_state = renderoptions.render_state
     elfs = {}
     for e in options.elf:
         try:
             name = os.path.basename(e)
             if name in elfs:
                 raise Exception(
                     'duplicate ELF files of name \'%s\' encountered' %
                     name)
             elf = ELF(e, name, options.architecture)
             # Group name is derived from the ELF filename convention.
             group = name.replace("_group_bin", "")
             # Avoid inferring a TCB as we've already created our own.
             elf_spec = elf.get_spec(
                 infer_tcb=False,
                 infer_asid=False,
                 pd=render_state.pds[group],
                 use_large_frames=options.largeframe,
                 addr_space=render_state.addr_spaces[group])
             render_state.obj_space.merge(elf_spec, label=group)
             elfs[name] = (e, elf)
         except Exception as inst:
             die('While opening \'%s\': %s' % (e, inst))
     for space in render_state.cspaces.values():
         # Materialise the TCB slots first so deleting caps below cannot
         # disturb the iteration.
         for (slot, tcb) in [
             (v, v.referent) for (k, v) in space.cnode.slots.items()
                 if v is not None and isinstance(v.referent, TCB)
         ]:
             elf = elfs.get(tcb.elf)
             # simple_eval restricts evaluation to the provided functions,
             # here just get_vaddr for symbol lookup.
             funcs = {"get_vaddr": lambda x: elf[1].get_symbol_vaddr(x)}
             tcb.ip = simple_eval(str(tcb.ip), functions=funcs)
             tcb.sp = simple_eval(str(tcb.sp), functions=funcs)
             tcb.addr = simple_eval(str(tcb.addr), functions=funcs)
             if not options.fprovide_tcb_caps:
                 del space.cnode[slot]
         space.cnode.finalise_size(
             arch=lookup_architecture(options.architecture))
Example no. 10
0
def main(argv, out, err):

    # We need a UTF-8 locale, so bail out if we don't have one. More
    # specifically, things like the version() computation traverse the file
    # system and, if they hit a UTF-8 filename, they try to decode it into your
    # preferred encoding and trigger an exception.
    encoding = locale.getpreferredencoding().lower()
    if encoding not in ('utf-8', 'utf8'):
        err.write('CAmkES uses UTF-8 encoding, but your locale\'s preferred '
                  'encoding is %s. You can override your locale with the LANG '
                  'environment variable.\n' % encoding)
        return -1

    options = parse_args(argv, out, err)

    # Ensure we were supplied equal items and outfiles
    if len(options.outfile) != len(options.item):
        err.write(
            'Different number of items and outfiles. Required one outfile location '
            'per item requested.\n')
        return -1

    # No duplicates in items or outfiles
    if len(set(options.item)) != len(options.item):
        err.write('Duplicate items requested through --item.\n')
        return -1
    if len(set(options.outfile)) != len(options.outfile):
        err.write('Duplicate outfiles requrested through --outfile.\n')
        return -1

    # Save us having to pass debugging everywhere.
    die = functools.partial(_die, options)

    log.set_verbosity(options.verbosity)

    cwd = os.getcwd()

    # Build a list of item/outfile pairs that we have yet to match and process
    all_items = set(zip(options.item, options.outfile))
    done_items = set([])

    # Construct the compilation caches if requested.
    cachea = None
    cacheb = None
    if options.cache:

        # Construct a modified version of the command line arguments that we'll
        # use in the keys to the caches. Essentially we elide --outfile and its
        # parameter under the assumption that this value is never used in code
        # generation. The purpose of this is to allow us to successfully cache
        # ancillary outputs that we generate along the way to the current
        # output. If we were to include --outfile in the key, future attempts
        # to generate these ancillary outputs would unnecessarily miss the
        # entries generated by this execution.
        args = []
        skip = False
        for index, arg in enumerate(argv[1:]):
            if skip:
                skip = False
                continue
            if arg in ('--outfile', '-O'):
                skip = True
                continue
            args.append(arg)

        cachea = LevelACache(
            os.path.join(options.cache_dir, version(), 'cachea'))
        cacheb = LevelBCache(
            os.path.join(options.cache_dir, version(), 'cacheb'))

    def done(s, file, item):
        ret = 0
        if s:
            file.write(s)
            file.close()
        if cachea is not None:
            try:
                cachea.flush()
            except sqlite3.OperationalError as e:
                # The following suppresses two spurious errors:
                #  1. The database is locked. In a large, parallel build, writes
                #     to the level A cache are heavily contended and this error
                #     can occur.
                #  2. The database structure is unexpected. If the CAmkES
                #     sources have changed *while* the runner was executing,
                #     the level A cache can be looking in a different place to
                #     where the cache was created.
                # Both of these are non-critical (will just result in a
                # potential future cache miss) so there's no need to alarm the
                # user.
                if re.search(r'database is locked', str(e)) is not None or \
                   re.search(r'no such table', str(e)) is not None:
                    log.debug('failed to flush level A cache: %s' % str(e))
                else:
                    raise
        if cacheb is not None:
            try:
                cacheb.flush()
            except sqlite3.OperationalError as e:
                # As above for the level B cache.
                if re.search(r'database is locked', str(e)):
                    log.debug('failed to flush level B cache: %s' % str(e))
                else:
                    raise

        done_items.add((item, file))
        if len(all_items - done_items) == 0:
            sys.exit(ret)

    # Try to find this output in the level A cache if possible. This check will
    # 'hit' if the source files representing the input spec are identical to
    # some previously observed execution.
    if cachea is not None:
        assert 'args' in locals()
        assert len(options.outfile) == 1, 'level A cache only supported when requestiong ' \
            'single items'
        output = cachea.load(args, cwd)
        if output is not None:
            log.debug('Retrieved %(platform)s/%(item)s from level A cache' %
                      options.__dict__)
            done(output, options.outfile[0], options.item[0])

    filename = os.path.abspath(options.file.name)

    try:
        # Build the parser options
        parse_options = ParserOptions(options.cpp, options.cpp_flag,
                                      options.import_path, options.verbosity,
                                      options.allow_forward_references)
        ast, read = parse_file_cached(filename,
                                      options.data_structure_cache_dir,
                                      parse_options)
    except (ASTError, ParseError) as e:
        die(e.args)

    # Locate the assembly.
    assembly = ast.assembly
    if assembly is None:
        die('No assembly found')

    # Do some extra checks if the user asked for verbose output.
    if options.verbosity >= 2:

        # Try to catch type mismatches in attribute settings. Note that it is
        # not possible to conclusively evaluate type correctness because the
        # attributes' type system is (deliberately) too loose. That is, the
        # type of an attribute can be an uninterpreted C type the user will
        # provide post hoc.
        for i in assembly.composition.instances:
            for a in i.type.attributes:
                value = assembly.configuration[i.name].get(a.name)
                if value is not None:
                    if a.type == 'string' and not \
                            isinstance(value, six.string_types):
                        log.warning('attribute %s.%s has type string but is '
                                    'set to a value that is not a string' %
                                    (i.name, a.name))
                    elif a.type == 'int' and not \
                            isinstance(value, numbers.Number):
                        log.warning('attribute %s.%s has type int but is set '
                                    'to a value that is not an integer' %
                                    (i.name, a.name))

    obj_space = ObjectAllocator()
    obj_space.spec.arch = options.architecture
    cspaces = {}
    pds = {}
    conf = assembly.configuration
    shmem = collections.defaultdict(ShmemFactory())
    kept_symbols = {}
    fill_frames = {}

    templates = Templates(options.platform)
    [templates.add_root(t) for t in options.templates]
    try:
        r = Renderer(templates, options.cache, options.cache_dir)
    except jinja2.exceptions.TemplateSyntaxError as e:
        die('template syntax error: %s' % e)

    # The user may have provided their own connector definitions (with
    # associated) templates, in which case they won't be in the built-in lookup
    # dictionary. Let's add them now. Note, definitions here that conflict with
    # existing lookup entries will overwrite the existing entries. Note that
    # the extra check that the connector has some templates is just an
    # optimisation; the templates module handles connectors without templates
    # just fine.
    extra_templates = set()
    for c in (x for x in ast.items if isinstance(x, Connector) and (
            x.from_template is not None or x.to_template is not None)):
        try:
            # Find a connection that uses this type.
            connection = next(x for x in ast
                              if isinstance(x, Connection) and x.type == c)
            # Add the custom templates and update our collection of read
            # inputs. It is necessary to update the read set here to avoid
            # false compilation cache hits when the source of a custom template
            # has changed.
            extra_templates |= templates.add(c, connection)
        except TemplateError as e:
            die('while adding connector %s: %s' % (c.name, e))
        except StopIteration:
            # No connections use this type. There's no point adding it to the
            # template lookup dictionary.
            pass

    # Check if our current target is in the level B cache. The level A cache
    # will 'miss' and this one will 'hit' when the input spec is identical to
    # some previously observed execution modulo a semantically irrelevant
    # element (e.g. an introduced comment).
    ast_hash = None
    if cacheb is not None:
        ast_hash = level_b_prime(ast)
        assert 'args' in locals()
        assert len(options.item) == 1, 'level B cache only supported when requesting ' \
            'single items'
        output = cacheb.load(ast_hash, args,
                             set(options.elf) | extra_templates)
        if output is not None:
            log.debug('Retrieved %(platform)s/%(item)s from level B cache' %
                      options.__dict__)
            done(output, options.outfile[0], options.item[0])

    # Add custom templates.
    read |= extra_templates

    # Add the CAmkES sources themselves to the accumulated list of inputs.
    read |= set(path for path, _ in sources())

    # Add any ELF files we were passed as inputs.
    read |= set(options.elf)

    # Write a Makefile dependency rule if requested.
    if options.makefile_dependencies is not None:
        options.makefile_dependencies.write(
            '%s: \\\n  %s\n' % (filename, ' \\\n  '.join(sorted(read))))

    # If we have a cache, allow outputs to be saved to it.
    if options.cache:

        assert cachea is not None, 'level A cache not available, though the ' \
            'cache is enabled (bug in runner?)'
        # The logic of this cache currently only works when a single item is requested
        # on the command line
        assert len(options.item) == 1, 'level A cache only supported when requesting ' \
            'single items'

        # Calculate the input files to the level A cache.
        inputs = level_a_prime(read)

        # Work out the position of the --item argument in the command line
        # parameters. We will use this to cache not only outputs for this
        # execution, but also outputs for ones with a different target.
        item_index = None
        assert 'args' in locals()
        for index, arg in enumerate(args[:-1]):
            if arg in ('--item', '-T'):
                item_index = index + 1
                break
        assert item_index is not None, 'failed to find required argument ' \
            '--item (bug in runner?)'

        # We should already have the necessary inputs for the level B cache.
        assert cacheb is not None, 'level B cache not available, though the ' \
            'cache is enabled (bug in runner?)'
        assert ast_hash is not None, 'AST hash not pre-computed (bug in ' \
            'runner?)'

        def save(item, value):
            # Juggle the command line arguments to cache the predicted
            # arguments for a call that would generate this item.
            new_args = args[:item_index] + [item] + args[item_index + 1:]

            # Save entries in both caches.
            cachea.save(new_args, cwd, value, inputs)
            if item != 'Makefile' and item != 'camkes-gen.cmake':
                # We avoid caching the generated Makefile because it is not
                # safe. The inputs to generation of the Makefile are not only
                # the AST, but also the file names (`inputs`). If we cache it in
                # the level B cache we risk the following scenario:
                #
                #   1. Generate the Makefile, caching it in the level B cache;
                #   2. Modify the spec to import a file containing only white
                #      space and/or comments; then
                #   3. Generate the Makefile, missing the level A cache, but
                #      hitting the level B cache.
                #
                # At this point, the generated Makefile is incorrect because it
                # does not capture any dependencies on the imported file. We can
                # now introduce something semantically relevant into this file
                # (e.g. an Assembly block) and it will not be seen by the build
                # system.
                cacheb.save(ast_hash, new_args,
                            set(options.elf) | extra_templates, value)
    else:

        def save(item, value):
            pass

    def apply_capdl_filters():
        # Derive a set of usable ELF objects from the filenames we were passed.
        elfs = {}
        for e in options.elf:
            try:
                name = os.path.basename(e)
                if name in elfs:
                    raise Exception(
                        'duplicate ELF files of name \'%s\' encountered' %
                        name)
                elf = ELF(e, name, options.architecture)
                p = Perspective(phase=RUNNER, elf_name=name)
                group = p['group']
                # Avoid inferring a TCB as we've already created our own.
                elf_spec = elf.get_spec(infer_tcb=False,
                                        infer_asid=False,
                                        pd=pds[group],
                                        use_large_frames=options.largeframe)
                obj_space.merge(elf_spec, label=group)
                elfs[name] = (e, elf)
            except Exception as inst:
                die('While opening \'%s\': %s' % (e, inst))

        # It's only relevant to run these filters if the final target is CapDL.
        # Note, this will no longer be true if we add any other templates that
        # depend on a fully formed CapDL spec. Guarding this loop with an if
        # is just an optimisation and the conditional can be removed if
        # desired.
        filteroptions = FilterOptions(
            options.architecture, options.realtime, options.largeframe,
            options.largeframe_dma, options.default_priority,
            options.default_max_priority, options.default_criticality,
            options.default_max_criticality, options.default_affinity,
            options.default_period, options.default_budget,
            options.default_data, options.default_size_bits,
            options.debug_fault_handlers, options.fprovide_tcb_caps)
        for f in CAPDL_FILTERS:
            try:
                # Pass everything as named arguments to allow filters to
                # easily ignore what they don't want.
                f(ast=ast,
                  obj_space=obj_space,
                  cspaces=cspaces,
                  elfs=elfs,
                  options=filteroptions,
                  shmem=shmem,
                  fill_frames=fill_frames)
            except Exception as inst:
                die('While forming CapDL spec: %s' % inst)

    renderoptions = RenderOptions(
        options.file, options.verbosity, options.frpc_lock_elision,
        options.fspecialise_syscall_stubs, options.fprovide_tcb_caps,
        options.fsupport_init, options.largeframe, options.largeframe_dma,
        options.architecture, options.debug_fault_handlers, options.realtime)

    def instantiate_misc_template():
        for (item, outfile) in (all_items - done_items):
            try:
                template = templates.lookup(item)
                if template:
                    g = r.render(assembly,
                                 assembly,
                                 template,
                                 obj_space,
                                 None,
                                 shmem,
                                 kept_symbols,
                                 fill_frames,
                                 imported=read,
                                 options=renderoptions)
                    save(item, g)
                    done(g, outfile, item)
            except TemplateError as inst:
                die([
                    'While rendering %s: %s' % (item, line)
                    for line in inst.args
                ])

    if options.item[0] in ('capdl', 'label-mapping') and options.data_structure_cache_dir is not None \
            and len(options.outfile) == 1:
        # It's possible that data structures required to instantiate the capdl spec
        # were saved during a previous invocation of this script in the current build.
        cache_path = os.path.realpath(options.data_structure_cache_dir)
        pickle_path = os.path.join(cache_path, CAPDL_STATE_PICKLE)

        if os.path.isfile(pickle_path):
            with open(pickle_path, 'rb') as pickle_file:
                # Found a cached version of the necessary data structures
                obj_space, shmem, cspaces, pds, kept_symbols, fill_frames = pickle.load(
                    pickle_file)
                apply_capdl_filters()
                instantiate_misc_template()

                # If a template wasn't instantiated, something went wrong, and we can't recover
                raise CAmkESError(
                    "No template instantiated on capdl generation fastpath")

    # We're now ready to instantiate the template the user requested, but there
    # are a few wrinkles in the process. Namely,
    #  1. Template instantiation needs to be done in a deterministic order. The
    #     runner is invoked multiple times and template code needs to be
    #     allocated identical cap slots in each run.
    #  2. Components and connections need to be instantiated before any other
    #     templates, regardless of whether they are the ones we are after. Some
    #     other templates, such as the Makefile depend on the obj_space and
    #     cspaces.
    #  3. All actual code templates, up to the template that was requested,
    #     need to be instantiated. This is related to (1) in that the cap slots
    #     allocated are dependent on what allocations have been done prior to a
    #     given allocation call.

    # Instantiate the per-component source and header files.
    for i in assembly.composition.instances:
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue

        if i.address_space not in cspaces:
            p = Perspective(phase=RUNNER,
                            instance=i.name,
                            group=i.address_space)
            cnode = obj_space.alloc(seL4_CapTableObject,
                                    name=p['cnode'],
                                    label=i.address_space)
            cspaces[i.address_space] = CSpaceAllocator(cnode)
            pd = obj_space.alloc(lookup_architecture(
                options.architecture).vspace().object,
                                 name=p['pd'],
                                 label=i.address_space)
            pds[i.address_space] = pd

        for t in ('%s/source' % i.name, '%s/header' % i.name,
                  '%s/linker' % i.name):
            try:
                template = templates.lookup(t, i)
                g = ''
                if template:
                    g = r.render(i,
                                 assembly,
                                 template,
                                 obj_space,
                                 cspaces[i.address_space],
                                 shmem,
                                 kept_symbols,
                                 fill_frames,
                                 options=renderoptions,
                                 my_pd=pds[i.address_space])
                save(t, g)
                for (item, outfile) in (all_items - done_items):
                    if item == t:
                        if not template:
                            log.warning('Warning: no template for %s' % item)
                        done(g, outfile, item)
                        break
            except TemplateError as inst:
                die([
                    'While rendering %s: %s' % (i.name, line)
                    for line in inst.args
                ])

    # Instantiate the per-connection files.
    for c in assembly.composition.connections:

        for t in (('%s/from/source' % c.name,
                   c.from_ends), ('%s/from/header' % c.name, c.from_ends),
                  ('%s/to/source' % c.name,
                   c.to_ends), ('%s/to/header' % c.name, c.to_ends)):

            template = templates.lookup(t[0], c)

            if template is not None:
                for id, e in enumerate(t[1]):
                    item = '%s/%d' % (t[0], id)
                    g = ''
                    try:
                        g = r.render(e,
                                     assembly,
                                     template,
                                     obj_space,
                                     cspaces[e.instance.address_space],
                                     shmem,
                                     kept_symbols,
                                     fill_frames,
                                     options=renderoptions,
                                     my_pd=pds[e.instance.address_space])
                    except TemplateError as inst:
                        die([
                            'While rendering %s: %s' % (item, line)
                            for line in inst.args
                        ])
                    except jinja2.exceptions.TemplateNotFound:
                        die('While rendering %s: missing template for %s' %
                            (item, c.type.name))
                    save(item, g)
                    for (target, outfile) in (all_items - done_items):
                        if target == item:
                            if not template:
                                log.warning('Warning: no template for %s' %
                                            item)
                            done(g, outfile, item)
                            break

        # The following block handles instantiations of per-connection
        # templates that are neither a 'source' or a 'header', as handled
        # above. We assume that none of these need instantiation unless we are
        # actually currently looking for them (== options.item). That is, we
        # assume that following templates, like the CapDL spec, do not require
        # these templates to be rendered prior to themselves.
        # FIXME: This is a pretty ugly way of handling this. It would be nicer
        # for the runner to have a more general notion of per-'thing' templates
        # where the per-component templates, the per-connection template loop
        # above, and this loop could all be done in a single unified control
        # flow.
        for (item, outfile) in (all_items - done_items):
            for t in (('%s/from/' % c.name, c.from_ends), ('%s/to/' % c.name,
                                                           c.to_ends)):

                if not item.startswith(t[0]):
                    # This is not the item we're looking for.
                    continue

                # If we've reached here then this is the exact item we're after.
                template = templates.lookup(item, c)
                if template is None:
                    die('no registered template for %s' % item)

                for e in t[1]:
                    try:
                        g = r.render(e,
                                     assembly,
                                     template,
                                     obj_space,
                                     cspaces[e.instance.address_space],
                                     shmem,
                                     kept_symbols,
                                     fill_frames,
                                     options=renderoptions,
                                     my_pd=pds[e.instance.address_space])
                        save(item, g)
                        done(g, outfile, item)
                    except TemplateError as inst:
                        die([
                            'While rendering %s: %s' % (item, line)
                            for line in inst.args
                        ])

    # Perform any per component special generation. This needs to happen last
    # as these template needs to run after all other capabilities have been
    # allocated
    for i in assembly.composition.instances:
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue
        assert i.address_space in cspaces
        SPECIAL_TEMPLATES = [('debug', 'debug'), ('simple', 'simple'),
                             ('rump_config', 'rumprun')]
        for special in [
                bl for bl in SPECIAL_TEMPLATES if conf[i.name].get(bl[0])
        ]:
            for t in ('%s/%s' % (i.name, special[1]), ):
                try:
                    template = templates.lookup(t, i)
                    g = ''
                    if template:
                        g = r.render(i,
                                     assembly,
                                     template,
                                     obj_space,
                                     cspaces[i.address_space],
                                     shmem,
                                     kept_symbols,
                                     fill_frames,
                                     options=renderoptions,
                                     my_pd=pds[i.address_space])
                    save(t, g)
                    for (item, outfile) in (all_items - done_items):
                        if item == t:
                            if not template:
                                log.warning('Warning: no template for %s' %
                                            item)
                            done(g, outfile, item)
                except TemplateError as inst:
                    die([
                        'While rendering %s: %s' % (i.name, line)
                        for line in inst.args
                    ])

    if options.data_structure_cache_dir is not None:
        # At this point the capdl database is in the state required for applying capdl
        # filters and generating the capdl spec. In case the capdl spec isn't the current
        # target, we pickle the database here, so when the capdl spec is built, these
        # data structures don't need to be regenerated.
        cache_path = os.path.realpath(options.data_structure_cache_dir)
        pickle_path = os.path.join(cache_path, CAPDL_STATE_PICKLE)
        with open(pickle_path, 'wb') as pickle_file:
            pickle.dump(
                (obj_space, shmem, cspaces, pds, kept_symbols, fill_frames),
                pickle_file)

    for (item, outfile) in (all_items - done_items):
        if item in ('capdl', 'label-mapping'):
            apply_capdl_filters()

    # Instantiate any other, miscellaneous template. If we've reached this
    # point, we know the user did not request a code template.
    instantiate_misc_template()

    # Check if there are any remaining items
    not_done = all_items - done_items
    if len(not_done) > 0:
        for (item, outfile) in not_done:
            err.write('No valid element matching --item %s.\n' % item)
        return -1
    return 0
Esempio n. 11
0
 def capdl_elf_vspace(context, elf_name, cap_symbol):
     """Allocate a cap to a vspace root object for the named ELF.

     The vspace root object type is taken from the x86_64 architecture
     description; the resulting cap is registered under *cap_symbol* via the
     tutorial helper.
     """
     root_type = lookup_architecture("x86_64").vspace().object
     obj_name = "vspace_%s" % elf_name
     return TutorialFunctions.capdl_alloc_cap(context, root_type, obj_name, cap_symbol)
Esempio n. 12
0
def get_word_size(arch):
    """Return the machine word size, in bytes, for the named architecture.

    Args:
        arch: architecture name understood by ``lookup_architecture``.

    Returns:
        Word size in bytes as an ``int``.
    """
    # word_size_bits() is in bits; use floor division to stay in integer
    # arithmetic instead of round-tripping through float with int(x / 8).
    return lookup_architecture(arch).word_size_bits() // 8
Esempio n. 13
0
def main(argv, out, err):
    """Entry point for the CAmkES template renderer.

    Parses command-line options, loads the pickled AST, builds (or restores)
    the CapDL allocator state, and renders each requested template item to its
    paired outfile.

    Args:
        argv: command-line argument list.
        out: stream for normal output.
        err: stream for error messages.

    Returns:
        0 on success, -1 on a usage/environment error. Fatal rendering errors
        terminate via ``die``.
    """

    # We need a UTF-8 locale, so bail out if we don't have one. More
    # specifically, things like the version() computation traverse the file
    # system and, if they hit a UTF-8 filename, they try to decode it into your
    # preferred encoding and trigger an exception.
    encoding = locale.getpreferredencoding().lower()
    if encoding not in ('utf-8', 'utf8'):
        err.write('CAmkES uses UTF-8 encoding, but your locale\'s preferred '
                  'encoding is %s. You can override your locale with the LANG '
                  'environment variable.\n' % encoding)
        return -1

    options = parse_args(argv, out, err)

    # Register object sizes with the capdl loader, if supplied.
    if options.object_sizes:
        register_object_sizes(
            yaml.load(options.object_sizes, Loader=yaml.FullLoader))

    # Ensure we were supplied equal items and outfiles.
    if len(options.outfile) != len(options.item):
        err.write(
            'Different number of items and outfiles. Required one outfile location '
            'per item requested.\n')
        return -1

    # No duplicates in items or outfiles.
    if len(set(options.item)) != len(options.item):
        err.write('Duplicate items requested through --item.\n')
        return -1
    if len(set(options.outfile)) != len(options.outfile):
        err.write('Duplicate outfiles requested through --outfile.\n')
        return -1

    # Save us having to pass debugging everywhere.
    die = functools.partial(_die, options)

    log.set_verbosity(options.verbosity)

    ast = pickle.load(options.load_ast)

    # Locate the assembly.
    assembly = ast.assembly
    if assembly is None:
        die('No assembly found')

    # Do some extra checks if the user asked for verbose output.
    if options.verbosity >= 2:

        # Try to catch type mismatches in attribute settings. Note that it is
        # not possible to conclusively evaluate type correctness because the
        # attributes' type system is (deliberately) too loose. That is, the
        # type of an attribute can be an uninterpreted C type the user will
        # provide post hoc.
        for i in assembly.composition.instances:
            for a in i.type.attributes:
                value = assembly.configuration[i.name].get(a.name)
                if value is not None:
                    if a.type == 'string' and not \
                            isinstance(value, six.string_types):
                        log.warning('attribute %s.%s has type string but is '
                                    'set to a value that is not a string' %
                                    (i.name, a.name))
                    elif a.type == 'int' and not \
                            isinstance(value, numbers.Number):
                        log.warning('attribute %s.%s has type int but is set '
                                    'to a value that is not an integer' %
                                    (i.name, a.name))

    templates = Templates(options.platform)
    # Plain loop rather than a side-effecting list comprehension.
    for t in options.templates:
        templates.add_root(t)
    try:
        r = Renderer(templates)
    except jinja2.exceptions.TemplateSyntaxError as e:
        die('template syntax error: %s' % e)

    # The user may have provided their own connector definitions (with
    # associated) templates, in which case they won't be in the built-in lookup
    # dictionary. Let's add them now. Note, definitions here that conflict with
    # existing lookup entries will overwrite the existing entries. Note that
    # the extra check that the connector has some templates is just an
    # optimisation; the templates module handles connectors without templates
    # just fine.
    for c in (x for x in ast.items if isinstance(x, Connector) and (
            x.from_template is not None or x.to_template is not None)):
        try:
            # Find a connection that uses this type.
            connection = next(x for x in ast
                              if isinstance(x, Connection) and x.type == c)
            # Add the custom templates and update our collection of read
            # inputs.
            templates.add(c, connection)
        except TemplateError as e:
            die('while adding connector %s: %s' % (c.name, e))
        except StopIteration:
            # No connections use this type. There's no point adding it to the
            # template lookup dictionary.
            pass

    if options.load_object_state is not None:
        # Restore allocator state saved by a previous pass.
        render_state = pickle.load(options.load_object_state)

    else:
        # First pass: build a fresh object allocator plus per-address-space
        # CSpace/VSpace allocators for every software instance.
        obj_space = ObjectAllocator()
        obj_space.spec.arch = options.architecture
        render_state = AllocatorState(obj_space=obj_space)

        for i in assembly.composition.instances:
            # Don't generate any code for hardware components.
            if i.type.hardware:
                continue

            key = i.address_space

            if key not in render_state.cspaces:
                cnode = render_state.obj_space.alloc(
                    ObjectType.seL4_CapTableObject,
                    name="%s_cnode" % key,
                    label=key)
                render_state.cspaces[key] = CSpaceAllocator(cnode)
                pd = obj_space.alloc(lookup_architecture(
                    options.architecture).vspace().object,
                                     name="%s_group_bin_pd" % key,
                                     label=key)
                addr_space = AddressSpaceAllocator(
                    re.sub(r'[^A-Za-z0-9]', '_', "%s_group_bin" % key), pd)
                render_state.pds[key] = pd
                render_state.addr_spaces[key] = addr_space

    # Render each requested item into its paired outfile.
    for (item, outfile) in zip(options.item, options.outfile):
        key = item.split("/")
        # '==' rather than 'is': identity comparison of ints only works by
        # accident of CPython's small-integer caching.
        if len(key) == 1:
            # We are rendering something that isn't a component or connection.
            i = assembly
            obj_key = None
            template = templates.lookup(item)
        elif key[1] in [
                "source", "header", "c_environment_source",
                "cakeml_start_source", "cakeml_end_source", "camkesConstants",
                "linker", "debug", "simple", "rump_config"
        ]:
            # We are rendering a component template
            i = [
                x for x in assembly.composition.instances if x.name == key[0]
            ][0]
            obj_key = i.address_space
            template = templates.lookup(item, i)
        elif key[1] in ["from", "to"]:
            # We are rendering a connection template
            c = [
                c for c in assembly.composition.connections if c.name == key[0]
            ][0]
            if key[1] == "to":
                i = c.to_ends[int(key[-1])]
            elif key[1] == "from":
                i = c.from_ends[int(key[-1])]
            else:
                die("Invalid connector end")
            obj_key = i.instance.address_space
            template = templates.lookup("/".join(key[:-1]), c)
        else:
            die("item: \"%s\" does not have the correct formatting to lookup a template."
                % item)
        try:
            g = r.render(i,
                         assembly,
                         template,
                         render_state,
                         obj_key,
                         outfile_name=outfile.name,
                         options=options,
                         my_pd=render_state.pds[obj_key] if obj_key else None)
            outfile.write(g)
            outfile.close()
        except TemplateError as inst:
            die(rendering_error(i.name, inst))

    read = r.get_files_used()
    # Write a Makefile dependency rule if requested.
    if options.makefile_dependencies is not None:
        options.makefile_dependencies.write(
            '%s: \\\n  %s\n' %
            (options.outfile[0].name, ' \\\n  '.join(sorted(read))))

    if options.save_object_state is not None:
        # Write the render_state to the supplied outfile
        pickle.dump(render_state, options.save_object_state)

    # Return 0 rather than sys.exit(0) so the success path is consistent with
    # this function's error paths, which return -1.
    return 0
Esempio n. 14
0
def set_tcb_info(cspaces, obj_space, elfs, options, **_):
    '''Set relevant extra info for TCB objects.

    For every TCB reachable through the given CSpaces, fills in the ELF name,
    instruction pointer, stack pointer, IPC buffer address and thread ID, and
    (for realtime configurations) the scheduling context cap. TCBs whose ELF
    has not been supplied yet are skipped, as are pool TCBs.
    '''
    arch = lookup_architecture(options.architecture)

    for group, space in cspaces.items():
        cnode = space.cnode
        # Walk every CNode slot holding a cap to a TCB; `index` is the slot
        # index and `tcb` the TCB object the cap refers to.
        for index, tcb in [(k, v.referent) for (k, v) in cnode.slots.items()
                           if v is not None and isinstance(v.referent, TCB)]:

            # Perspective resolves the naming conventions (symbol names etc.)
            # for this group/TCB pair.
            perspective = Perspective(group=group, tcb=tcb.name)

            elf_name = perspective['elf_name']

            elf = elfs.get(elf_name)

            if elf is None:
                # We were not passed an ELF file for this CSpace. This will be
                # true in the first pass of the runner where no ELFs are passed.
                continue

            tcb.elf = elf_name
            tcb.ip = get_symbol_vaddr(elf, perspective['entry_symbol'])
            assert tcb.ip != 0, 'entry point \'%s\' of %s appears to be 0x0' \
                % (perspective['entry_symbol'], tcb.name)

            if perspective['pool']:
                # This TCB is part of the (cap allocator's) TCB pool.
                continue

            stack_symbol = perspective['stack_symbol']
            ipc_buffer_symbol = perspective['ipc_buffer_symbol']

            # The stack should be at least three pages and the IPC buffer
            # region should be exactly three pages. Note that these regions
            # both include a guard page either side of the used area. It is
            # assumed that the stack grows downwards.
            stack_size = get_symbol_size(elf, stack_symbol)
            assert stack_size is not None, 'Stack for %(name)s, ' \
                '\'%(symbol)s\', not found' % {
                    'name':tcb.name,
                    'symbol':stack_symbol,
                }
            assert stack_size >= PAGE_SIZE * 3, 'Stack for %(name)s, ' \
                '\'%(symbol)s\', is only %(size)d bytes and does not have ' \
                'room for guard pages' % {
                    'name':tcb.name,
                    'symbol':stack_symbol,
                    'size':stack_size,
                }
            assert get_symbol_size(elf, ipc_buffer_symbol) == PAGE_SIZE * 3

            # Move the stack pointer to the top of the stack. Extra page is
            # to account for the (upper) guard page.
            assert stack_size % PAGE_SIZE == 0
            tcb.sp = get_symbol_vaddr(elf,
                                      stack_symbol) + stack_size - PAGE_SIZE
            # Place the IPC buffer at the top of the middle page of the
            # three-page region (the outer two pages are guards).
            tcb.addr = get_symbol_vaddr(elf, ipc_buffer_symbol) + \
                2 * PAGE_SIZE - arch.ipc_buffer_size()

            # Each TCB needs to be passed its 'thread_id' that is the value
            # it branches on in main(). This corresponds to the slot index
            # to a cap to it in the component's CNode.
            tcb.init.append(index)

            if options.realtime:
                sc_name = perspective['sc']
                if sc_name in obj_space:
                    # For non-passive threads, associate the sc with the tcb
                    sc = obj_space[sc_name]
                    tcb['sc_slot'] = Cap(sc)
Esempio n. 15
0
def final_spec(cspaces, obj_space, addr_spaces, elf_files, architecture, so_files):
    """
    Generates a final CapDL spec file that can be given to a capdl loader
    application.

    Args:
        cspaces: dict containing all the CSpaceAllocators for this app.
        obj_space: ObjectAllocator for all the objects in the spec.
        addr_spaces: dict containing all the AddressSpaceAllocators for this
            app.
        elf_files: nested lists of paths to the application ELF files.
        architecture: name of the target architecture.
        so_files: nested lists of paths to shared-library (.so) files.

    Returns:
        The obj_space with every ELF and shared object merged in.
    """
    arch = lookup_architecture(architecture)

    # Handle shared library (.so) files first.
    for e in [item for sublist in so_files for item in sublist]:
        name = os.path.basename(e)
        # Strip the 'lib' prefix and '.so' suffix to recover the library
        # name used to key cspaces/addr_spaces.
        name = name[3:-3]

        elf = ELF(e, name, architecture)
        cspace = cspaces[name]

        # Avoid inferring a TCB as we've already created our own.
        elf_spec = elf.get_spec(infer_tcb=False,
                                infer_asid=False,
                                pd=addr_spaces[name].vspace_root,
                                addr_space=addr_spaces[name])

        obj_space.merge(elf_spec)
        cspace.cnode.finalise_size(arch)

        # Fill in TCB object information. A TCB exists for the shared library
        # but it is handled by the root task of the capdl loader, so its entry
        # point, stack pointer and IPC buffer are left zeroed here.
        # TODO: This should be generalised with what is in the Camkes filters
        tcb = obj_space["tcb_%s" % name]
        progsymbol = 0
        vsyscall = 0
        tcb.init = [0, 0, 0, 0, 2, progsymbol, 1, 0, 0, 32, vsyscall, 0, 0]
        tcb.addr = 0
        tcb.sp = 0
        tcb.ip = 0

    # Now handle the application ELF files themselves.
    for e in [item for sublist in elf_files for item in sublist]:
        name = os.path.basename(e)

        # path, name, arch
        elf = ELF(e, name, architecture)

        # Find the cspace for the current ELF.
        cspace = cspaces[name]

        # Avoid inferring a TCB as we've already created our own.
        elf_spec = elf.get_spec(infer_tcb=False,
                                infer_asid=False,
                                pd=addr_spaces[name].vspace_root,
                                addr_space=addr_spaces[name])
        obj_space.merge(elf_spec)
        cspace.cnode.finalise_size(arch)

        # Fill in TCB object information.
        # TODO: This should be generalised with what is in the Camkes filters
        tcb = obj_space["tcb_%s" % name]
        progsymbol = elf.get_symbol_vaddr("progname")
        vsyscall = elf.get_symbol_vaddr("sel4_vsyscall")
        tcb.init = [0, 0, 0, 0, 2, progsymbol, 1, 0, 0, 32, vsyscall, 0, 0]
        tcb.addr = elf.get_symbol_vaddr("mainIpcBuffer")
        tcb.sp = elf.get_symbol_vaddr("stack") + elf.get_symbol_size("stack")
        tcb.ip = elf.get_entry_point()

    return obj_space
Esempio n. 16
0
def guard_pages(obj_space, cspaces, elfs, options, **_):
    '''Introduce a guard page around each stack and IPC buffer. Note that the
    templates should have ensured a three page region for each stack in order to
    enable this.'''

    arch = lookup_architecture(options.architecture)

    def _drop_guard_frame(pd, vaddr, region, tcb, group):
        # Remove the frame backing `vaddr` from `pd` and delete it from the
        # spec, turning that page into an unmapped guard page. `region` is
        # used only for the error message.
        (cap, frame) = frame_for_vaddr(arch, pd, vaddr, PAGE_SIZE)
        if frame is None:
            raise Exception(
                '%s of TCB %s in '
                'group %s does not appear to be backed by a frame' %
                (region, tcb.name, group))
        obj_space.remove(frame)
        update_frame_in_vaddr(arch, pd, vaddr, PAGE_SIZE, None)

    for group, space in cspaces.items():
        cnode = space.cnode
        # Walk every CNode slot holding a cap to a TCB.
        for index, tcb in [(k, v.referent) for (k, v) in cnode.slots.items()
                           if v is not None and isinstance(v.referent, TCB)]:

            perspective = Perspective(group=group, tcb=tcb.name)

            if perspective['pool']:
                # This TCB is part of the (cap allocator's) TCB pool.
                continue

            elf_name = perspective['elf_name']

            # Find the page directory.
            pd = None
            pd_name = perspective['pd']
            pds = [x for x in obj_space.spec.objs if x.name == pd_name]
            if len(pds) > 1:
                raise Exception('Multiple PDs found for group %s' % group)
            elif len(pds) == 1:
                pd, = pds
                tcb['vspace'] = Cap(pd)
            # If no PD was found we were probably just not passed any ELF files
            # in this pass.

            elf = elfs.get(elf_name)

            if pd and elf:

                ipc_symbol = perspective['ipc_buffer_symbol']

                # Find the IPC buffer's preceding guard page's virtual address.
                assert get_symbol_size(elf, ipc_symbol) == PAGE_SIZE * 3
                pre_guard = get_symbol_vaddr(elf, ipc_symbol)

                # Delete the preceding guard page.
                _drop_guard_frame(pd, pre_guard, 'IPC buffer region', tcb,
                                  group)

                # Now do the same for the following guard page. We do this
                # calculation separately just in case the region crosses a PT
                # boundary and the two guard pages are in separate PTs.
                post_guard = pre_guard + 2 * PAGE_SIZE
                _drop_guard_frame(pd, post_guard, 'IPC buffer region', tcb,
                                  group)

                # Now we do the same thing for the preceding guard page of the
                # thread's stack...
                stack_symbol = perspective['stack_symbol']
                pre_guard = get_symbol_vaddr(elf, stack_symbol)
                _drop_guard_frame(pd, pre_guard, 'stack region', tcb, group)

                # ...and the following guard page.
                stack_region_size = get_symbol_size(elf, stack_symbol)
                assert stack_region_size % PAGE_SIZE == 0, \
                    'stack region is not page-aligned'
                assert stack_region_size >= 3 * PAGE_SIZE, \
                    'stack region has no room for guard pages'
                post_guard = pre_guard + stack_region_size - PAGE_SIZE
                _drop_guard_frame(pd, post_guard, 'stack region', tcb, group)
Esempio n. 17
0
def main(argv, out, err):
    """
    Entry point of the CAmkES runner.

    Parses the command line and the input CAmkES specification, then renders
    each requested template item (``--item``) into its paired output file
    (``--outfile``), instantiating component and connection templates in a
    deterministic order along the way.

    Returns -1 on usage errors and 0 if it falls through the main flow;
    note that ``done()`` calls ``sys.exit()`` as soon as the final requested
    item has been produced.
    """

    # We need a UTF-8 locale, so bail out if we don't have one. More
    # specifically, things like the version() computation traverse the file
    # system and, if they hit a UTF-8 filename, they try to decode it into your
    # preferred encoding and trigger an exception.
    encoding = locale.getpreferredencoding().lower()
    if encoding not in ('utf-8', 'utf8'):
        err.write('CAmkES uses UTF-8 encoding, but your locale\'s preferred '
                  'encoding is %s. You can override your locale with the LANG '
                  'environment variable.\n' % encoding)
        return -1

    options, queries, filteroptions = parse_args(argv, out, err)

    # Ensure we were supplied equal items and outfiles
    if len(options.outfile) != len(options.item):
        err.write(
            'Different number of items and outfiles. Required one outfile location '
            'per item requested.\n')
        return -1

    # No duplicates in items or outfiles
    if len(set(options.item)) != len(options.item):
        err.write('Duplicate items requested through --item.\n')
        return -1
    if len(set(options.outfile)) != len(options.outfile):
        # NOTE: fixed typo in this message (was "requrested").
        err.write('Duplicate outfiles requested through --outfile.\n')
        return -1

    # Save us having to pass debugging everywhere.
    die = functools.partial(_die, options)

    log.set_verbosity(options.verbosity)

    # Build a list of item/outfile pairs that we have yet to match and process
    all_items = set(zip(options.item, options.outfile))
    done_items = set([])

    def done(s, file, item):
        """Record `item` as completed, writing its rendered output `s` to
        `file`. Once every requested item is done, optionally pickle the
        render state and exit the process."""
        ret = 0
        if s:
            file.write(s)
            file.close()

        done_items.add((item, file))
        if len(all_items - done_items) == 0:
            if options.save_object_state is not None:
                # Write the render_state to the supplied outfile
                pickle.dump(renderoptions.render_state,
                            options.save_object_state)

            sys.exit(ret)

    filename = None
    if options.file is not None:
        filename = os.path.abspath(options.file.name)

    try:
        # Build the parser options
        parse_options = ParserOptions(options.cpp, options.cpp_flag,
                                      options.import_path, options.verbosity,
                                      options.allow_forward_references,
                                      options.save_ast, options.load_ast,
                                      queries)
        ast, read = parse_file_cached(filename, parse_options)
    except (ASTError, ParseError) as e:
        die(e.args)

    # Locate the assembly.
    assembly = ast.assembly
    if assembly is None:
        die('No assembly found')

    # Do some extra checks if the user asked for verbose output.
    if options.verbosity >= 2:

        # Try to catch type mismatches in attribute settings. Note that it is
        # not possible to conclusively evaluate type correctness because the
        # attributes' type system is (deliberately) too loose. That is, the
        # type of an attribute can be an uninterpreted C type the user will
        # provide post hoc.
        for i in assembly.composition.instances:
            for a in i.type.attributes:
                value = assembly.configuration[i.name].get(a.name)
                if value is not None:
                    if a.type == 'string' and not \
                            isinstance(value, six.string_types):
                        log.warning('attribute %s.%s has type string but is '
                                    'set to a value that is not a string' %
                                    (i.name, a.name))
                    elif a.type == 'int' and not \
                            isinstance(value, numbers.Number):
                        log.warning('attribute %s.%s has type int but is set '
                                    'to a value that is not an integer' %
                                    (i.name, a.name))
    obj_space = ObjectAllocator()
    obj_space.spec.arch = options.architecture
    render_state = RenderState(obj_space=obj_space)

    templates = Templates(options.platform)
    # Register any extra template roots supplied on the command line. (A
    # plain loop: the original used a list comprehension purely for its
    # side effects.)
    for t in options.templates:
        templates.add_root(t)
    try:
        r = Renderer(templates, options.cache, options.cache_dir)
    except jinja2.exceptions.TemplateSyntaxError as e:
        die('template syntax error: %s' % e)

    # The user may have provided their own connector definitions (with
    # associated) templates, in which case they won't be in the built-in lookup
    # dictionary. Let's add them now. Note, definitions here that conflict with
    # existing lookup entries will overwrite the existing entries. Note that
    # the extra check that the connector has some templates is just an
    # optimisation; the templates module handles connectors without templates
    # just fine.
    extra_templates = set()
    for c in (x for x in ast.items if isinstance(x, Connector) and (
            x.from_template is not None or x.to_template is not None)):
        try:
            # Find a connection that uses this type.
            connection = next(x for x in ast
                              if isinstance(x, Connection) and x.type == c)
            # Add the custom templates and update our collection of read
            # inputs. It is necessary to update the read set here to avoid
            # false compilation cache hits when the source of a custom template
            # has changed.
            extra_templates |= templates.add(c, connection)
        except TemplateError as e:
            die('while adding connector %s: %s' % (c.name, e))
        except StopIteration:
            # No connections use this type. There's no point adding it to the
            # template lookup dictionary.
            pass

    # Add custom templates.
    read |= extra_templates

    # Add the CAmkES sources themselves to the accumulated list of inputs.
    read |= set(path for path, _ in sources())

    # Add any ELF files we were passed as inputs.
    read |= set(options.elf)

    # Write a Makefile dependency rule if requested.
    if filename and options.makefile_dependencies is not None:
        options.makefile_dependencies.write(
            '%s: \\\n  %s\n' % (filename, ' \\\n  '.join(sorted(read))))

    def apply_capdl_filters(renderoptions):
        """Merge the specs of all input ELF files into the object space and
        run every registered CapDL filter over the accumulated state."""
        # Derive a set of usable ELF objects from the filenames we were passed.
        render_state = renderoptions.render_state
        elfs = {}
        for e in options.elf:
            try:
                name = os.path.basename(e)
                if name in elfs:
                    raise Exception(
                        'duplicate ELF files of name \'%s\' encountered' %
                        name)
                elf = ELF(e, name, options.architecture)
                p = Perspective(phase=RUNNER, elf_name=name)
                group = p['group']
                # Avoid inferring a TCB as we've already created our own.
                elf_spec = elf.get_spec(
                    infer_tcb=False,
                    infer_asid=False,
                    pd=render_state.pds[group],
                    use_large_frames=options.largeframe,
                    addr_space=render_state.addr_spaces[group])
                render_state.obj_space.merge(elf_spec, label=group)
                elfs[name] = (e, elf)
            except Exception as inst:
                die('While opening \'%s\': %s' % (e, inst))

        for f in CAPDL_FILTERS:
            try:
                # Pass everything as named arguments to allow filters to
                # easily ignore what they don't want.
                f(ast=ast,
                  obj_space=render_state.obj_space,
                  cspaces=render_state.cspaces,
                  elfs=elfs,
                  options=filteroptions)
            except Exception as inst:
                die('While forming CapDL spec: %s' % inst)

    renderoptions = RenderOptions(
        options.file, options.verbosity, options.frpc_lock_elision,
        options.fspecialise_syscall_stubs, options.fprovide_tcb_caps,
        options.fsupport_init, options.largeframe, options.largeframe_dma,
        options.architecture, options.debug_fault_handlers,
        options.default_stack_size, options.realtime,
        options.verification_base_name, filteroptions, render_state)

    def instantiate_misc_templates(renderoptions):
        """Render every still-outstanding item that resolves directly through
        the template lookup (e.g. camkes-gen.cmake, the CapDL spec)."""
        for (item, outfile) in (all_items - done_items):
            try:
                template = templates.lookup(item)
                if template:
                    g = r.render(assembly,
                                 assembly,
                                 template,
                                 renderoptions.render_state,
                                 None,
                                 outfile_name=outfile.name,
                                 imported=read,
                                 options=renderoptions)
                    done(g, outfile, item)
            except TemplateError as inst:
                die(rendering_error(item, inst))

    if "camkes-gen.cmake" in options.item:
        instantiate_misc_templates(renderoptions)

    if options.load_object_state is not None:
        # There is an assumption that if load_object_state is set, we
        # skip all of the component and connector logic below.
        # FIXME: refactor to clarify control flow
        renderoptions.render_state = pickle.load(options.load_object_state)
        apply_capdl_filters(renderoptions)
        instantiate_misc_templates(renderoptions)

        # If a template wasn't instantiated, something went wrong, and we can't recover
        raise CAmkESError("No template instantiated on capdl generation path")

    # We're now ready to instantiate the template the user requested, but there
    # are a few wrinkles in the process. Namely,
    #  1. Template instantiation needs to be done in a deterministic order. The
    #     runner is invoked multiple times and template code needs to be
    #     allocated identical cap slots in each run.
    #  2. Components and connections need to be instantiated before any other
    #     templates, regardless of whether they are the ones we are after. Some
    #     other templates, such as the Makefile depend on the obj_space and
    #     cspaces.
    #  3. All actual code templates, up to the template that was requested,
    #     need to be instantiated. This is related to (1) in that the cap slots
    #     allocated are dependent on what allocations have been done prior to a
    #     given allocation call.

    # Instantiate the per-component source and header files.
    for i in assembly.composition.instances:
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue

        # Lazily create the CSpace, VSpace and address-space allocators for
        # this component's address space the first time we see it.
        if i.address_space not in renderoptions.render_state.cspaces:
            p = Perspective(phase=RUNNER,
                            instance=i.name,
                            group=i.address_space)
            cnode = renderoptions.render_state.obj_space.alloc(
                ObjectType.seL4_CapTableObject,
                name=p['cnode'],
                label=i.address_space)
            renderoptions.render_state.cspaces[
                i.address_space] = CSpaceAllocator(cnode)
            # Allocate via render_state.obj_space for consistency with the
            # cnode allocation above (it is the same allocator object).
            pd = renderoptions.render_state.obj_space.alloc(
                lookup_architecture(options.architecture).vspace().object,
                name=p['pd'],
                label=i.address_space)
            addr_space = AddressSpaceAllocator(
                re.sub(r'[^A-Za-z0-9]', '_', p['elf_name']), pd)
            renderoptions.render_state.pds[i.address_space] = pd
            renderoptions.render_state.addr_spaces[
                i.address_space] = addr_space

        for t in ('%s/source' % i.name, '%s/header' % i.name,
                  '%s/c_environment_source' % i.name,
                  '%s/cakeml_start_source' % i.name,
                  '%s/cakeml_end_source' % i.name, '%s/linker' % i.name):
            try:
                template = templates.lookup(t, i)
                g = ''
                if template:
                    g = r.render(
                        i,
                        assembly,
                        template,
                        renderoptions.render_state,
                        i.address_space,
                        outfile_name=None,
                        options=renderoptions,
                        my_pd=renderoptions.render_state.pds[i.address_space])
                for (item, outfile) in (all_items - done_items):
                    if item == t:
                        if not template:
                            log.warning('Warning: no template for %s' % item)
                        done(g, outfile, item)
                        break
            except TemplateError as inst:
                die(rendering_error(i.name, inst))

    # Instantiate the per-connection files.
    for c in assembly.composition.connections:

        for t in (('%s/from/source' % c.name,
                   c.from_ends), ('%s/from/header' % c.name, c.from_ends),
                  ('%s/to/source' % c.name,
                   c.to_ends), ('%s/to/header' % c.name, c.to_ends),
                  ('%s/to/cakeml' % c.name, c.to_ends)):

            template = templates.lookup(t[0], c)

            if template is not None:
                for id, e in enumerate(t[1]):
                    item = '%s/%d' % (t[0], id)
                    g = ''
                    try:
                        g = r.render(e,
                                     assembly,
                                     template,
                                     renderoptions.render_state,
                                     e.instance.address_space,
                                     outfile_name=None,
                                     options=renderoptions,
                                     my_pd=renderoptions.render_state.pds[
                                         e.instance.address_space])
                    except TemplateError as inst:
                        die(rendering_error(item, inst))
                    except jinja2.exceptions.TemplateNotFound:
                        die('While rendering %s: missing template for %s' %
                            (item, c.type.name))
                    for (target, outfile) in (all_items - done_items):
                        if target == item:
                            if not template:
                                log.warning('Warning: no template for %s' %
                                            item)
                            done(g, outfile, item)
                            break

        # The following block handles instantiations of per-connection
        # templates that are neither a 'source' or a 'header', as handled
        # above. We assume that none of these need instantiation unless we are
        # actually currently looking for them (== options.item). That is, we
        # assume that following templates, like the CapDL spec, do not require
        # these templates to be rendered prior to themselves.
        # FIXME: This is a pretty ugly way of handling this. It would be nicer
        # for the runner to have a more general notion of per-'thing' templates
        # where the per-component templates, the per-connection template loop
        # above, and this loop could all be done in a single unified control
        # flow.
        for (item, outfile) in (all_items - done_items):
            for t in (('%s/from/' % c.name, c.from_ends), ('%s/to/' % c.name,
                                                           c.to_ends)):

                if not item.startswith(t[0]):
                    # This is not the item we're looking for.
                    continue

                # If we've reached here then this is the exact item we're after.
                template = templates.lookup(item, c)
                if template is None:
                    die('no registered template for %s' % item)

                for e in t[1]:
                    try:
                        g = r.render(e,
                                     assembly,
                                     template,
                                     renderoptions.render_state,
                                     e.instance.address_space,
                                     outfile_name=None,
                                     options=renderoptions,
                                     my_pd=renderoptions.render_state.pds[
                                         e.instance.address_space])
                        done(g, outfile, item)
                    except TemplateError as inst:
                        die(rendering_error(item, inst))

    # Perform any per component special generation. This needs to happen last
    # as these template needs to run after all other capabilities have been
    # allocated
    for i in assembly.composition.instances:
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue
        assert i.address_space in renderoptions.render_state.cspaces
        SPECIAL_TEMPLATES = [('debug', 'debug'), ('simple', 'simple'),
                             ('rump_config', 'rumprun')]
        for special in [
                bl for bl in SPECIAL_TEMPLATES
                if assembly.configuration[i.name].get(bl[0])
        ]:
            for t in ('%s/%s' % (i.name, special[1]), ):
                try:
                    template = templates.lookup(t, i)
                    g = ''
                    if template:
                        g = r.render(i,
                                     assembly,
                                     template,
                                     renderoptions.render_state,
                                     i.address_space,
                                     outfile_name=None,
                                     options=renderoptions,
                                     my_pd=renderoptions.render_state.pds[
                                         i.address_space])
                    for (item, outfile) in (all_items - done_items):
                        if item == t:
                            if not template:
                                log.warning('Warning: no template for %s' %
                                            item)
                            done(g, outfile, item)
                except TemplateError as inst:
                    die(rendering_error(i.name, inst))

    # Check if there are any remaining items
    not_done = all_items - done_items
    if len(not_done) > 0:
        for item, _ in not_done:
            err.write('No valid element matching --item %s.\n' % item)
        return -1
    return 0
Esempio n. 18
0
def main(argv, out, err):

    # We need a UTF-8 locale, so bail out if we don't have one. More
    # specifically, things like the version() computation traverse the file
    # system and, if they hit a UTF-8 filename, they try to decode it into your
    # preferred encoding and trigger an exception.
    encoding = locale.getpreferredencoding().lower()
    if encoding not in ('utf-8', 'utf8'):
        err.write('CAmkES uses UTF-8 encoding, but your locale\'s preferred '
            'encoding is %s. You can override your locale with the LANG '
            'environment variable.\n' % encoding)
        return -1

    options = parse_args(argv, out, err)

    # Ensure we were supplied equal items and outfiles
    if len(options.outfile) != len(options.item):
        err.write('Different number of items and outfiles. Required one outfile location '
            'per item requested.\n')
        return -1

    # No duplicates in items or outfiles
    if len(set(options.item)) != len(options.item):
        err.write('Duplicate items requested through --item.\n')
        return -1
    if len(set(options.outfile)) != len(options.outfile):
        err.write('Duplicate outfiles requrested through --outfile.\n')
        return -1

    # Save us having to pass debugging everywhere.
    die = functools.partial(_die, options)

    log.set_verbosity(options.verbosity)

    cwd = os.getcwd()

    # Build a list of item/outfile pairs that we have yet to match and process
    all_items = set(zip(options.item, options.outfile))
    done_items = set([])

    # Construct the compilation caches if requested.
    cachea = None
    cacheb = None
    if options.cache:

        # Construct a modified version of the command line arguments that we'll
        # use in the keys to the caches. Essentially we elide --outfile and its
        # parameter under the assumption that this value is never used in code
        # generation. The purpose of this is to allow us to successfully cache
        # ancillary outputs that we generate along the way to the current
        # output. If we were to include --outfile in the key, future attempts
        # to generate these ancillary outputs would unnecessarily miss the
        # entries generated by this execution.
        args = []
        skip = False
        for index, arg in enumerate(argv[1:]):
            if skip:
                skip = False
                continue
            if arg in ('--outfile', '-O'):
                skip = True
                continue
            args.append(arg)

        cachea = LevelACache(os.path.join(options.cache_dir, version(), 'cachea'))
        cacheb = LevelBCache(os.path.join(options.cache_dir, version(), 'cacheb'))

    def done(s, file, item):
        ret = 0
        if s:
            file.write(s)
            file.close()
        if cachea is not None:
            try:
                cachea.flush()
            except sqlite3.OperationalError as e:
                # The following suppresses two spurious errors:
                #  1. The database is locked. In a large, parallel build, writes
                #     to the level A cache are heavily contended and this error
                #     can occur.
                #  2. The database structure is unexpected. If the CAmkES
                #     sources have changed *while* the runner was executing,
                #     the level A cache can be looking in a different place to
                #     where the cache was created.
                # Both of these are non-critical (will just result in a
                # potential future cache miss) so there's no need to alarm the
                # user.
                if re.search(r'database is locked', str(e)) is not None or \
                   re.search(r'no such table', str(e)) is not None:
                    log.debug('failed to flush level A cache: %s' % str(e))
                else:
                    raise
        if cacheb is not None:
            try:
                cacheb.flush()
            except sqlite3.OperationalError as e:
                # As above for the level B cache.
                if re.search(r'database is locked', str(e)):
                    log.debug('failed to flush level B cache: %s' % str(e))
                else:
                    raise

        done_items.add((item, file))
        if len(all_items - done_items) == 0:
            sys.exit(ret)

    # Try to find this output in the level A cache if possible. This check will
    # 'hit' if the source files representing the input spec are identical to
    # some previously observed execution.
    if cachea is not None:
        assert 'args' in locals()
        assert len(options.outfile) == 1, 'level A cache only supported when requestiong ' \
            'single items'
        output = cachea.load(args, cwd)
        if output is not None:
            log.debug('Retrieved %(platform)s/%(item)s from level A cache' %
                options.__dict__)
            done(output, options.outfile[0], options.item[0])

    filename = os.path.abspath(options.file.name)

    try:
        # Build the parser options
        parse_options = ParserOptions(options.cpp, options.cpp_flag, options.import_path, options.verbosity, options.allow_forward_references)
        ast, read = parse_file_cached(filename, options.data_structure_cache_dir, parse_options)
    except (ASTError, ParseError) as e:
        die(e.args)

    # Locate the assembly.
    assembly = ast.assembly
    if assembly is None:
        die('No assembly found')

    # Do some extra checks if the user asked for verbose output.
    if options.verbosity >= 2:

        # Try to catch type mismatches in attribute settings. Note that it is
        # not possible to conclusively evaluate type correctness because the
        # attributes' type system is (deliberately) too loose. That is, the
        # type of an attribute can be an uninterpreted C type the user will
        # provide post hoc.
        for i in assembly.composition.instances:
            for a in i.type.attributes:
                value = assembly.configuration[i.name].get(a.name)
                if value is not None:
                    if a.type == 'string' and not \
                            isinstance(value, six.string_types):
                        log.warning('attribute %s.%s has type string but is '
                            'set to a value that is not a string' % (i.name,
                            a.name))
                    elif a.type == 'int' and not \
                            isinstance(value, numbers.Number):
                        log.warning('attribute %s.%s has type int but is set '
                            'to a value that is not an integer' % (i.name,
                                a.name))

    obj_space = ObjectAllocator()
    obj_space.spec.arch = options.architecture
    cspaces = {}
    pds = {}
    conf = assembly.configuration
    shmem = collections.defaultdict(ShmemFactory())
    kept_symbols = {}
    fill_frames = {}

    templates = Templates(options.platform)
    [templates.add_root(t) for t in options.templates]
    try:
        r = Renderer(templates, options.cache, options.cache_dir)
    except jinja2.exceptions.TemplateSyntaxError as e:
        die('template syntax error: %s' % e)

    # The user may have provided their own connector definitions (with
    # associated) templates, in which case they won't be in the built-in lookup
    # dictionary. Let's add them now. Note, definitions here that conflict with
    # existing lookup entries will overwrite the existing entries. Note that
    # the extra check that the connector has some templates is just an
    # optimisation; the templates module handles connectors without templates
    # just fine.
    extra_templates = set()
    for c in (x for x in ast.items if isinstance(x, Connector) and
            (x.from_template is not None or x.to_template is not None)):
        try:
            # Find a connection that uses this type.
            connection = next(x for x in ast if isinstance(x, Connection) and
                x.type == c)
            # Add the custom templates and update our collection of read
            # inputs. It is necessary to update the read set here to avoid
            # false compilation cache hits when the source of a custom template
            # has changed.
            extra_templates |= templates.add(c, connection)
        except TemplateError as e:
            die('while adding connector %s: %s' % (c.name, e))
        except StopIteration:
            # No connections use this type. There's no point adding it to the
            # template lookup dictionary.
            pass

    # Check if our current target is in the level B cache. The level A cache
    # will 'miss' and this one will 'hit' when the input spec is identical to
    # some previously observed execution modulo a semantically irrelevant
    # element (e.g. an introduced comment).
    ast_hash = None
    if cacheb is not None:
        ast_hash = level_b_prime(ast)
        assert 'args' in locals()
        assert len(options.item) == 1, 'level B cache only supported when requesting ' \
            'single items'
        output = cacheb.load(ast_hash, args, set(options.elf) | extra_templates)
        if output is not None:
            log.debug('Retrieved %(platform)s/%(item)s from level B cache' %
                options.__dict__)
            done(output, options.outfile[0], options.item[0])

    # Add custom templates.
    read |= extra_templates

    # Add the CAmkES sources themselves to the accumulated list of inputs.
    read |= set(path for path, _ in sources())

    # Add any ELF files we were passed as inputs.
    read |= set(options.elf)

    # Write a Makefile dependency rule if requested.
    if options.makefile_dependencies is not None:
        options.makefile_dependencies.write('%s: \\\n  %s\n' %
            (filename, ' \\\n  '.join(sorted(read))))

    # If we have a cache, allow outputs to be saved to it.
    if options.cache:

        assert cachea is not None, 'level A cache not available, though the ' \
            'cache is enabled (bug in runner?)'
        # The logic of this cache currently only works when a single item is requested
        # on the command line
        assert len(options.item) == 1, 'level A cache only supported when requesting ' \
            'single items'

        # Calculate the input files to the level A cache.
        inputs = level_a_prime(read)

        # Work out the position of the --item argument in the command line
        # parameters. We will use this to cache not only outputs for this
        # execution, but also outputs for ones with a different target.
        item_index = None
        assert 'args' in locals()
        for index, arg in enumerate(args[:-1]):
            if arg in ('--item', '-T'):
                item_index = index + 1
                break
        assert item_index is not None, 'failed to find required argument ' \
            '--item (bug in runner?)'

        # We should already have the necessary inputs for the level B cache.
        assert cacheb is not None, 'level B cache not available, though the ' \
            'cache is enabled (bug in runner?)'
        assert ast_hash is not None, 'AST hash not pre-computed (bug in ' \
            'runner?)'

        def save(item, value):
            """
            Cache `value` as the output that would be produced by invoking
            the runner for `item` with otherwise-identical arguments.

            Entries go into both caches: the level A cache is keyed on the
            predicted command line, working directory and inputs; the level B
            cache on the AST hash, command line and the ELF/template file set.
            Relies on `args`, `item_index`, `cachea`, `cacheb`, `ast_hash`,
            `cwd`, `inputs`, `options` and `extra_templates` from the
            enclosing scope.
            """
            # Juggle the command line arguments to cache the predicted
            # arguments for a call that would generate this item.
            new_args = args[:item_index] + [item] + args[item_index + 1:]

            # Save entries in both caches.
            cachea.save(new_args, cwd, value, inputs)
            if item != 'Makefile' and item != 'camkes-gen.cmake':
                # We avoid caching the generated Makefile because it is not
                # safe. The inputs to generation of the Makefile are not only
                # the AST, but also the file names (`inputs`). If we cache it in
                # the level B cache we risk the following scenario:
                #
                #   1. Generate the Makefile, caching it in the level B cache;
                #   2. Modify the spec to import a file containing only white
                #      space and/or comments; then
                #   3. Generate the Makefile, missing the level A cache, but
                #      hitting the level B cache.
                #
                # At this point, the generated Makefile is incorrect because it
                # does not capture any dependencies on the imported file. We can
                # now introduce something semantically relevant into this file
                # (e.g. an Assembly block) and it will not be seen by the build
                # system.
                cacheb.save(ast_hash, new_args,
                    set(options.elf) | extra_templates, value)
    else:
        def save(item, value):
            pass

    def apply_capdl_filters():
        """
        Load the ELF files named on the command line into the object space and
        run every registered CapDL filter over the resulting specification.

        Relies on `options`, `pds`, `obj_space`, `cspaces`, `shmem`,
        `fill_frames`, `ast` and `die` from the enclosing scope. Any failure
        while opening an ELF or running a filter is fatal (via `die`).
        """
        # Derive a set of usable ELF objects from the filenames we were passed.
        elfs = {}
        for e in options.elf:
            try:
                name = os.path.basename(e)
                if name in elfs:
                    raise Exception('duplicate ELF files of name \'%s\' encountered' % name)
                elf = ELF(e, name, options.architecture)
                p = Perspective(phase=RUNNER, elf_name=name)
                group = p['group']
                # Avoid inferring a TCB as we've already created our own.
                elf_spec = elf.get_spec(infer_tcb=False, infer_asid=False,
                    pd=pds[group], use_large_frames=options.largeframe)
                obj_space.merge(elf_spec, label=group)
                elfs[name] = (e, elf)
            except Exception as inst:
                die('While opening \'%s\': %s' % (e, inst))

        # Snapshot the relevant options into the immutable bundle the filters
        # expect.
        filteroptions = FilterOptions(options.architecture, options.realtime, options.largeframe,
            options.largeframe_dma, options.default_priority, options.default_max_priority,
            options.default_affinity, options.default_period, options.default_budget,
            options.default_data, options.default_size_bits,
            options.debug_fault_handlers, options.fprovide_tcb_caps)
        for f in CAPDL_FILTERS:
            try:
                # Pass everything as named arguments to allow filters to
                # easily ignore what they don't want.
                f(ast=ast, obj_space=obj_space, cspaces=cspaces, elfs=elfs,
                    options=filteroptions, shmem=shmem, fill_frames=fill_frames)
            except Exception as inst:
                die('While forming CapDL spec: %s' % inst)

    renderoptions = RenderOptions(options.file, options.verbosity, options.frpc_lock_elision,
        options.fspecialise_syscall_stubs, options.fprovide_tcb_caps, options.fsupport_init,
        options.largeframe, options.largeframe_dma, options.architecture, options.debug_fault_handlers,
        options.realtime)

    def instantiate_misc_template():
        """
        Render any still-pending requested items that have a registered
        template but are not component- or connection-specific, saving and
        emitting each result via `save`/`done`.

        Relies on `all_items`, `done_items`, `templates`, `r`, `assembly`,
        `obj_space`, `shmem`, `kept_symbols`, `fill_frames`, `read` and
        `renderoptions` from the enclosing scope.
        """
        for (item, outfile) in (all_items - done_items):
            try:
                template = templates.lookup(item)
                if template:
                    # Misc templates are rendered in the context of the
                    # assembly itself and have no associated cspace (None).
                    g = r.render(assembly, assembly, template, obj_space, None,
                        shmem, kept_symbols, fill_frames, imported=read, options=renderoptions)
                    save(item, g)
                    done(g, outfile, item)
            except TemplateError as inst:
                die(rendering_error(item, inst))

    if options.item[0] in ('capdl', 'label-mapping') and options.data_structure_cache_dir is not None \
            and len(options.outfile) == 1:
        # It's possible that data structures required to instantiate the capdl spec
        # were saved during a previous invocation of this script in the current build.
        cache_path = os.path.realpath(options.data_structure_cache_dir)
        pickle_path = os.path.join(cache_path, CAPDL_STATE_PICKLE)

        if os.path.isfile(pickle_path):
            with open(pickle_path, 'rb') as pickle_file:
                # Found a cached version of the necessary data structures
                obj_space, shmem, cspaces, pds, kept_symbols, fill_frames = pickle.load(pickle_file)
                apply_capdl_filters()
                instantiate_misc_template()

                # If a template wasn't instantiated, something went wrong, and we can't recover
                raise CAmkESError("No template instantiated on capdl generation fastpath")

    # We're now ready to instantiate the template the user requested, but there
    # are a few wrinkles in the process. Namely,
    #  1. Template instantiation needs to be done in a deterministic order. The
    #     runner is invoked multiple times and template code needs to be
    #     allocated identical cap slots in each run.
    #  2. Components and connections need to be instantiated before any other
    #     templates, regardless of whether they are the ones we are after. Some
    #     other templates, such as the Makefile depend on the obj_space and
    #     cspaces.
    #  3. All actual code templates, up to the template that was requested,
    #     need to be instantiated. This is related to (1) in that the cap slots
    #     allocated are dependent on what allocations have been done prior to a
    #     given allocation call.

    # Instantiate the per-component source and header files.
    for i in assembly.composition.instances:
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue

        if i.address_space not in cspaces:
            p = Perspective(phase=RUNNER, instance=i.name,
                group=i.address_space)
            cnode = obj_space.alloc(seL4_CapTableObject,
                name=p['cnode'], label=i.address_space)
            cspaces[i.address_space] = CSpaceAllocator(cnode)
            pd = obj_space.alloc(lookup_architecture(options.architecture).vspace().object, name=p['pd'],
                label=i.address_space)
            pds[i.address_space] = pd

        for t in ('%s/source' % i.name, '%s/header' % i.name,
                '%s/c_environment_source' % i.name,
                '%s/cakeml_start_source' % i.name, '%s/cakeml_end_source' % i.name,
                '%s/linker' % i.name):
            try:
                template = templates.lookup(t, i)
                g = ''
                if template:
                    g = r.render(i, assembly, template, obj_space, cspaces[i.address_space],
                        shmem, kept_symbols, fill_frames, options=renderoptions, my_pd=pds[i.address_space])
                save(t, g)
                for (item, outfile) in (all_items - done_items):
                    if item == t:
                        if not template:
                            log.warning('Warning: no template for %s' % item)
                        done(g, outfile, item)
                        break
            except TemplateError as inst:
                die(rendering_error(i.name, inst))

    # Instantiate the per-connection files.
    for c in assembly.composition.connections:

        for t in (('%s/from/source' % c.name, c.from_ends),
                  ('%s/from/header' % c.name, c.from_ends),
                  ('%s/to/source' % c.name, c.to_ends),
                  ('%s/to/header' % c.name, c.to_ends)):

            template = templates.lookup(t[0], c)

            if template is not None:
                for id, e in enumerate(t[1]):
                    item = '%s/%d' % (t[0], id)
                    g = ''
                    try:
                        g = r.render(e, assembly, template, obj_space,
                            cspaces[e.instance.address_space], shmem, kept_symbols, fill_frames,
                            options=renderoptions, my_pd=pds[e.instance.address_space])
                    except TemplateError as inst:
                        die(rendering_error(item, inst))
                    except jinja2.exceptions.TemplateNotFound:
                        die('While rendering %s: missing template for %s' %
                            (item, c.type.name))
                    save(item, g)
                    for (target, outfile) in (all_items - done_items):
                        if target == item:
                            if not template:
                                log.warning('Warning: no template for %s' % item)
                            done(g, outfile, item)
                            break

        # The following block handles instantiations of per-connection
        # templates that are neither a 'source' or a 'header', as handled
        # above. We assume that none of these need instantiation unless we are
        # actually currently looking for them (== options.item). That is, we
        # assume that following templates, like the CapDL spec, do not require
        # these templates to be rendered prior to themselves.
        # FIXME: This is a pretty ugly way of handling this. It would be nicer
        # for the runner to have a more general notion of per-'thing' templates
        # where the per-component templates, the per-connection template loop
        # above, and this loop could all be done in a single unified control
        # flow.
        for (item, outfile) in (all_items - done_items):
            for t in (('%s/from/' % c.name, c.from_ends),
                    ('%s/to/' % c.name, c.to_ends)):

                if not item.startswith(t[0]):
                    # This is not the item we're looking for.
                    continue

                # If we've reached here then this is the exact item we're after.
                template = templates.lookup(item, c)
                if template is None:
                    die('no registered template for %s' % item)

                for e in t[1]:
                    try:
                        g = r.render(e, assembly, template, obj_space,
                            cspaces[e.instance.address_space], shmem, kept_symbols, fill_frames,
                            options=renderoptions, my_pd=pds[e.instance.address_space])
                        save(item, g)
                        done(g, outfile, item)
                    except TemplateError as inst:
                        die(rendering_error(item, inst))

    # Perform any per component special generation. This needs to happen last
    # as these template needs to run after all other capabilities have been
    # allocated
    for i in assembly.composition.instances:
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue
        assert i.address_space in cspaces
        SPECIAL_TEMPLATES = [('debug', 'debug'), ('simple', 'simple'), ('rump_config', 'rumprun')]
        for special in [bl for bl in SPECIAL_TEMPLATES if conf[i.name].get(bl[0])]:
            for t in ('%s/%s' % (i.name, special[1]),):
                try:
                    template = templates.lookup(t, i)
                    g = ''
                    if template:
                        g = r.render(i, assembly, template, obj_space, cspaces[i.address_space],
                            shmem, kept_symbols, fill_frames, options=renderoptions, my_pd=pds[i.address_space])
                    save(t, g)
                    for (item, outfile) in (all_items - done_items):
                        if item == t:
                            if not template:
                                log.warning('Warning: no template for %s' % item)
                            done(g, outfile, item)
                except TemplateError as inst:
                    die(rendering_error(i.name, inst))

    if options.data_structure_cache_dir is not None:
        # At this point the capdl database is in the state required for applying capdl
        # filters and generating the capdl spec. In case the capdl spec isn't the current
        # target, we pickle the database here, so when the capdl spec is built, these
        # data structures don't need to be regenerated.
        cache_path = os.path.realpath(options.data_structure_cache_dir)
        pickle_path = os.path.join(cache_path, CAPDL_STATE_PICKLE)
        with open(pickle_path, 'wb') as pickle_file:
            pickle.dump((obj_space, shmem, cspaces, pds, kept_symbols, fill_frames), pickle_file)

    for (item, outfile) in (all_items - done_items):
        if item in ('capdl', 'label-mapping'):
            apply_capdl_filters()

    # Instantiate any other, miscellaneous template. If we've reached this
    # point, we know the user did not request a code template.
    instantiate_misc_template()

    # Check if there are any remaining items
    not_done = all_items - done_items
    if len(not_done) > 0:
        for (item, outfile) in not_done:
            err.write('No valid element matching --item %s.\n' % item)
        return -1
    return 0
Esempio n. 19
0
def get_word_size(arch):
    """Return the machine word size, in bytes, for architecture `arch`.

    `arch` is an architecture name understood by `lookup_architecture`.
    """
    # word_size_bits() is a whole number of bits, so integer floor division
    # gives the exact byte count without a float round trip through int(x/8).
    return lookup_architecture(arch).word_size_bits() // 8
Esempio n. 20
0
def collapse_shared_frames(ast, obj_space, elfs, shmem, options, **_):
    """Find regions in virtual address spaces that are intended to be backed by
    shared frames and adjust the capability distribution to reflect this.

    `shmem` maps each shared-variable window name to per-cnode mappings of
    (symbol, permissions, paddr, preallocated frames, hardware cachedness).
    For each window we either reuse frames preallocated by a template or
    discover/construct backing frames from the first mapping processed, then
    rewrite the page-table entries of every participating address space to
    reference the same frames.

    Changes from the original: fixed the missing space in the "multiple
    preallocated sides" assertion message, removed the unused local
    `assembly` and dead commented-out debug code.
    """

    if not elfs:
        # If we haven't been passed any ELF files this step is not relevant yet.
        return

    arch = lookup_architecture(options.architecture)

    for window, mappings in shmem.items():
        frames = None
        exact_frames = False

        # If the shared variable has an associated set of backing frames
        # allocated already (ie. allocated in a template), look it up
        # before collapsing the shared variable.
        for mapping in mappings.values():
            for _, _, _, prealloc_frames, _ in mapping:
                if prealloc_frames is not None:
                    # Message previously rendered as "withpreallocated" due to
                    # a missing space between the concatenated fragments.
                    assert frames is None, 'Multiple sides of shared memory ' \
                            'with preallocated frames for shared variable ' \
                            '"%s"' % window

                    frames = prealloc_frames
                    exact_frames = True

        for cnode, local_mappings in mappings.items():
            for sym, permissions, paddr, _, cached_hw in local_mappings:

                perspective = Perspective(cnode=cnode)

                # Find this instance's ELF file.
                elf_name = perspective['elf_name']
                assert elf_name in elfs
                elf = elfs[elf_name]

                # Find this instance's page directory.
                pd_name = perspective['pd']
                pds = [x for x in obj_space.spec.objs if x.name == pd_name]
                assert len(pds) == 1
                pd = pds[0]

                # Look up the ELF-local version of this symbol.
                vaddr = get_symbol_vaddr(elf, sym)
                assert vaddr is not None, 'shared symbol \'%s\' not found in ' \
                    'ELF %s (template bug?)' % (sym, elf_name)
                assert vaddr != 0, 'shared symbol \'%s\' located at NULL in ELF ' \
                    '%s (template bug?)' % (sym, elf_name)
                assert vaddr % PAGE_SIZE == 0, 'shared symbol \'%s\' not ' \
                    'page-aligned in ELF %s (template bug?)' % (sym, elf_name)

                size = get_symbol_size(elf, sym)
                assert size != 0, 'shared symbol \'%s\' has size 0 in ELF %s ' \
                    '(template bug?)' % (sym, elf_name)
                assert size % PAGE_SIZE == 0, 'shared symbol \'%s\' in ELF %s ' \
                    'has a size that is not page-aligned (template bug?)' % \
                    (sym, elf_name)

                # Infer the page table(s) and small page(s) that currently back this
                # region.
                map_indices = [
                    make_indices(arch, v, PAGE_SIZE)
                    for v in six.moves.range(vaddr, vaddr + size, PAGE_SIZE)
                ]

                # Permissions that we will apply to the eventual mapping.
                read = 'R' in permissions
                write = 'W' in permissions
                execute = 'X' in permissions

                largest_frame_size, level_num = find_optimal_frame_size(
                    arch, vaddr, size)

                if frames is None:
                    # First iteration of the loop; we need to discover the backing
                    # frames for this region.
                    frames = []

                    # We want to derive large frames if (a) this window is device
                    # registers and large-frame-sized (in which case the kernel
                    # will have created it as large frames) or (b) the user has
                    # requested large frame promotion.
                    if largest_frame_size != PAGE_SIZE and (
                            options.largeframe or paddr is not None):
                        # Grab a copy of the frame for every entry we're going to end up making
                        new_frames = {}
                        for new_vaddr in six.moves.range(
                                vaddr, vaddr + size, largest_frame_size):
                            new_frames[new_vaddr] = obj_space.alloc(
                                seL4_FrameObject, size=largest_frame_size)
                        # Iterate over every unique index in every object below this one
                        delete_small_frames(arch, obj_space, pd, level_num,
                                            map_indices)
                        # Now insert the new frames
                        for new_vaddr in six.moves.range(
                                vaddr, vaddr + size, largest_frame_size):
                            frame = new_frames[new_vaddr]
                            cap = Cap(frame, read, write, execute)
                            if paddr is not None:
                                frame.paddr = paddr + (new_vaddr - vaddr)
                                cap.set_cached(cached_hw)
                            update_frame_in_vaddr(arch, pd, new_vaddr,
                                                  largest_frame_size, cap)
                            frames.append(frame)

                    else:
                        # We don't need to handle large frame promotion. Just tweak
                        # the permissions and optionally the physical address of
                        # all the current mappings.
                        for offset, indices in enumerate(map_indices):
                            (cap, frame) = lookup_vspace_indices(pd, indices)
                            cap.read = read
                            cap.write = write
                            cap.grant = execute
                            if paddr is not None:
                                frame.paddr = paddr + offset * PAGE_SIZE
                                cap.set_cached(cached_hw)
                            frames.append(frame)

                else:
                    # We have already discovered frames to back this region and now
                    # we just need to adjust page table mappings.

                    assert size == sum(f.size for f in frames), 'mismatched ' \
                        'sizes of shared region \'%s\' (template bug?)' % window

                    if not exact_frames:
                        # We do not need to preserve the exact same frames / frame sizings, so
                        # we can delete the entire region ready to put in our new frames
                        # Delete all the underlying frames / objects for this range
                        delete_small_frames(arch, obj_space, pd, level_num,
                                            map_indices)
                    offset = 0
                    for frame in frames:
                        cap = Cap(frame, read, write, execute)
                        if paddr is not None:
                            cap.set_cached(cached_hw)
                        if exact_frames:
                            # If we need to preserve the exact frames then we need to clear
                            # the range for each frame individually, up to the required level
                            # for that frame. This is to allow for 'weird' shared memory regions
                            # that have preallocated frames with different sized frames in
                            # the one region.
                            frame_map_indices = [
                                make_indices(arch, v, PAGE_SIZE)
                                for v in six.moves.range(
                                    vaddr + offset, vaddr + offset +
                                    frame.size, PAGE_SIZE)
                            ]
                            _, frame_level_num = find_optimal_frame_size(
                                arch, 0, frame.size)
                            delete_small_frames(arch, obj_space, pd,
                                                frame_level_num,
                                                frame_map_indices)
                        # Now, with exact_frames or not, we know that the slot for this frame is
                        # free and we can re-insert the correct frame
                        update_frame_in_vaddr(arch, pd, vaddr + offset,
                                              frame.size, cap)
                        offset = offset + frame.size
Esempio n. 21
0
def main(argv, out, err):
    """Top-level entry point for the template runner.

    Parses command-line options, loads the pickled AST, builds or reloads the
    CapDL render state, renders each requested (item, template) pair to its
    paired outfile, then optionally writes Makefile dependencies and the
    saved object state.

    Returns -1 on locale/argument errors; on success the process currently
    exits via sys.exit(0) (see note at the end).
    """

    # We need a UTF-8 locale, so bail out if we don't have one. More
    # specifically, things like the version() computation traverse the file
    # system and, if they hit a UTF-8 filename, they try to decode it into your
    # preferred encoding and trigger an exception.
    encoding = locale.getpreferredencoding().lower()
    if encoding not in ('utf-8', 'utf8'):
        err.write('CAmkES uses UTF-8 encoding, but your locale\'s preferred '
                  'encoding is %s. You can override your locale with the LANG '
                  'environment variable.\n' % encoding)
        return -1

    options = parse_args(argv, out, err)

    # register object sizes with loader
    if options.object_sizes:
        register_object_sizes(
            yaml.load(options.object_sizes, Loader=yaml.FullLoader))

    # Ensure we were supplied equal numbers of items, outfiles and templates.
    # BUGFIX: the previous chained comparison `a != b != c` only compared
    # outfiles/items and items/templates, so a mismatched template count
    # slipped through whenever the outfile and item counts agreed.
    if not (len(options.outfile) == len(options.item) == len(options.template)):
        err.write(
            'Different number of items, outfiles and templates. Required one '
            'outfile location per item requested.\n')
        return -1

    # No duplicates in outfiles
    if len(set(options.outfile)) != len(options.outfile):
        err.write('Duplicate outfiles requested through --outfile.\n')
        return -1

    # Save us having to pass debugging everywhere.
    die = functools.partial(_die, options)

    log.set_verbosity(options.verbosity)

    # NOTE: pickle.load is only safe on trusted input; this AST pickle is
    # produced by an earlier stage of the same build.
    ast = pickle.load(options.load_ast)

    # Locate the assembly.
    assembly = ast.assembly
    if assembly is None:
        die('No assembly found')

    # Do some extra checks if the user asked for verbose output.
    if options.verbosity >= 2:

        # Try to catch type mismatches in attribute settings. Note that it is
        # not possible to conclusively evaluate type correctness because the
        # attributes' type system is (deliberately) too loose. That is, the
        # type of an attribute can be an uninterpreted C type the user will
        # provide post hoc.
        for i in assembly.composition.instances:
            for a in i.type.attributes:
                value = assembly.configuration[i.name].get(a.name)
                if value is not None:
                    if a.type == 'string' and not \
                            isinstance(value, six.string_types):
                        log.warning('attribute %s.%s has type string but is '
                                    'set to a value that is not a string' %
                                    (i.name, a.name))
                    elif a.type == 'int' and not \
                            isinstance(value, numbers.Number):
                        log.warning('attribute %s.%s has type int but is set '
                                    'to a value that is not an integer' %
                                    (i.name, a.name))

    try:
        r = Renderer(options.templates)
    except jinja2.exceptions.TemplateSyntaxError as e:
        die('template syntax error: %s' % e)

    # Reload a previously-saved render state, or build a fresh one (cnode,
    # vspace root and address-space allocator per address space) when we've
    # been asked to save state; otherwise no state is needed.
    if options.load_object_state is not None:
        render_state = pickle.load(options.load_object_state)
    elif options.save_object_state is None:
        render_state = None
    else:
        obj_space = ObjectAllocator()
        obj_space.spec.arch = options.architecture
        render_state = RenderState(obj_space=obj_space)

        for i in assembly.composition.instances:
            # Don't generate any code for hardware components.
            if i.type.hardware:
                continue

            key = i.address_space

            if key not in render_state.cspaces:
                cnode = render_state.obj_space.alloc(
                    ObjectType.seL4_CapTableObject,
                    name="%s_cnode" % key,
                    label=key)
                render_state.cspaces[key] = CSpaceAllocator(cnode)
                pd = obj_space.alloc(lookup_architecture(
                    options.architecture).vspace().object,
                                     name="%s_group_bin_pd" % key,
                                     label=key)
                addr_space = AddressSpaceAllocator(
                    re.sub(r'[^A-Za-z0-9]', '_', "%s_group_bin" % key), pd)
                render_state.pds[key] = pd
                render_state.addr_spaces[key] = addr_space

    # Items take the form "component/<name>/...",
    # "connector/<name>/<to|from>/<index>/..." or "assembly/...".
    for (item, outfile, template) in zip(options.item, options.outfile,
                                         options.template):
        key = item.split("/")
        if key[0] == "component":
            i = [
                x for x in assembly.composition.instances if x.name == key[1]
            ][0]
            obj_key = i.address_space
        elif key[0] == "connector":
            c = [
                c for c in assembly.composition.connections if c.name == key[1]
            ][0]
            if key[2] == "to":
                i = c.to_ends[int(key[3])]
            elif key[2] == "from":
                i = c.from_ends[int(key[3])]
            else:
                die("Invalid connector end")
            obj_key = i.instance.address_space
        elif key[0] == "assembly":
            i = assembly
            obj_key = None
        else:
            die("item: \"%s\" does not have the correct formatting to render."
                % item)

        try:
            g = r.render(i,
                         assembly,
                         template,
                         render_state,
                         obj_key,
                         outfile_name=outfile.name,
                         options=options,
                         my_pd=render_state.pds[obj_key] if obj_key else None)
            outfile.write(g)
            outfile.close()
        except TemplateError as inst:
            if hasattr(i, 'name'):
                die(rendering_error(i.name, inst))
            else:
                die(rendering_error(i.parent.name, inst))

    read = r.get_files_used()
    # Write a Makefile dependency rule if requested.
    if options.makefile_dependencies is not None:
        options.makefile_dependencies.write(
            '%s: \\\n  %s\n' %
            (options.outfile[0].name, ' \\\n  '.join(sorted(read))))

    if options.save_object_state is not None:
        # Write the render_state to the supplied outfile
        pickle.dump(render_state, options.save_object_state)

    # NOTE(review): the error paths above `return -1`, but the success path
    # exits the process directly. `return 0` may have been intended; preserved
    # as-is to avoid changing observable behaviour — confirm with callers.
    sys.exit(0)
Esempio n. 22
0
def replace_dma_frames(ast, obj_space, elfs, options, **_):
    '''Locate the DMA pool (a region that needs to have frames whose mappings
    can be reversed) and replace its backing frames with pre-allocated,
    reversible ones.

    For each non-hardware component instance, finds the instance's DMA pool
    symbol in its ELF, determines the page size backing the pool (replicating
    the template's logic), and remaps every page of the pool to the DMA
    frames that the component template allocated.
    '''

    if not elfs:
        # If we haven't been passed any ELF files this step is not relevant yet.
        return

    arch = lookup_architecture(options.architecture)
    assembly = ast.assembly

    for i in (x for x in assembly.composition.instances
              if not x.type.hardware):

        perspective = Perspective(instance=i.name, group=i.address_space)

        # Find this instance's ELF file.
        elf_name = perspective['elf_name']
        assert elf_name in elfs
        elf = elfs[elf_name]

        # Find this instance's page directory.
        pd_name = perspective['pd']
        pds = [x for x in obj_space.spec.objs if x.name == pd_name]
        assert len(pds) == 1
        pd, = pds

        sym = perspective['dma_pool_symbol']
        base = get_symbol_vaddr(elf, sym)
        if base is None:
            # We don't have a DMA pool.
            continue
        assert base != 0
        sz = get_symbol_size(elf, sym)
        assert sz % PAGE_SIZE == 0  # DMA pool should be at least page-aligned.

        # Replicate logic from the template to determine the page size used to
        # back the DMA pool.
        page_size = 4 * 1024
        if options.largeframe_dma:
            # Promote to the largest supported page size that fits in the pool.
            for size in reversed(page_sizes(options.architecture)):
                if sz >= size:
                    page_size = size
                    break

        assert sz % page_size == 0, 'DMA pool not rounded up to a multiple ' \
            'of page size %d (template bug?)' % page_size

        dma_frame_index = 0

        def get_dma_frame(index):
            '''
            Find the `index`-th DMA frame. Note that these are constructed in
            the component template itself.
            '''
            # Closes over `i` from the enclosing loop; only called within the
            # same iteration, so the binding is the current instance.
            p = Perspective(instance=i.name,
                            group=i.address_space,
                            dma_frame_index=index)
            name = p['dma_frame_symbol']
            assert name in obj_space, "No such symbol in capdl spec %s" % name
            return obj_space[name]

        # Ensure paging structures are in place to map in dma frames
        replace_large_frames(obj_space, arch, pd, base, sz, page_size)

        # Map each page of the pool to the corresponding DMA frame:
        # read+write, non-executable, marked uncached via set_cached(False).
        for page_vaddr in six.moves.range(base, base + sz, page_size):
            cap = Cap(get_dma_frame(dma_frame_index), True, True, False)
            cap.set_cached(False)
            update_frame_in_vaddr(arch, pd, page_vaddr, page_size, cap)
            dma_frame_index = dma_frame_index + 1