def apply_capdl_filters():
    """Run the CapDL filter passes over the previously built object space.

    Reads module-level state (``options``, ``pds``, ``obj_space``, ``ast``,
    ``cspaces``, ``shmem``, ``fill_frames``, ``CAPDL_FILTERS``) rather than
    taking parameters; merges each input ELF's derived spec into
    ``obj_space`` and then applies every registered CapDL filter.
    Dies (via ``die``) on any error.
    """
    # Derive a set of usable ELF objects from the filenames we were passed.
    elfs = {}
    for e in options.elf:
        try:
            name = os.path.basename(e)
            # Basenames key the dict, so two paths with the same basename
            # cannot both be accepted.
            if name in elfs:
                raise Exception('duplicate ELF files of name \'%s\' encountered' % name)
            elf = ELF(e, name, options.architecture)
            # The Perspective maps this ELF's name to its address-space group.
            p = Perspective(phase=RUNNER, elf_name=name)
            group = p['group']
            # Avoid inferring a TCB as we've already created our own.
            elf_spec = elf.get_spec(infer_tcb=False, infer_asid=False,
                pd=pds[group], use_large_frames=options.largeframe)
            obj_space.merge(elf_spec, label=group)
            elfs[name] = (e, elf)
        except Exception as inst:
            die('While opening \'%s\': %s' % (e, inst))
    # Snapshot only the option fields the filters are allowed to see.
    filteroptions = FilterOptions(options.architecture, options.realtime, options.largeframe,
        options.largeframe_dma, options.default_priority, options.default_max_priority,
        options.default_affinity, options.default_period, options.default_budget,
        options.default_data, options.default_size_bits,
        options.debug_fault_handlers, options.fprovide_tcb_caps)
    for f in CAPDL_FILTERS:
        try:
            # Pass everything as named arguments to allow filters to
            # easily ignore what they don't want.
            f(ast=ast, obj_space=obj_space, cspaces=cspaces, elfs=elfs,
                options=filteroptions, shmem=shmem, fill_frames=fill_frames)
        except Exception as inst:
            die('While forming CapDL spec: %s' % inst)
def apply_capdl_filters():
    """Run the CapDL filter passes over the previously built object space.

    Variant that forwards the full ``options`` object to each filter.
    Reads module-level state (``options``, ``pds``, ``obj_space``, ``ast``,
    ``cspaces``, ``shmem``, ``fill_frames``, ``CAPDL_FILTERS``).
    Dies (via ``die``) on any error.
    """
    # Derive a set of usable ELF objects from the filenames we were passed.
    elfs = {}
    for e in options.elf:
        try:
            name = os.path.basename(e)
            # Basenames key the dict, so duplicates are rejected outright.
            if name in elfs:
                raise Exception('duplicate ELF files of name \'%s\' encountered' % name)
            elf = ELF(e, name, options.architecture)
            # The Perspective maps this ELF's name to its address-space group.
            p = Perspective(phase=RUNNER, elf_name=name)
            group = p['group']
            # Avoid inferring a TCB as we've already created our own.
            elf_spec = elf.get_spec(infer_tcb=False, infer_asid=False,
                pd=pds[group], use_large_frames=options.largeframe)
            obj_space.merge(elf_spec, label=group)
            elfs[name] = (e, elf)
        except Exception as inst:
            die('While opening \'%s\': %s' % (e, inst))
    # It's only relevant to run these filters if the final target is CapDL.
    # Note, this will no longer be true if we add any other templates that
    # depend on a fully formed CapDL spec. Guarding this loop with an if
    # is just an optimisation and the conditional can be removed if
    # desired.
    # NOTE(review): the guard described above is not present in this
    # version of the function — the comment appears stale; confirm against
    # the caller whether filtering is now unconditional on purpose.
    for f in CAPDL_FILTERS:
        try:
            # Pass everything as named arguments to allow filters to
            # easily ignore what they don't want.
            f(ast=ast, obj_space=obj_space, cspaces=cspaces, elfs=elfs,
                options=options, shmem=shmem, fill_frames=fill_frames)
        except Exception as inst:
            die('While forming CapDL spec: %s' % inst)
def final_spec(args, obj_space, cspaces, addr_spaces, targets, architecture):
    """
    Generates a final CapDL spec file that can be given to a capdl loader
    application.

    args        : parsed command-line options; only ``fprovide_tcb_caps``
                  is consulted here
    obj_space   : object allocator the ELF-derived specs are merged into
    cspaces     : mapping of group key -> CSpace allocator
    addr_spaces : mapping of group key -> address-space allocator
    targets     : iterable of (elf_path, group_key) pairs
    architecture: architecture name understood by lookup_architecture()

    Returns the populated ``obj_space``.
    """
    arch = lookup_architecture(architecture)
    for e, key in targets:
        name = os.path.basename(e)
        elf = ELF(e, name, architecture)
        cspace = cspaces[key]
        # Avoid inferring a TCB as we've already created our own.
        elf_spec = elf.get_spec(infer_tcb=False, infer_asid=False,
            pd=addr_spaces[key].vspace_root, addr_space=addr_spaces[key])
        obj_space.merge(elf_spec, label=key)
        # Resolve symbolic TCB register fields to concrete vaddrs for every
        # TCB reachable from this CSpace's root CNode.
        for slot, v in cspace.cnode.slots.items():
            if v is not None and isinstance(v.referent, TCB):
                tcb = v.referent
                if 'cspace' in tcb and tcb['cspace'] and tcb['cspace'].referent is not cspace.cnode:
                    # We exclude TCBs that refer to a different CSpace
                    continue
                # The ip/sp/addr/init fields may contain expressions such as
                # get_vaddr('sym'); evaluate them against this ELF's symbols.
                # HACK(review): eval with {"__builtins__": None} is NOT a
                # safe sandbox (it is bypassable via attribute access); fine
                # only because the input is trusted build output. Consider
                # the simple_eval-based variant used elsewhere in this file.
                funcs = {"get_vaddr": lambda x: elf.get_symbol_vaddr(x)}
                tcb.ip = eval(str(tcb.ip), {"__builtins__": None}, funcs)
                tcb.sp = eval(str(tcb.sp), {"__builtins__": None}, funcs)
                tcb.addr = eval(str(tcb.addr), {"__builtins__": None}, funcs)
                tcb.init = eval(str(tcb.init), {"__builtins__": None}, funcs)
                if not args.fprovide_tcb_caps:
                    # The component should not see its own TCB cap.
                    del cspace.cnode[slot]
        cspace.cnode.finalise_size(arch=arch)
    return obj_space
def final_spec(cspaces, obj_space, addr_spaces, elf_files, architecture):
    """
    Generates a final CapDL spec file that can be given to a capdl loader
    application.

    cspaces     : mapping of ELF basename -> CSpace allocator
    addr_spaces : mapping of ELF basename -> address-space allocator
    elf_files   : list of lists of ELF file paths (flattened below)
    architecture: architecture name understood by lookup_architecture()

    Returns the populated ``obj_space``.
    """
    arch = lookup_architecture(architecture)
    for e in [item for sublist in elf_files for item in sublist]:
        name = os.path.basename(e)
        elf = ELF(e, name, architecture)
        cspace = cspaces[name]
        # Avoid inferring a TCB as we've already created our own.
        elf_spec = elf.get_spec(infer_tcb=False, infer_asid=False,
            pd=addr_spaces[name].vspace_root, addr_space=addr_spaces[name])
        obj_space.merge(elf_spec)
        cspace.cnode.finalise_size(arch)
        # Fill in TCB object information.
        # TODO: This should be generalised with what is in the Camkes filters
        tcb = obj_space["tcb_%s" % name]
        progsymbol = elf.get_symbol_vaddr("progname")
        vsyscall = elf.get_symbol_vaddr("sel4_vsyscall")
        # NOTE(review): this vector presumably matches the loader's expected
        # initial-register/bootinfo layout (slots 5 and 10 carry the progname
        # and vsyscall vaddrs) — confirm against the capdl loader before
        # changing any of the constants.
        tcb.init = [0, 0, 0, 0, 2, progsymbol, 1, 0, 0, 32, vsyscall, 0, 0]
        tcb.addr = elf.get_symbol_vaddr("mainIpcBuffer")
        # Stack grows down: the initial sp is the top of the 'stack' symbol.
        tcb.sp = elf.get_symbol_vaddr("stack") + elf.get_symbol_size("stack")
        tcb.ip = elf.get_entry_point()
    return obj_space
def apply_capdl_filters(renderoptions):
    """Merge input ELF specs into the render state and resolve TCB fields.

    renderoptions: carries ``render_state`` (pds, addr_spaces, obj_space,
    cspaces). Also reads module-level ``options`` and ``die``.
    """
    # Derive a set of usable ELF objects from the filenames we were passed.
    render_state = renderoptions.render_state
    elfs = {}
    for e in options.elf:
        try:
            name = os.path.basename(e)
            if name in elfs:
                raise Exception(
                    'duplicate ELF files of name \'%s\' encountered' % name)
            elf = ELF(e, name, options.architecture)
            # Group key is derived from the filename convention rather than
            # a Perspective in this variant.
            group = name.replace("_group_bin", "")
            # Avoid inferring a TCB as we've already created our own.
            elf_spec = elf.get_spec(
                infer_tcb=False, infer_asid=False,
                pd=render_state.pds[group],
                use_large_frames=options.largeframe,
                addr_space=render_state.addr_spaces[group])
            render_state.obj_space.merge(elf_spec, label=group)
            elfs[name] = (e, elf)
        except Exception as inst:
            die('While opening \'%s\': %s' % (e, inst))
    # Resolve symbolic TCB register fields for every TCB in every CSpace.
    for space in render_state.cspaces.values():
        # NOTE(review): 'slot' here is bound to the Cap object (v), not the
        # slot index (k); 'del space.cnode[slot]' therefore deletes by Cap.
        # Verify CNode.__delitem__ accepts a Cap — other variants in this
        # file index by slot number instead.
        for (slot, tcb) in [
            (v, v.referent)
            for (k, v) in space.cnode.slots.items()
            if v is not None and isinstance(v.referent, TCB)
        ]:
            # NOTE(review): elfs.get() returns None on a miss, which would
            # make elf[1] raise inside the lambda — presumably tcb.elf is
            # always a known basename; confirm.
            elf = elfs.get(tcb.elf)
            # simple_eval restricts evaluation to the provided functions,
            # unlike the raw eval() used in older variants.
            funcs = {"get_vaddr": lambda x: elf[1].get_symbol_vaddr(x)}
            tcb.ip = simple_eval(str(tcb.ip), functions=funcs)
            tcb.sp = simple_eval(str(tcb.sp), functions=funcs)
            tcb.addr = simple_eval(str(tcb.addr), functions=funcs)
            if not options.fprovide_tcb_caps:
                # The component should not see its own TCB cap.
                del space.cnode[slot]
        space.cnode.finalise_size(
            arch=lookup_architecture(options.architecture))
def apply_capdl_filters(renderoptions):
    """Merge input ELF specs into the render state and run the CapDL filters.

    renderoptions: carries ``render_state`` (pds, addr_spaces, obj_space,
    cspaces). Also reads module-level ``options``, ``ast``, ``die`` and
    ``CAPDL_FILTERS``.
    """
    # Derive a set of usable ELF objects from the filenames we were passed.
    render_state = renderoptions.render_state
    elfs = {}
    for e in options.elf:
        try:
            name = os.path.basename(e)
            if name in elfs:
                raise Exception(
                    'duplicate ELF files of name \'%s\' encountered' % name)
            elf = ELF(e, name, options.architecture)
            # The Perspective maps this ELF's name to its address-space group.
            p = Perspective(phase=RUNNER, elf_name=name)
            group = p['group']
            # Avoid inferring a TCB as we've already created our own.
            elf_spec = elf.get_spec(
                infer_tcb=False, infer_asid=False,
                pd=render_state.pds[group],
                use_large_frames=options.largeframe,
                addr_space=render_state.addr_spaces[group])
            render_state.obj_space.merge(elf_spec, label=group)
            elfs[name] = (e, elf)
        except Exception as inst:
            die('While opening \'%s\': %s' % (e, inst))
    for f in CAPDL_FILTERS:
        try:
            # Pass everything as named arguments to allow filters to
            # easily ignore what they don't want.
            # NOTE(review): 'filteroptions' is not defined anywhere in this
            # function — it must be a module-level name built elsewhere
            # (another variant constructs it locally from FilterOptions), or
            # this raises NameError on first use. Verify.
            f(ast=ast,
              obj_space=render_state.obj_space,
              cspaces=render_state.cspaces,
              elfs=elfs,
              options=filteroptions)
        except Exception as inst:
            die('While forming CapDL spec: %s' % inst)
def test_symbol_lookup(self):
    """Symbol lookup succeeds on an unstripped binary and fails on a
    stripped one.

    The original version wrapped both the lookup AND the failure assert in
    one ``try`` with a bare ``except:``, which swallowed the AssertionError
    itself — the test could never fail. Restructured with try/except/else
    so only the lookup is guarded.
    """
    elf = ELF('resources/unstripped.bin')
    assert elf.get_arch() == 'x86'
    # Confirm that the address concurs with the one we get from objdump.
    assert elf.get_symbol_vaddr('_start') == 0x08048d48

    elf = ELF('resources/stripped.bin')
    assert elf.get_arch() == 'x86'
    # We shouldn't be able to get the symbol from the stripped binary.
    try:
        vaddr = elf.get_symbol_vaddr('_start')
    except Exception:
        # Expected: lookup on a stripped binary raises.
        pass
    else:
        assert False, \
            'Symbol lookup on a stripped binary returned _start == 0x%0.8x' % vaddr
#!/usr/bin/env python # # Copyright 2014, NICTA # # This software may be distributed and modified according to the terms of # the BSD 2-Clause license. Note that NO WARRANTY is provided. # See "LICENSE_BSD2.txt" for details. # # @TAG(NICTA_BSD) # from capdl import ELF elf = ELF('hello.bin') assert elf.get_arch() in [40, 'EM_ARM', 'ARM'] elf.get_spec()
def final_spec(cspaces, obj_space, addr_spaces, elf_files, architecture, so_files):
    """
    Generates a final CapDL spec file that can be given to a capdl loader
    application.

    cspaces     : dict containing all the CSpaceAllocators for this app,
                  keyed by binary name
    obj_space   : ObjectAllocator for all the objects in the spec
    addr_spaces : dict containing all the AddressSpaceAllocators for this
                  app, keyed by binary name
    elf_files   : list of lists of application ELF paths
    architecture: architecture name understood by lookup_architecture()
    so_files    : list of lists of shared-object (.so) paths

    Returns the populated ``obj_space``.

    Changes from the previous version: leftover debug ``print`` statements
    removed, the duplicated merge logic for .so and ELF inputs extracted
    into one helper, and stray semicolons dropped.
    """
    arch = lookup_architecture(architecture)

    def _merge(path, name):
        # Merge one binary's derived spec into obj_space and finalise the
        # size of its CNode; returns the parsed ELF for further inspection.
        elf = ELF(path, name, architecture)
        # Avoid inferring a TCB as we've already created our own.
        elf_spec = elf.get_spec(infer_tcb=False, infer_asid=False,
                                pd=addr_spaces[name].vspace_root,
                                addr_space=addr_spaces[name])
        obj_space.merge(elf_spec)
        cspaces[name].cnode.finalise_size(arch)
        return elf

    # Shared libraries: a TCB object exists for each, but it is driven by
    # the root task of the capdl loader, so all its fields are zeroed here.
    for e in [item for sublist in so_files for item in sublist]:
        # Strip the 'lib' prefix and '.so' suffix to recover the name used
        # as the cspace/addr_space key.
        name = os.path.basename(e)[3:-3]
        _merge(e, name)
        tcb = obj_space["tcb_%s" % name]
        # Same layout as the application case below, with the progname and
        # vsyscall slots (5 and 10) left as zero.
        tcb.init = [0, 0, 0, 0, 2, 0, 1, 0, 0, 32, 0, 0, 0]
        tcb.addr = 0
        tcb.sp = 0
        tcb.ip = 0

    # Application ELFs: fill in real TCB object information.
    # TODO: This should be generalised with what is in the Camkes filters
    for e in [item for sublist in elf_files for item in sublist]:
        name = os.path.basename(e)
        elf = _merge(e, name)
        tcb = obj_space["tcb_%s" % name]
        progsymbol = elf.get_symbol_vaddr("progname")
        vsyscall = elf.get_symbol_vaddr("sel4_vsyscall")
        tcb.init = [0, 0, 0, 0, 2, progsymbol, 1, 0, 0, 32, vsyscall, 0, 0]
        tcb.addr = elf.get_symbol_vaddr("mainIpcBuffer")
        # Stack grows down: initial sp is the top of the 'stack' symbol.
        tcb.sp = elf.get_symbol_vaddr("stack") + elf.get_symbol_size("stack")
        tcb.ip = elf.get_entry_point()

    return obj_space
def main():
    """Entry point of the CAmkES runner tool.

    Parses the input ADL spec, resolves imports/references, instantiates
    per-component and per-connection templates, builds the CapDL object
    space from the input ELFs, and emits whichever output ``--item`` names.
    Exits via ``done()`` on success or ``die()`` on error; never returns
    normally.
    """
    options = parse_args(constants.TOOL_RUNNER)

    # Save us having to pass debugging everywhere.
    die = functools.partial(_die, options.verbosity >= 3)

    log.set_verbosity(options.verbosity)

    def done(s):
        # Write the rendered output (if any) and terminate the process.
        ret = 0
        if s:
            options.outfile.write(s)
            options.outfile.close()
        sys.exit(ret)

    if not options.platform or options.platform in ('?', 'help') \
            or options.platform not in PLATFORMS:
        die('Valid --platform arguments are %s' % ', '.join(PLATFORMS))

    if not options.file or len(options.file) > 1:
        die('A single input file must be provided for this operation')

    # Construct the compilation cache if requested.
    cache = None
    if options.cache in ('on', 'readonly', 'writeonly'):
        cache = Cache(options.cache_dir)

    f = options.file[0]
    try:
        s = f.read()
        # Try to find this output in the compilation cache if possible. This is
        # one of two places that we check in the cache. This check will 'hit'
        # if the source files representing the input spec are identical to some
        # previous execution.
        if options.cache in ('on', 'readonly'):
            key = [
                version_hash(),
                os.path.abspath(f.name),
                s,
                cache_relevant_options(options),
                options.platform,
                options.item,
            ]
            value = cache.get(key)
            assert value is None or isinstance(value, FileSet), \
                'illegally cached a value for %s that is not a FileSet' % options.item
            if value is not None and value.valid():
                # Cache hit.
                log.debug('Retrieved %(platform)s.%(item)s from cache' % \
                    options.__dict__)
                done(value.output)

        ast = parser.parse_to_ast(s, options.cpp, options.cpp_flag,
            options.ply_optimise)
        parser.assign_filenames(ast, f.name)
    except parser.CAmkESSyntaxError as e:
        e.set_column(s)
        die('%s:%s' % (f.name, str(e)))
    except Exception as inst:
        die('While parsing \'%s\': %s' % (f.name, inst))

    try:
        for t in AST_TRANSFORMS[PRE_RESOLUTION]:
            ast = t(ast)
    except Exception as inst:
        die('While transforming AST: %s' % str(inst))

    try:
        ast, imported = parser.resolve_imports(ast, \
            os.path.dirname(os.path.abspath(f.name)), options.import_path,
            options.cpp, options.cpp_flag, options.ply_optimise)
    except Exception as inst:
        die('While resolving imports of \'%s\': %s' % (f.name, inst))

    try:
        # if there are multiple assemblies, combine them now
        compose_assemblies(ast)
    except Exception as inst:
        die('While combining assemblies: %s' % str(inst))

    # If we have a readable cache check if our current target is in the cache.
    # The previous check will 'miss' and this one will 'hit' when the input
    # spec is identical to some previous execution modulo a semantically
    # irrelevant element (e.g. an introduced comment). I.e. the previous check
    # matches when the input is exactly the same and this one matches when the
    # AST is unchanged.
    if options.cache in ('on', 'readonly'):
        key = [
            version_hash(),
            ast,
            cache_relevant_options(options),
            options.platform,
            options.item,
        ]
        value = cache.get(key)
        if value is not None:
            assert options.item not in NEVER_AST_CACHE, \
                '%s, that is marked \'never cache\' is in your cache' % options.item
            log.debug('Retrieved %(platform)s.%(item)s from cache' % \
                options.__dict__)
            done(value)

    # If we have a writable cache, allow outputs to be saved to it.
    if options.cache in ('on', 'writeonly'):
        orig_ast = deepcopy(ast)
        fs = FileSet(imported)

        def save(item, value):
            # Save an input-keyed cache entry. This one is based on the
            # pre-parsed inputs to save having to derive the AST (parse the
            # input) in order to locate a cache entry in following passes.
            # This corresponds to the first cache check above.
            key = [
                version_hash(),
                os.path.abspath(options.file[0].name),
                s,
                cache_relevant_options(options),
                options.platform,
                item,
            ]
            specialised = fs.specialise(value)
            if item == 'capdl':
                # The CapDL output additionally depends on the input ELFs.
                specialised.extend(options.elf)
            cache[key] = specialised
            if item not in NEVER_AST_CACHE:
                # Save an AST-keyed cache entry. This corresponds to the second
                # cache check above.
                cache[[
                    version_hash(),
                    orig_ast,
                    cache_relevant_options(options),
                    options.platform,
                    item,
                ]] = value
    else:
        def save(item, value):
            # No writable cache: saving is a no-op.
            pass

    ast = parser.dedupe(ast)
    try:
        ast = parser.resolve_references(ast)
    except Exception as inst:
        die('While resolving references of \'%s\': %s' % (f.name, inst))

    try:
        parser.collapse_references(ast)
    except Exception as inst:
        die('While collapsing references of \'%s\': %s' % (f.name, inst))

    try:
        for t in AST_TRANSFORMS[POST_RESOLUTION]:
            ast = t(ast)
    except Exception as inst:
        die('While transforming AST: %s' % str(inst))

    try:
        resolve_hierarchy(ast)
    except Exception as inst:
        die('While resolving hierarchy: %s' % str(inst))

    # All references in the AST need to be resolved for us to continue.
    unresolved = reduce(lambda a, x: a.union(x),
        map(lambda x: x.unresolved(), ast), set())
    if unresolved:
        die('Unresolved references in input specification:\n %s' % \
            '\n '.join(map(lambda x: '%(filename)s:%(lineno)s:\'%(name)s\' of type %(type)s' % {
                'filename':x.filename or '<unnamed file>',
                'lineno':x.lineno,
                'name':x._symbol,
                'type':x._type.__name__,
            }, unresolved)))

    # Locate the assembly
    assembly = [x for x in ast if isinstance(x, AST.Assembly)]
    if len(assembly) > 1:
        die('Multiple assemblies found')
    elif len(assembly) == 1:
        assembly = assembly[0]
    else:
        die('No assembly found')

    obj_space = ObjectAllocator()
    obj_space.spec.arch = options.architecture
    cspaces = {}
    pds = {}
    conf = assembly.configuration
    shmem = defaultdict(dict)

    templates = Templates(options.platform)
    # NOTE(review): under Python 3 this map() is lazy and would register
    # nothing; this code appears to be Python-2 era. Confirm before porting.
    map(templates.add_root, options.templates)
    r = Renderer(templates.get_roots(), options)

    # The user may have provided their own connector definitions (with
    # associated) templates, in which case they won't be in the built-in lookup
    # dictionary. Let's add them now. Note, definitions here that conflict with
    # existing lookup entries will overwrite the existing entries.
    for c in (x for x in ast if isinstance(x, AST.Connector)):
        if c.from_template:
            templates.add(c.name, 'from.source', c.from_template)
        if c.to_template:
            templates.add(c.name, 'to.source', c.to_template)

    # We're now ready to instantiate the template the user requested, but there
    # are a few wrinkles in the process. Namely,
    #  1. Template instantiation needs to be done in a deterministic order. The
    #     runner is invoked multiple times and template code needs to be
    #     allocated identical cap slots in each run.
    #  2. Components and connections need to be instantiated before any other
    #     templates, regardless of whether they are the ones we are after. Some
    #     other templates, such as the Makefile depend on the obj_space and
    #     cspaces.
    #  3. All actual code templates, up to the template that was requested,
    #     need to be instantiated. This is related to (1) in that the cap slots
    #     allocated are dependent on what allocations have been done prior to a
    #     given allocation call.

    # Instantiate the per-component source and header files.
    for id, i in enumerate(assembly.composition.instances):
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue

        if i.address_space not in cspaces:
            p = Perspective(phase=RUNNER, instance=i.name,
                group=i.address_space)
            cnode = obj_space.alloc(seL4_CapTableObject,
                name=p['cnode'], label=i.address_space)
            cspaces[i.address_space] = CSpaceAllocator(cnode)
            pd = obj_space.alloc(seL4_PageDirectoryObject, name=p['pd'],
                label=i.address_space)
            pds[i.address_space] = pd

        for t in ('%s.source' % i.name, '%s.header' % i.name,
                '%s.linker' % i.name):
            try:
                template = templates.lookup(t, i)
                g = ''
                if template:
                    g = r.render(i, assembly, template, obj_space,
                        cspaces[i.address_space], \
                        shmem, options=options, id=id,
                        my_pd=pds[i.address_space])
                save(t, g)
                if options.item == t:
                    if not template:
                        log.warning('Warning: no template for %s' % options.item)
                    done(g)
            except Exception as inst:
                die('While rendering %s: %s' % (i.name, inst))

    # Instantiate the per-connection files.
    conn_dict = {}
    for id, c in enumerate(assembly.composition.connections):
        tmp_name = c.name
        key_from = (c.from_instance.name + '_' + c.from_interface.name) in conn_dict
        key_to = (c.to_instance.name + '_' + c.to_interface.name) in conn_dict
        if not key_from and not key_to:
            # We need a new connection name
            conn_name = 'conn' + str(id)
            c.name = conn_name
            conn_dict[c.from_instance.name + '_' + c.from_interface.name] = conn_name
            conn_dict[c.to_instance.name + '_' + c.to_interface.name] = conn_name
        elif not key_to:
            conn_name = conn_dict[c.from_instance.name + '_' + c.from_interface.name]
            c.name = conn_name
            conn_dict[c.to_instance.name + '_' + c.to_interface.name] = conn_name
        elif not key_from:
            conn_name = conn_dict[c.to_instance.name + '_' + c.to_interface.name]
            c.name = conn_name
            conn_dict[c.from_instance.name + '_' + c.from_interface.name] = conn_name
        else:
            continue

        for t in (('%s.from.source' % tmp_name, c.from_instance.address_space),
                ('%s.from.header' % tmp_name, c.from_instance.address_space),
                ('%s.to.source' % tmp_name, c.to_instance.address_space),
                ('%s.to.header' % tmp_name, c.to_instance.address_space)):
            try:
                template = templates.lookup(t[0], c)
                g = ''
                if template:
                    g = r.render(c, assembly, template, obj_space, cspaces[t[1]], \
                        shmem, options=options, id=id, my_pd=pds[t[1]])
                save(t[0], g)
                if options.item == t[0]:
                    if not template:
                        log.warning('Warning: no template for %s' % options.item)
                    done(g)
            except Exception as inst:
                die('While rendering %s: %s' % (t[0], inst))
        c.name = tmp_name

        # The following block handles instantiations of per-connection
        # templates that are neither a 'source' or a 'header', as handled
        # above. We assume that none of these need instantiation unless we are
        # actually currently looking for them (== options.item). That is, we
        # assume that following templates, like the CapDL spec, do not require
        # these templates to be rendered prior to themselves.
        # FIXME: This is a pretty ugly way of handling this. It would be nicer
        # for the runner to have a more general notion of per-'thing' templates
        # where the per-component templates, the per-connection template loop
        # above, and this loop could all be done in a single unified control
        # flow.
        for t in (('%s.from.' % c.name, c.from_instance.address_space),
                ('%s.to.' % c.name, c.to_instance.address_space)):
            if not options.item.startswith(t[0]):
                # This is not the item we're looking for.
                continue
            try:
                # If we've reached here then this is the exact item we're
                # after.
                template = templates.lookup(options.item, c)
                if template is None:
                    raise Exception('no registered template for %s' % options.item)
                g = r.render(c, assembly, template, obj_space, cspaces[t[1]], \
                    shmem, options=options, id=id, my_pd=pds[t[1]])
                save(options.item, g)
                done(g)
            except Exception as inst:
                die('While rendering %s: %s' % (options.item, inst))

    # Perform any per component simple generation. This needs to happen last
    # as this template needs to run after all other capabilities have been
    # allocated
    for id, i in enumerate(assembly.composition.instances):
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue
        assert i.address_space in cspaces
        if conf and conf.settings and [x for x in conf.settings if \
                x.instance == i.name and x.attribute == 'simple' and x.value]:
            for t in ('%s.simple' % i.name, ):
                try:
                    template = templates.lookup(t, i)
                    g = ''
                    if template:
                        g = r.render(i, assembly, template, obj_space,
                            cspaces[i.address_space], \
                            shmem, options=options, id=id,
                            my_pd=pds[i.address_space])
                    save(t, g)
                    if options.item == t:
                        if not template:
                            log.warning('Warning: no template for %s' % options.item)
                        done(g)
                except Exception as inst:
                    die('While rendering %s: %s' % (i.name, inst))

    # Derive a set of usable ELF objects from the filenames we were passed.
    elfs = {}
    for e in options.elf:
        try:
            name = os.path.basename(e)
            if name in elfs:
                raise Exception(
                    'duplicate ELF files of name \'%s\' encountered' % name)
            elf = ELF(e, name, options.architecture)
            p = Perspective(phase=RUNNER, elf_name=name)
            group = p['group']
            # Avoid inferring a TCB as we've already created our own.
            elf_spec = elf.get_spec(infer_tcb=False, infer_asid=False,
                pd=pds[group], use_large_frames=options.largeframe)
            obj_space.merge(elf_spec, label=group)
            elfs[name] = (e, elf)
        except Exception as inst:
            die('While opening \'%s\': %s' % (e, inst))

    if options.item in ('capdl', 'label-mapping'):
        # It's only relevant to run these filters if the final target is CapDL.
        # Note, this will no longer be true if we add any other templates that
        # depend on a fully formed CapDL spec. Guarding this loop with an if
        # is just an optimisation and the conditional can be removed if
        # desired.
        for f in CAPDL_FILTERS:
            try:
                # Pass everything as named arguments to allow filters to
                # easily ignore what they don't want.
                f(ast=ast, obj_space=obj_space, cspaces=cspaces, elfs=elfs,
                    options=options, shmem=shmem)
            except Exception as inst:
                die('While forming CapDL spec: %s' % str(inst))

    # Instantiate any other, miscellaneous template. If we've reached this
    # point, we know the user did not request a code template.
    try:
        template = templates.lookup(options.item)
        if template:
            g = r.render(assembly, assembly, template, obj_space, None, \
                shmem, imported=imported, options=options)
            save(options.item, g)
            done(g)
    except Exception as inst:
        die('While rendering %s: %s' % (options.item, inst))

    die('No valid element matching --item %s' % options.item)
#!/usr/bin/env python # # Copyright 2017, Data61 # Commonwealth Scientific and Industrial Research Organisation (CSIRO) # ABN 41 687 119 230. # # This software may be distributed and modified according to the terms of # the BSD 2-Clause license. Note that NO WARRANTY is provided. # See "LICENSE_BSD2.txt" for details. # # @TAG(DATA61_BSD) # from __future__ import absolute_import, division, print_function, \ unicode_literals from capdl import ELF elf = ELF('hello.bin') assert elf.get_arch() in [40, 'EM_ARM', 'ARM'] elf.get_spec()
def test_ia32_elf(self):
    """An IA32 binary reports the x86 architecture and yields a spec."""
    binary = ELF('resources/ia32-hello.bin')
    assert binary.get_arch() == 'x86'
    # Deriving a spec from the binary must not raise.
    binary.get_spec()
def test_elf(self):
    """An ARM binary reports a recognised ARM architecture tag."""
    binary = ELF('resources/arm-hello.bin')
    # Accept the numeric machine value or a string name, depending on the
    # underlying ELF library version.
    assert binary.get_arch() in [40, 'EM_ARM', 'ARM']
    # Deriving a spec from the binary must not raise.
    binary.get_spec()
#!/usr/bin/env python # # Copyright 2017, Data61 # Commonwealth Scientific and Industrial Research Organisation (CSIRO) # ABN 41 687 119 230. # # This software may be distributed and modified according to the terms of # the BSD 2-Clause license. Note that NO WARRANTY is provided. # See "LICENSE_BSD2.txt" for details. # # @TAG(DATA61_BSD) # from __future__ import absolute_import, division, print_function, \ unicode_literals from capdl import ELF elf = ELF('hello.bin') assert elf.get_arch() == 'x86' elf.get_spec()
#!/usr/bin/env python
#
# Copyright 2014, NICTA
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(NICTA_BSD)
#

# Test that symbol lookup works on an unstripped binary and fails on a
# stripped one.
#
# Fix: the original wrapped both the lookup AND the failure assert in one
# try with a bare 'except:', which swallowed the AssertionError itself, so
# the check could never fail. Restructured with try/except/else so only
# the lookup is guarded.

from capdl import ELF

elf = ELF('unstripped.bin')
assert elf.get_arch() == 'x86'
# Confirm that the address concurs with the one we get from objdump.
assert elf.get_symbol_vaddr('_start') == 0x08048d48

elf = ELF('stripped.bin')
assert elf.get_arch() == 'x86'
# We shouldn't be able to get the symbol from the stripped binary.
try:
    vaddr = elf.get_symbol_vaddr('_start')
except Exception:
    # Expected: lookup on a stripped binary raises.
    pass
else:
    assert False, \
        'Symbol lookup on a stripped binary returned _start == 0x%0.8x' % vaddr
def main():
    """Entry point of the CAmkES runner tool.

    Parses the command line, reads and resolves the input ADL specification,
    instantiates templates in a deterministic order, and writes the template
    the user requested (--item) to the output file. Never returns normally:
    every path ends in `done` (which calls sys.exit) or `die`.

    NB: this is Python 2 code (`print >>`, `raw_input`, bare `reduce`/`map`).
    """
    options = parse_args(constants.TOOL_RUNNER)

    # Save us having to pass debugging everywhere.
    die = functools.partial(_die, options.verbosity >= 3)

    log.set_verbosity(options.verbosity)

    def done(s):
        # Emit the rendered output (if any), optionally drop the user into an
        # editor on it, then exit the process.
        ret = 0
        if s:
            print >>options.outfile, s
            options.outfile.close()
            if options.post_render_edit and \
                    raw_input('Edit rendered template %s [y/N]? ' % \
                    options.outfile.name) == 'y':
                editor = os.environ.get('EDITOR', 'vim')
                ret = subprocess.call([editor, options.outfile.name])
        sys.exit(ret)

    if not options.platform or options.platform in ['?', 'help'] \
            or options.platform not in PLATFORMS:
        die('Valid --platform arguments are %s' % ', '.join(PLATFORMS))

    if not options.file or len(options.file) > 1:
        die('A single input file must be provided for this operation')

    try:
        profiler = get_profiler(options.profiler, options.profile_log)
    except Exception as inst:
        die('Failed to create profiler: %s' % str(inst))

    # Construct the compilation cache if requested.
    cache = None
    if options.cache in ['on', 'readonly', 'writeonly']:
        cache = Cache(options.cache_dir)

    f = options.file[0]
    try:
        with profiler('Reading input'):
            s = f.read()

        # Try to find this output in the compilation cache if possible. This is
        # one of two places that we check in the cache. This check will 'hit'
        # if the source files representing the input spec are identical to some
        # previous execution.
        if options.cache in ['on', 'readonly']:
            with profiler('Looking for a cached version of this output'):
                key = [version(), os.path.abspath(f.name), s,
                    cache_relevant_options(options), options.platform,
                    options.item]
                value = cache.get(key)
                if value is not None and value.valid():
                    # Cache hit.
                    assert isinstance(value, FileSet), \
                        'illegally cached a value for %s that is not a FileSet' % options.item
                    log.debug('Retrieved %(platform)s.%(item)s from cache' % \
                        options.__dict__)
                    done(value.output)

        with profiler('Parsing input'):
            ast = parser.parse_to_ast(s, options.cpp, options.cpp_flag,
                options.ply_optimise)
            parser.assign_filenames(ast, f.name)
    except parser.CAmkESSyntaxError as e:
        # Attach column info (derived from the raw source) before reporting.
        e.set_column(s)
        die('%s:%s' % (f.name, str(e)))
    except Exception as inst:
        die('While parsing \'%s\': %s' % (f.name, str(inst)))

    try:
        for t in AST_TRANSFORMS[PRE_RESOLUTION]:
            with profiler('Running AST transform %s' % t.__name__):
                ast = t(ast)
    except Exception as inst:
        die('While transforming AST: %s' % str(inst))

    try:
        with profiler('Resolving imports'):
            ast, imported = parser.resolve_imports(ast, \
                os.path.dirname(os.path.abspath(f.name)), options.import_path,
                options.cpp, options.cpp_flag, options.ply_optimise)
    except Exception as inst:
        die('While resolving imports of \'%s\': %s' % (f.name, str(inst)))

    try:
        with profiler('Combining assemblies'):
            # if there are multiple assemblies, combine them now
            compose_assemblies(ast)
    except Exception as inst:
        die('While combining assemblies: %s' % str(inst))

    # Keep a pristine copy of the AST: it is used below as a cache key that is
    # insensitive to semantically-irrelevant source changes.
    with profiler('Caching original AST'):
        orig_ast = deepcopy(ast)
    with profiler('Deduping AST'):
        ast = parser.dedupe(ast)
    try:
        with profiler('Resolving references'):
            ast = parser.resolve_references(ast)
    except Exception as inst:
        die('While resolving references of \'%s\': %s' % (f.name, str(inst)))

    try:
        with profiler('Collapsing references'):
            parser.collapse_references(ast)
    except Exception as inst:
        die('While collapsing references of \'%s\': %s' % (f.name, str(inst)))

    try:
        for t in AST_TRANSFORMS[POST_RESOLUTION]:
            with profiler('Running AST transform %s' % t.__name__):
                ast = t(ast)
    except Exception as inst:
        die('While transforming AST: %s' % str(inst))

    try:
        with profiler('Resolving hierarchy'):
            resolve_hierarchy(ast)
    except Exception as inst:
        die('While resolving hierarchy: %s' % str(inst))

    # If we have a readable cache check if our current target is in the cache.
    # The previous check will 'miss' and this one will 'hit' when the input
    # spec is identical to some previous execution modulo a semantically
    # irrelevant element (e.g. an introduced comment). I.e. the previous check
    # matches when the input is exactly the same and this one matches when the
    # AST is unchanged.
    if options.cache in ['on', 'readonly']:
        with profiler('Looking for a cached version of this output'):
            key = [version(), orig_ast, cache_relevant_options(options),
                options.platform, options.item]
            value = cache.get(key)
            if value is not None:
                assert options.item not in NEVER_AST_CACHE, \
                    '%s, that is marked \'never cache\' is in your cache' % options.item
                log.debug('Retrieved %(platform)s.%(item)s from cache' % \
                    options.__dict__)
                done(value)

    # If we have a writable cache, allow outputs to be saved to it.
    if options.cache in ['on', 'writeonly']:
        fs = FileSet(imported)
        def save(item, value):
            # Save an input-keyed cache entry. This one is based on the
            # pre-parsed inputs to save having to derive the AST (parse the
            # input) in order to locate a cache entry in following passes.
            # This corresponds to the first cache check above.
            key = [version(), os.path.abspath(options.file[0].name), s,
                cache_relevant_options(options), options.platform, item]
            specialised = fs.specialise(value)
            if item == 'capdl':
                specialised.extend(options.elf or [])
            cache[key] = specialised
            if item not in NEVER_AST_CACHE:
                # Save an AST-keyed cache entry. This corresponds to the second
                # cache check above.
                cache[[version(), orig_ast, cache_relevant_options(options),
                    options.platform, item]] = value
    else:
        # No writable cache: saving is a no-op.
        def save(item, value):
            pass

    # All references in the AST need to be resolved for us to continue.
    unresolved = reduce(lambda a, x: a.union(x),
        map(lambda x: x.unresolved(), ast), set())
    if unresolved:
        die('Unresolved references in input specification:\n %s' % \
            '\n '.join(map(lambda x: '%(filename)s:%(lineno)s:\'%(name)s\' of type %(type)s' % {
                'filename':x.filename or '<unnamed file>',
                'lineno':x.lineno,
                'name':x._symbol,
                'type':x._type.__name__,
            }, unresolved)))

    # Locate the assembly
    assembly = [x for x in ast if isinstance(x, AST.Assembly)]
    if len(assembly) > 1:
        die('Multiple assemblies found')
    elif len(assembly) == 1:
        assembly = assembly[0]
    else:
        die('No assembly found')

    obj_space = ObjectAllocator()
    cspaces = {}      # address space name -> CSpaceAllocator
    pds = {}          # address space name -> page directory object
    conf = assembly.configuration
    shmem = defaultdict(dict)

    # We need to create a phony instance and connection to cope with cases
    # where the user has not defined any instances or connections (this would
    # be an arguably useless system, but we should still support it). We append
    # these to the template's view of the system below to ensure we always get
    # a usable template dictionary. Note that this doesn't cause any problems
    # because the phony items are named '' and thus unaddressable in ADL.
    dummy_instance = AST.Instance(AST.Reference('', AST.Instance), '')
    dummy_connection = AST.Connection(AST.Reference('', AST.Connector), '', \
        AST.Reference('', AST.Instance), AST.Reference('', AST.Interface), \
        AST.Reference('', AST.Instance), AST.Reference('', AST.Interface))

    templates = Templates(options.platform,
        instance=map(lambda x: x.name, assembly.composition.instances + \
            [dummy_instance]), \
        connection=map(lambda x: x.name, assembly.composition.connections + \
            [dummy_connection]))
    if options.templates:
        templates.add_root(options.templates)
    r = Renderer(templates.get_roots(), options)

    # The user may have provided their own connector definitions (with
    # associated) templates, in which case they won't be in the built-in lookup
    # dictionary. Let's add them now. Note, definitions here that conflict with
    # existing lookup entries will overwrite the existing entries.
    for c in [x for x in ast if isinstance(x, AST.Connector)]:
        if c.from_template:
            templates.add(c.name, 'from.source', c.from_template)
        if c.to_template:
            templates.add(c.name, 'to.source', c.to_template)

    # We're now ready to instantiate the template the user requested, but there
    # are a few wrinkles in the process. Namely,
    #  1. Template instantiation needs to be done in a deterministic order. The
    #     runner is invoked multiple times and template code needs to be
    #     allocated identical cap slots in each run.
    #  2. Components and connections need to be instantiated before any other
    #     templates, regardless of whether they are the ones we are after. Some
    #     other templates, such as the Makefile depend on the obj_space and
    #     cspaces.
    #  3. All actual code templates, up to the template that was requested,
    #     need to be instantiated. This is related to (1) in that the cap slots
    #     allocated are dependent on what allocations have been done prior to a
    #     given allocation call.

    # Instantiate the per-component source and header files.
    for id, i in enumerate(assembly.composition.instances):
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue
        if i.address_space not in cspaces:
            # First instance in this address space: create its CNode and page
            # directory.
            p = Perspective(phase=RUNNER, instance=i.name,
                group=i.address_space)
            cnode = obj_space.alloc(seL4_CapTableObject,
                name=p['cnode'], label=i.address_space)
            cspaces[i.address_space] = CSpaceAllocator(cnode)
            pd = obj_space.alloc(seL4_PageDirectoryObject, name=p['pd'],
                label=i.address_space)
            pds[i.address_space] = pd
        for t in ['%s.source' % i.name, '%s.header' % i.name,
                '%s.linker' % i.name]:
            try:
                template = templates.lookup(t, i)
                g = ''
                if template:
                    with profiler('Rendering %s' % t):
                        g = r.render(i, assembly, template, obj_space,
                            cspaces[i.address_space], \
                            shmem, options=options, id=id,
                            my_pd=pds[i.address_space])
                save(t, g)
                if options.item == t:
                    if not template:
                        log.warning('Warning: no template for %s' % options.item)
                    done(g)
            except Exception as inst:
                die('While rendering %s: %s' % (i.name, str(inst)))

    # Instantiate the per-connection files.
    # Connections that share an endpoint are renamed to share a single
    # generated connection name; conn_dict maps "<instance>_<interface>" keys
    # to that name.
    conn_dict = {}
    for id, c in enumerate(assembly.composition.connections):
        tmp_name = c.name
        key_from = (c.from_instance.name + '_' + c.from_interface.name) in conn_dict
        key_to = (c.to_instance.name + '_' + c.to_interface.name) in conn_dict
        if not key_from and not key_to:
            # We need a new connection name
            conn_name = 'conn' + str(id)
            c.name = conn_name
            conn_dict[c.from_instance.name + '_' + c.from_interface.name] = conn_name
            conn_dict[c.to_instance.name + '_' + c.to_interface.name] = conn_name
        elif not key_to:
            conn_name = conn_dict[c.from_instance.name + '_' + c.from_interface.name]
            c.name = conn_name
            conn_dict[c.to_instance.name + '_' + c.to_interface.name] = conn_name
        elif not key_from:
            conn_name = conn_dict[c.to_instance.name + '_' + c.to_interface.name]
            c.name = conn_name
            conn_dict[c.from_instance.name + '_' + c.from_interface.name] = conn_name
        else:
            # Both endpoints already handled; nothing to render.
            continue

        for t in [('%s.from.source' % tmp_name, c.from_instance.address_space),
                ('%s.from.header' % tmp_name, c.from_instance.address_space),
                ('%s.to.source' % tmp_name, c.to_instance.address_space),
                ('%s.to.header' % tmp_name, c.to_instance.address_space)]:
            try:
                template = templates.lookup(t[0], c)
                g = ''
                if template:
                    with profiler('Rendering %s' % t[0]):
                        g = r.render(c, assembly, template, obj_space,
                            cspaces[t[1]], \
                            shmem, options=options, id=id, my_pd=pds[t[1]])
                save(t[0], g)
                if options.item == t[0]:
                    if not template:
                        log.warning('Warning: no template for %s' % options.item)
                    done(g)
            except Exception as inst:
                die('While rendering %s: %s' % (t[0], str(inst)))
        # Restore the connection's original (user-visible) name.
        c.name = tmp_name

        # The following block handles instantiations of per-connection
        # templates that are neither a 'source' or a 'header', as handled
        # above. We assume that none of these need instantiation unless we are
        # actually currently looking for them (== options.item). That is, we
        # assume that following templates, like the CapDL spec, do not require
        # these templates to be rendered prior to themselves.
        # FIXME: This is a pretty ugly way of handling this. It would be nicer
        # for the runner to have a more general notion of per-'thing' templates
        # where the per-component templates, the per-connection template loop
        # above, and this loop could all be done in a single unified control
        # flow.
        for t in [('%s.from.' % c.name, c.from_instance.address_space),
                ('%s.to.' % c.name, c.to_instance.address_space)]:
            if not options.item.startswith(t[0]):
                # This is not the item we're looking for.
                continue
            try:
                # If we've reached here then this is the exact item we're
                # after.
                template = templates.lookup(options.item, c)
                if template is None:
                    raise Exception('no registered template for %s' % options.item)
                with profiler('Rendering %s' % options.item):
                    g = r.render(c, assembly, template, obj_space,
                        cspaces[t[1]], \
                        shmem, options=options, id=id, my_pd=pds[t[1]])
                save(options.item, g)
                done(g)
            except Exception as inst:
                die('While rendering %s: %s' % (options.item, str(inst)))

    # Perform any per component simple generation. This needs to happen last
    # as this template needs to run after all other capabilities have been
    # allocated
    for id, i in enumerate(assembly.composition.instances):
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue
        assert i.address_space in cspaces
        if conf and conf.settings and [x for x in conf.settings if \
                x.instance == i.name and x.attribute == 'simple' and x.value]:
            for t in ['%s.simple' % i.name]:
                try:
                    template = templates.lookup(t, i)
                    g = ''
                    if template:
                        with profiler('Rendering %s' % t):
                            g = r.render(i, assembly, template, obj_space,
                                cspaces[i.address_space], \
                                shmem, options=options, id=id,
                                my_pd=pds[i.address_space])
                    save(t, g)
                    if options.item == t:
                        if not template:
                            log.warning('Warning: no template for %s' % options.item)
                        done(g)
                except Exception as inst:
                    die('While rendering %s: %s' % (i.name, str(inst)))

    # Derive a set of usable ELF objects from the filenames we were passed.
    elfs = {}
    arch = None
    for e in options.elf or []:
        try:
            name = os.path.basename(e)
            if name in elfs:
                raise Exception('duplicate ELF files of name \'%s\' encountered' % name)
            elf = ELF(e, name)
            if not arch:
                # The spec's arch will have defaulted to ARM, but we want it to
                # be the same as whatever ELF format we're parsing.
                arch = elf.get_arch()
                if arch == 'ARM':
                    obj_space.spec.arch = 'arm11'
                elif arch == 'x86':
                    obj_space.spec.arch = 'ia32'
                else:
                    raise NotImplementedError
            else:
                # All ELF files we're parsing should be the same format.
                if arch != elf.get_arch():
                    raise Exception('ELF files are not all the same architecture')
            # Pass 'False' to avoid inferring a TCB as we've already created
            # our own.
            p = Perspective(phase=RUNNER, elf_name=name)
            group = p['group']
            with profiler('Deriving CapDL spec from %s' % e):
                elf_spec = elf.get_spec(infer_tcb=False, infer_asid=False,
                    pd=pds[group], use_large_frames=options.largeframe)
                obj_space.merge(elf_spec, label=group)
            elfs[name] = (e, elf)
        except Exception as inst:
            die('While opening \'%s\': %s' % (e, str(inst)))

    if options.item in ['capdl', 'label-mapping']:
        # It's only relevant to run these filters if the final target is CapDL.
        # Note, this will no longer be true if we add any other templates that
        # depend on a fully formed CapDL spec. Guarding this loop with an if
        # is just an optimisation and the conditional can be removed if
        # desired.
        for f in CAPDL_FILTERS:
            try:
                with profiler('Running CapDL filter %s' % f.__name__):
                    # Pass everything as named arguments to allow filters to
                    # easily ignore what they don't want.
                    f(ast=ast, obj_space=obj_space, cspaces=cspaces,
                        elfs=elfs, profiler=profiler, options=options,
                        shmem=shmem)
            except Exception as inst:
                die('While forming CapDL spec: %s' % str(inst))

    # Instantiate any other, miscellaneous template. If we've reached this
    # point, we know the user did not request a code template.
    try:
        template = templates.lookup(options.item)
        g = ''
        if template:
            with profiler('Rendering %s' % options.item):
                g = r.render(assembly, assembly, template, obj_space, None, \
                    shmem, imported=imported, options=options)
            save(options.item, g)
            done(g)
    except Exception as inst:
        die('While rendering %s: %s' % (options.item, str(inst)))

    die('No valid element matching --item %s' % options.item)
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# ABN 41 687 119 230.
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(DATA61_BSD)
#

from __future__ import absolute_import, division, print_function, \
    unicode_literals

from capdl import ELF

# Symbol lookup should work on a binary with an intact symbol table.
elf = ELF('unstripped.bin')
assert elf.get_arch() == 'x86'

# Confirm that the address concurs with the one we get from objdump.
assert elf.get_symbol_vaddr('_start') == 0x08048d48

elf = ELF('stripped.bin')
assert elf.get_arch() == 'x86'

# We shouldn't be able to get the symbol from the stripped binary.
#
# NOTE: the previous version put the failure `assert` inside the `try` with a
# bare `except:`, which swallowed the AssertionError itself and made this
# check a no-op (it was also missing the `pass` in the handler). The failure
# is now raised in the `else` clause so an unexpected successful lookup
# genuinely fails the test.
try:
    vaddr = elf.get_symbol_vaddr('_start')
except Exception:
    # Expected: the symbol table has been stripped.
    pass
else:
    raise AssertionError(
        'Symbol lookup on a stripped binary returned _start == 0x%0.8x' % vaddr)
#!/usr/bin/env python
#
# Copyright 2014, NICTA
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(NICTA_BSD)
#

from __future__ import absolute_import, division, print_function, \
    unicode_literals

from capdl import ELF

# Smoke test: load an x86 ELF image, confirm the architecture it reports,
# and make sure a CapDL spec can be derived from it without raising.
image = ELF('hello.bin')
arch = image.get_arch()
assert arch == 'x86'
image.get_spec()
#!/usr/bin/env python
#
# Copyright 2014, NICTA
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(NICTA_BSD)
#

from __future__ import absolute_import, division, print_function, unicode_literals

from capdl import ELF

# Smoke test: load an ARM ELF image, confirm the architecture it reports,
# and make sure a CapDL spec can be derived from it without raising.
image = ELF("hello.bin")
arch = image.get_arch()
assert arch in [40, "EM_ARM", "ARM"]
image.get_spec()