def test_cache_hit_truncate(self):
    '''
    Regression test: the accelerator must truncate the output file before
    writing a cached result into it.

    An earlier accelerator bug left stale bytes behind whenever a cache
    hit delivered content shorter than the file's existing contents:

      1. build with cache A enabled; "short string" is output and cached;
      2. build with a different config; "a slightly longer string" is
         output (and cached);
      3. build again with the original config via the accelerator;
         "short string" is retrieved but written without truncating,
         leaving "short stringonger string" in the file.

    This test validates that the problem has not been reintroduced.
    '''
    cache_root = self.mkdtemp()
    # CAmkES namespaces the cache beneath the user-supplied root.
    cache = Cache(os.path.join(cache_root, version(), 'cachea'))

    # A single fake input file.
    source = self.mkstemp()
    with open(source, 'wt') as f:
        f.write('hello world')
    inputs = prime_inputs([source])

    workdir = self.mkdtemp()
    outfile = self.mkstemp()
    args = ['--cache-dir', cache_root, '--outfile', outfile]

    # Cache a deliberately short value. The trailing '--outfile <path>' is
    # stripped because the runner and the accelerator both do so before
    # interacting with the cache.
    short_value = 'moo cow'
    cache.save(args[:-2], workdir, short_value, inputs)
    cache.flush()
    del cache

    # Pre-fill the output file with something *longer* than the cached
    # value.
    with open(outfile, 'wt') as f:
        f.write('some lengthier text')

    # Run the accelerator to retrieve the original, shorter output.
    ret, stdout, stderr = self.execute([self.accelerator] + args,
                                       cwd=workdir)

    # It should have hit the cache and fully replaced the file contents,
    # leaving no trailing garbage.
    self.assertEqual(ret, 0)
    self.assertEqual(stdout, '')
    self.assertEqual(stderr, '')
    with open(outfile) as f:
        self.assertEqual(f.read(), short_value)
def save(item, value):
    '''
    Write `value` into both compilation caches under the given item name.

    Two entries are saved:
      1. an input-keyed entry, based on the pre-parsed inputs, so a later
         pass can locate a cache hit without having to derive the AST
         (parse the input) first — this corresponds to the first cache
         check above; and
      2. an AST-keyed entry, corresponding to the second cache check
         above, unless the item is in NEVER_AST_CACHE.
    '''
    # Hoist values that must be identical for both cache keys, rather
    # than recomputing each of them twice.
    ver = version()
    relevant = cache_relevant_options(options)

    # Input-keyed cache entry.
    key = [ver, os.path.abspath(options.file[0].name), s, relevant,
           options.platform, item]
    specialised = fs.specialise(value)
    if item == 'capdl':
        specialised.extend(options.elf or [])
    cache[key] = specialised

    if item not in NEVER_AST_CACHE:
        # AST-keyed cache entry.
        cache[[ver, orig_ast, relevant, options.platform, item]] = value
def __init__(self, templates, cache, cache_dir):
    '''
    Construct the Jinja rendering environment.

    PERF: this would be trivial, except that we optimise re-execution of
    template code by compiling the templates to Python bytecode the first
    time they are seen. This happens when the compilation cache is
    enabled and speeds up execution of the template code in future runs.
    '''
    self.templates = templates

    # Directory in which pre-compiled Jinja2 templates are stored and
    # fetched (namespaced by CAmkES version).
    template_cache = os.path.join(cache_dir, version(),
                                  'precompiled-templates')

    loaders = []
    if cache and os.path.exists(template_cache):
        # Prefer pre-compiled templates when available.
        loaders.append(jinja2.ModuleLoader(template_cache))
    # Fall back to the template sources themselves.
    for root in templates.get_roots():
        loaders.append(jinja2.FileSystemLoader(os.path.abspath(root)))

    self.env = jinja2.Environment(
        loader=jinja2.ChoiceLoader(loaders),
        extensions=["jinja2.ext.do", "jinja2.ext.loopcontrols"],
        block_start_string=START_BLOCK,
        block_end_string=END_BLOCK,
        variable_start_string=START_VARIABLE,
        variable_end_string=END_VARIABLE,
        comment_start_string=START_COMMENT,
        comment_end_string=END_COMMENT,
        auto_reload=False,
        undefined=jinja2.StrictUndefined)

    if cache and not os.path.exists(template_cache):
        # The pre-compiled template cache is enabled but does not exist;
        # build it here for next time. Only compile templates we know of,
        # to avoid errors or wasted time on stray garbage in the template
        # directory (vim swp files, pycs, ...).
        known = list(get_leaves(TEMPLATES))
        mkdirp(template_cache)
        # PYCs are only produced on CPython 2; the option has no effect
        # on Python 3 or PyPy.
        self.env.compile_templates(
            template_cache,
            filter_func=lambda name: name in known,
            zip=None,
            ignore_errors=False,
            py_compile=platform.python_implementation() == 'CPython'
                and six.PY2)
def __init__(self, templates, cache, cache_dir):
    '''
    Construct the Jinja rendering environment.

    PERF: this function is conceptually trivial, but when the compilation
    cache is enabled we also compile templates to Python bytecode the
    first time they are seen, which speeds up executing the template code
    itself in future runs.
    '''
    self.templates = templates

    # Directory holding pre-compiled Jinja2 templates, namespaced by the
    # CAmkES version.
    template_cache = os.path.join(cache_dir, version(),
                                  'precompiled-templates')

    # Pre-compiled templates (when present and caching is on) are
    # consulted first, then the template sources.
    if cache and os.path.exists(template_cache):
        loaders = [jinja2.ModuleLoader(template_cache)]
    else:
        loaders = []
    loaders += [jinja2.FileSystemLoader(os.path.abspath(r))
                for r in templates.get_roots()]

    self.env = jinja2.Environment(
        loader=jinja2.ChoiceLoader(loaders),
        extensions=["jinja2.ext.do", "jinja2.ext.loopcontrols"],
        block_start_string=START_BLOCK,
        block_end_string=END_BLOCK,
        variable_start_string=START_VARIABLE,
        variable_end_string=END_VARIABLE,
        comment_start_string=START_COMMENT,
        comment_end_string=END_COMMENT,
        auto_reload=False,
        undefined=jinja2.StrictUndefined)

    if cache and not os.path.exists(template_cache):
        # The pre-compiled template cache is enabled but empty; populate
        # it for next time. We restrict compilation to the templates we
        # know of so stray files in the template directory (vim swp
        # files, pycs, ...) cause neither errors nor wasted time.
        known = list(get_leaves(TEMPLATES))
        mkdirp(template_cache)
        # Only emit PYCs on CPython 2: the option is a no-op on Python 3
        # and PyPy.
        self.env.compile_templates(
            template_cache,
            filter_func=lambda name: name in known,
            zip=None,
            ignore_errors=False,
            py_compile=platform.python_implementation() == 'CPython'
                and six.PY2)
def __init__(self, template_paths, options):
    '''
    Construct the Jinja rendering environment.

    PERF: conceptually this only builds a Jinja environment, but when the
    compilation cache is enabled we also compile templates to Python
    bytecode the first time they are seen, speeding up execution of the
    template code in later runs.
    '''
    # Directory in which pre-compiled Jinja2 templates are stored and
    # fetched.
    template_cache = os.path.join(options.cache_dir, version(),
                                  'precompiled-templates')

    loaders = []
    if options.cache in ('on', 'readonly') and \
            os.path.exists(template_cache):
        # Pre-compiled templates take priority.
        loaders.append(jinja2.ModuleLoader(template_cache))
    # Then the template sources themselves.
    loaders += [jinja2.FileSystemLoader(os.path.abspath(p))
                for p in template_paths]

    self.env = jinja2.Environment(
        loader=jinja2.ChoiceLoader(loaders),
        extensions=["jinja2.ext.do", "jinja2.ext.loopcontrols"],
        block_start_string=START_BLOCK,
        block_end_string=END_BLOCK,
        variable_start_string=START_VARIABLE,
        variable_end_string=END_VARIABLE,
        comment_start_string=START_COMMENT,
        comment_end_string=END_COMMENT,
        auto_reload=False)

    if options.cache in ('on', 'writeonly') and \
            not os.path.exists(template_cache):
        # The pre-compiled template cache is enabled but does not yet
        # exist; build it here for next time. Restrict compilation to
        # templates we know of, avoiding errors or wasted time on other
        # stray garbage in the template directory (vim swp files, pycs,
        # ...).
        known = list(get_leaves(TEMPLATES))
        mkdirp(template_cache)
        self.env.compile_templates(template_cache,
                                   filter_func=lambda name: name in known,
                                   zip=None, ignore_errors=False,
                                   py_compile=True)
def __init__(self, template_paths, options):
    '''
    Construct the Jinja rendering environment.

    PERF: this would be trivial, except that when the compilation cache
    is enabled we compile the templates to Python bytecode the first time
    they are seen, which should speed the execution of the template code
    itself in future runs.
    '''
    # Pre-compiled Jinja2 templates live here, namespaced by version.
    template_cache = os.path.join(options.cache_dir, version(),
                                  'precompiled-templates')

    loaders = []
    if options.cache in ('on', 'readonly') and \
            os.path.exists(template_cache):
        # Consult the pre-compiled templates first.
        loaders.append(jinja2.ModuleLoader(template_cache))
    # Source templates come after any pre-compiled ones.
    for path in template_paths:
        loaders.append(jinja2.FileSystemLoader(os.path.abspath(path)))

    self.env = jinja2.Environment(
        loader=jinja2.ChoiceLoader(loaders),
        extensions=["jinja2.ext.do", "jinja2.ext.loopcontrols"],
        block_start_string=START_BLOCK,
        block_end_string=END_BLOCK,
        variable_start_string=START_VARIABLE,
        variable_end_string=END_VARIABLE,
        comment_start_string=START_COMMENT,
        comment_end_string=END_COMMENT,
        auto_reload=False)

    if options.cache in ('on', 'writeonly') and \
            not os.path.exists(template_cache):
        # The cache is enabled but not yet populated; build it for next
        # time. Only the templates we know of are compiled, so stray
        # files in the template directory (vim swp files, pycs, ...)
        # cause neither errors nor wasted effort.
        known = list(get_leaves(TEMPLATES))
        mkdirp(template_cache)
        self.env.compile_templates(template_cache,
                                   filter_func=lambda name: name in known,
                                   zip=None, ignore_errors=False,
                                   py_compile=True)
def test_basic_valgrind(self):
    '''
    Check the accelerator for memory leaks on a basic cache retrieval.
    '''
    cache_root = self.mkdtemp()
    # CAmkES internally suffixes the root with a couple of things to
    # namespace the cache.
    cache = Cache(os.path.join(cache_root, version(), 'cachea'))

    # Two fake input files...
    first = self.mkstemp()
    with open(first, 'wt') as f:
        f.write('hello world')
    second = self.mkstemp()
    with open(second, 'wt') as f:
        f.write('foo bar')
    inputs = prime_inputs([first, second])

    # ...a fake working directory...
    workdir = self.mkdtemp()

    # ...and a pretend output file, giving command line arguments of:
    outfile = self.mkstemp()
    args = ['--cache-dir', cache_root, '--outfile', outfile]

    # Save the entry. The args are truncated because the runner and the
    # accelerator strip --outfile arguments before interacting with the
    # cache.
    cache.save(args[:-2], workdir, 'moo cow', inputs)
    cache.flush()
    # Done with the native cache.
    del cache

    # Read the entry back under Valgrind, first with the debug build...
    _, _, stderr = self.execute(VALGRIND + [self.debug_accelerator] + args,
                                cwd=workdir)
    if valgrind_found_leak(stderr):
        self.fail('camkes-accelerator %s leaks memory:\n%s'
                  % (' '.join(args), stderr))

    # ...then with the release build.
    _, _, stderr = self.execute(VALGRIND + [self.accelerator] + args,
                                cwd=workdir)
    if valgrind_found_leak(stderr):
        self.fail('camkes-accelerator %s leaks memory (not reproducible '
                  'in debug mode):\n%s' % (' '.join(args), stderr))
def test_basic(self):
    '''
    Happy path: an entry saved via the native cache can be retrieved
    through the accelerator (expected case).
    '''
    cache_root = self.mkdtemp()
    # CAmkES internally suffixes the root with a couple of things to
    # namespace the cache.
    cache = Cache(os.path.join(cache_root, version(), 'cachea'))

    # Two fake input files.
    first = self.mkstemp()
    with open(first, 'wt') as f:
        f.write('hello world')
    second = self.mkstemp()
    with open(second, 'wt') as f:
        f.write('foo bar')
    inputs = prime_inputs([first, second])

    # A fake working directory and a pretend output file, giving command
    # line arguments of:
    workdir = self.mkdtemp()
    outfile = self.mkstemp()
    args = ['--cache-dir', cache_root, '--outfile', outfile]

    # Save the entry. The args are truncated because the runner and the
    # accelerator strip --outfile arguments before interacting with the
    # cache.
    cache.save(args[:-2], workdir, 'moo cow', inputs)
    cache.flush()
    # Done with the native cache.
    del cache

    # Read the entry back through the accelerator.
    ret, _, _ = self.execute([self.accelerator] + args, cwd=workdir)
    self.assertEqual(ret, 0)

    # If it worked, the cached content is now in the expected place.
    with open(outfile, 'rt') as f:
        self.assertEqual(f.read(), 'moo cow')
def test_cache_miss_inputs_valgrind(self):
    '''
    Check the accelerator for memory leaks on the cache-miss (modified
    input) path.
    '''
    # Same setup as the basic test case.
    cache_root = self.mkdtemp()
    cache = Cache(os.path.join(cache_root, version(), 'cachea'))

    first = self.mkstemp()
    with open(first, 'wt') as f:
        f.write('hello world')
    second = self.mkstemp()
    with open(second, 'wt') as f:
        f.write('foo bar')
    inputs = prime_inputs([first, second])

    workdir = self.mkdtemp()
    outfile = self.mkstemp()
    args = ['--cache-dir', cache_root, '--outfile', outfile]
    cache.save(args[:-2], workdir, 'moo cow', inputs)
    cache.flush()
    del cache

    # Invalidate the entry by appending to one of the inputs.
    with open(second, 'at') as f:
        f.write('foo bar')

    # Run under Valgrind, first with the debug build...
    _, _, stderr = self.execute(VALGRIND + [self.debug_accelerator] + args,
                                cwd=workdir)
    if valgrind_found_leak(stderr):
        self.fail('camkes-accelerator %s leaks memory:\n%s'
                  % (' '.join(args), stderr))

    # ...then with the release build.
    _, _, stderr = self.execute(VALGRIND + [self.accelerator] + args,
                                cwd=workdir)
    if valgrind_found_leak(stderr):
        self.fail('camkes-accelerator %s leaks memory (not reproducible '
                  'in debug mode):\n%s' % (' '.join(args), stderr))
def test_cache_miss_inputs(self):
    '''
    The accelerator must miss when one of the cached inputs has changed.
    '''
    # Same setup as the basic test case.
    cache_root = self.mkdtemp()
    cache = Cache(os.path.join(cache_root, version(), 'cachea'))

    first = self.mkstemp()
    with open(first, 'wt') as f:
        f.write('hello world')
    second = self.mkstemp()
    with open(second, 'wt') as f:
        f.write('foo bar')
    inputs = prime_inputs([first, second])

    workdir = self.mkdtemp()
    outfile = self.mkstemp()
    args = ['--cache-dir', cache_root, '--outfile', outfile]
    cache.save(args[:-2], workdir, 'moo cow', inputs)
    cache.flush()
    del cache

    # Invalidate the entry by appending to one of the inputs.
    with open(second, 'at') as f:
        f.write('foo bar')

    ret, stdout, stderr = self.execute([self.accelerator] + args,
                                       cwd=workdir)

    # A miss is a non-zero exit status with no output on either stream.
    self.assertNotEqual(ret, 0)
    self.assertEqual(stdout, '')
    self.assertEqual(stderr, '')
def new_context(entity, assembly, obj_space, cap_space, shmem, kept_symbols,
        fill_frames, templates, **kwargs):
    '''
    Create a new default context for rendering.

    The returned dict is the global namespace handed to template code:
    Python builtins, allocation helpers bound to the current entity,
    selected stdlib modules, and any extra entries from `kwargs` (which,
    appearing last, override the defaults defined here).
    '''
    return dict(list(__builtins__.items()) + list({

        # Kernel object allocator. This one is based on the pre-parsed
        # inputs and is only available when an object space was supplied.
        'alloc_obj': (lambda name, type, **kwargs:
            alloc_obj((entity.label(), obj_space), obj_space,
                '%s_%s' % (entity.label(), name), type,
                label=entity.label(), **kwargs))
            if obj_space else None,
        'seL4_EndpointObject': seL4_EndpointObject,
        'seL4_NotificationObject': seL4_NotificationObject,
        'seL4_TCBObject': seL4_TCBObject,
        'seL4_ARM_SmallPageObject': seL4_ARM_SmallPageObject,
        'seL4_ARM_SectionObject': seL4_ARM_SectionObject,
        'seL4_ARM_SuperSectionObject': seL4_ARM_SuperSectionObject,
        'seL4_FrameObject': seL4_FrameObject,
        'seL4_UntypedObject': seL4_UntypedObject,
        'seL4_IA32_IOPort': seL4_IA32_IOPort,
        'seL4_IA32_IOSpace': seL4_IA32_IOSpace,
        'seL4_ARM_IOSpace': seL4_ARM_IOSpace,
        'seL4_SchedContextObject': seL4_SchedContextObject,
        'seL4_SchedControl': seL4_SchedControl,
        'seL4_RTReplyObject': seL4_RTReplyObject,
        'seL4_ASID_Pool': seL4_ASID_Pool,

        # Cap allocator; only available when a cap space was supplied.
        'alloc_cap': (lambda name, obj, **kwargs:
            alloc_cap((entity.label(), cap_space), cap_space, name, obj,
                **kwargs))
            if cap_space else None,
        'seL4_CanRead': seL4_CanRead,
        'seL4_CanWrite': seL4_CanWrite,
        'seL4_AllRights': seL4_AllRights,
        'seL4_IRQControl': seL4_IRQControl,

        # The CNode root of your CSpace. Should only be necessary in cases
        # where you need to allocate a cap to it.
        'my_cnode': cap_space.cnode if cap_space is not None else None,

        # Batched object and cap allocation for when you don't need a
        # reference to the object. Probably best not to look directly at
        # this one. When you see `set y = alloc('foo', bar, moo)` in
        # template code, think:
        #   set x = alloc_obj('foo_obj', bar)
        #   set y = alloc_cap('foo_cap', x, moo)
        'alloc': (lambda name, type, **kwargs:
            alloc_cap((entity.label(), cap_space), cap_space, name,
                alloc_obj((entity.label(), obj_space), obj_space,
                    '%s_%s' % (entity.label(), name), type,
                    label=entity.label(), **kwargs),
                **kwargs))
            if cap_space else None,

        # Functionality for templates to inform us that they've emitted a
        # C variable that's intended to map to a shared variable. It is
        # (deliberately) left to the template authors to ensure global
        # names (gnames) only collide when intended; i.e. when they should
        # map to the same shared variable. The local name (lname) will
        # later be used by us to locate the relevant ELF frame(s) to
        # remap. Note that we assume address spaces and CSpaces are
        # 1-to-1.
        'register_shared_variable': None if cap_space is None else
            (lambda gname, lname, perm='RWX', paddr=None, frames=None,
                cached_hw=False:
                register_shared_variable(shmem, gname,
                    cap_space.cnode.name, lname, perm, paddr, frames,
                    cached_hw)),

        # Function for templates to inform us that they would like certain
        # 'fill' information to get placed into the provided symbol. The
        # provided symbol should be page sized and aligned. The 'fill'
        # parameter is an arbitrary string that will be set as the 'fill'
        # parameter on the capDL frame object. The meaning of fill is
        # completely dependent on the underlying loader.
        'register_fill_frame': (lambda symbol, fill:
            register_fill_frame(fill_frames, symbol, fill, entity)),

        # Inform the linker that a C symbol should not be removed, even if
        # it is not used by any C code.
        'keep_symbol': (lambda symbol:
            keep_symbol(kept_symbols, symbol, entity)),

        # Returns an iterator over all the C symbols declared to be kept
        # by a given component instance (specified by name).
        'kept_symbols': (lambda name:
            iter(kept_symbols[name] if name in kept_symbols else ())),

        # A `self`-like reference to the current AST object. It would be
        # nice to actually call this `self` to lead to more pythonic
        # templates, but `self` inside template blocks refers to the
        # jinja2 parser.
        'me': entity,

        # The AST assembly's configuration.
        'configuration': assembly.configuration,

        # The AST assembly's composition.
        'composition': assembly.composition,

        # Shared memory metadata. Templates should only need to modify
        # this if they're doing something cross-component.
        'shmem': shmem if entity is not None else None,

        # Cross-template variable passing helpers. These are quite
        # low-level. Avoid calling them unless necessary.
        'stash': partial(stash, entity.label()),
        'pop': partial(pop, entity.label()),
        'guard': partial(guard, entity.label()),

        # If the previous group of functions are considered harmful, these
        # are to be considered completely off limits. They expose a
        # mechanism for passing data between unrelated templates (_stash
        # and _pop) and a way of running arbitrary Python statements and
        # expressions. They come with significant caveats, e.g. _stash and
        # _pop will likely not behave as expected with the template cache
        # enabled.
        '_stash': partial(stash, ''),
        '_pop': partial(pop, ''),
        'exec': _exec,

        # Helpers for creating unique symbols within templates.
        'c_symbol': partial(symbol, '_camkes_%(tag)s_%(counter)d'),
        'isabelle_symbol': partial(symbol, '%(tag)s%(counter)d\'', 's'),

        # Expose some library functions.
        'assert': _assert,
        'itertools': itertools,
        'functools': functools,
        'lambda': lambda s: eval('lambda %s' % s),
        'numbers': numbers,
        'os': os,
        'pdb': pdb,
        'raise': _raise,
        're': re,
        'six': six,
        'set': orderedset.OrderedSet,
        'textwrap': textwrap,
        'copy': copy,
        'zip': zip,
        'math': math,
        'enumerate': enumerate,

        # Allocation pools. In general, do not touch these in templates;
        # interact with them through the alloc* functions. They are only
        # in the context to allow unanticipated template extensions.
        'obj_space': obj_space,
        'cap_space': cap_space,

        # Debugging functions.
        'breakpoint': _breakpoint,
        'sys': sys,

        # Work around for Jinja's bizarre scoping rules.
        'Counter': Counter,

        # Support for name mangling in the templates. See existing usage
        # for examples.
        'Perspective': lambda **kwargs: Perspective(TEMPLATES, **kwargs),

        # Low-level access to name mangling. Should only be required when
        # you need to access both mangling phases.
        'NameMangling': collections.namedtuple('NameMangling',
            ['FILTERS', 'TEMPLATES', 'Perspective'])(FILTERS, TEMPLATES,
                Perspective),

        # Return a list of distinct elements. Normally you would just do
        # this as list(set(xs)), but this turns out to be
        # non-deterministic in the template environment for some reason.
        'uniq': lambda xs:
            reduce(lambda ys, z: ys if z in ys else (ys + [z]), xs, []),

        # Functional helpers.
        'flatMap': lambda f, xs:
            list(itertools.chain.from_iterable(map(f, xs))),
        'flatten': lambda xss: list(itertools.chain.from_iterable(xss)),

        # Macros for common operations.
        'macros': macros,

        # This function abstracts away the differences between the RT
        # kernel's seL4_Recv and the master kernel's seL4_Recv. Namely,
        # the RT kernel's seL4_Recv takes an extra reply object cap.
        #
        # seL4_Recv is distinct from seL4_Wait, in that a seL4_Recv() call
        # expects to potentially get a reply cap from the sender.
        'generate_seL4_Recv': generate_seL4_Recv,

        # This function is similar to generate_seL4_Recv, in that it also
        # abstracts away the differences between the RT and master
        # kernels. This function specifically abstracts away the
        # differences between seL4_SignalRecv (on master) and
        # seL4_NBSendRecv (on RT).
        'generate_seL4_SignalRecv': generate_seL4_SignalRecv,

        # This function is similar to generate_seL4_Recv as well, but it
        # abstracts away the differences between seL4_ReplyRecv between
        # the RT and master branches.
        'generate_seL4_ReplyRecv': generate_seL4_ReplyRecv,

        # Give template authors access to AST types just in case.
        # Templates should never be constructing objects of these types,
        # but they may need to do `isinstance` testing.
        'camkes': collections.namedtuple('camkes', ['ast'])(AST),

        # Expose CapDL module for `isinstance` testing.
        'capdl': capdl,

        # Give the template authors a mechanism for writing C-style
        # include guards. Use the following idiom to guard an include
        # target:
        #   /*- if 'template.filename' not in included' -*/
        #   /*- do included.add('template.filename') -*/
        #   ... my template ...
        #   /*- endif -*/
        'included': set(),

        # Expose an exception class templates can use to throw errors
        # related to invalid input specification.
        'TemplateError': TemplateError,

        # Version information. Templates are unlikely to depend on this,
        # but we emit it to give component instances a
        # runtime-discoverable CAmkES version.
        'camkes_version': version(),

        # Look up a template.
        'lookup_template': lambda path, entity:
            templates.lookup(path, entity),

    }.items()) + list(kwargs.items()))
def main(argv, out, err): # We need a UTF-8 locale, so bail out if we don't have one. More # specifically, things like the version() computation traverse the file # system and, if they hit a UTF-8 filename, they try to decode it into your # preferred encoding and trigger an exception. encoding = locale.getpreferredencoding().lower() if encoding not in ('utf-8', 'utf8'): err.write('CAmkES uses UTF-8 encoding, but your locale\'s preferred ' 'encoding is %s. You can override your locale with the LANG ' 'environment variable.\n' % encoding) return -1 options = parse_args(argv, out, err) # Ensure we were supplied equal items and outfiles if len(options.outfile) != len(options.item): err.write( 'Different number of items and outfiles. Required one outfile location ' 'per item requested.\n') return -1 # No duplicates in items or outfiles if len(set(options.item)) != len(options.item): err.write('Duplicate items requested through --item.\n') return -1 if len(set(options.outfile)) != len(options.outfile): err.write('Duplicate outfiles requrested through --outfile.\n') return -1 # Save us having to pass debugging everywhere. die = functools.partial(_die, options) log.set_verbosity(options.verbosity) cwd = os.getcwd() # Build a list of item/outfile pairs that we have yet to match and process all_items = set(zip(options.item, options.outfile)) done_items = set([]) # Construct the compilation caches if requested. cachea = None cacheb = None if options.cache: # Construct a modified version of the command line arguments that we'll # use in the keys to the caches. Essentially we elide --outfile and its # parameter under the assumption that this value is never used in code # generation. The purpose of this is to allow us to successfully cache # ancillary outputs that we generate along the way to the current # output. If we were to include --outfile in the key, future attempts # to generate these ancillary outputs would unnecessarily miss the # entries generated by this execution. 
args = [] skip = False for index, arg in enumerate(argv[1:]): if skip: skip = False continue if arg in ('--outfile', '-O'): skip = True continue args.append(arg) cachea = LevelACache( os.path.join(options.cache_dir, version(), 'cachea')) cacheb = LevelBCache( os.path.join(options.cache_dir, version(), 'cacheb')) def done(s, file, item): ret = 0 if s: file.write(s) file.close() if cachea is not None: try: cachea.flush() except sqlite3.OperationalError as e: # The following suppresses two spurious errors: # 1. The database is locked. In a large, parallel build, writes # to the level A cache are heavily contended and this error # can occur. # 2. The database structure is unexpected. If the CAmkES # sources have changed *while* the runner was executing, # the level A cache can be looking in a different place to # where the cache was created. # Both of these are non-critical (will just result in a # potential future cache miss) so there's no need to alarm the # user. if re.search(r'database is locked', str(e)) is not None or \ re.search(r'no such table', str(e)) is not None: log.debug('failed to flush level A cache: %s' % str(e)) else: raise if cacheb is not None: try: cacheb.flush() except sqlite3.OperationalError as e: # As above for the level B cache. if re.search(r'database is locked', str(e)): log.debug('failed to flush level B cache: %s' % str(e)) else: raise done_items.add((item, file)) if len(all_items - done_items) == 0: sys.exit(ret) # Try to find this output in the level A cache if possible. This check will # 'hit' if the source files representing the input spec are identical to # some previously observed execution. 
if cachea is not None: assert 'args' in locals() assert len(options.outfile) == 1, 'level A cache only supported when requestiong ' \ 'single items' output = cachea.load(args, cwd) if output is not None: log.debug('Retrieved %(platform)s/%(item)s from level A cache' % options.__dict__) done(output, options.outfile[0], options.item[0]) filename = os.path.abspath(options.file.name) try: # Build the parser options parse_options = ParserOptions(options.cpp, options.cpp_flag, options.import_path, options.verbosity, options.allow_forward_references) ast, read = parse_file_cached(filename, options.data_structure_cache_dir, parse_options) except (ASTError, ParseError) as e: die(e.args) # Locate the assembly. assembly = ast.assembly if assembly is None: die('No assembly found') # Do some extra checks if the user asked for verbose output. if options.verbosity >= 2: # Try to catch type mismatches in attribute settings. Note that it is # not possible to conclusively evaluate type correctness because the # attributes' type system is (deliberately) too loose. That is, the # type of an attribute can be an uninterpreted C type the user will # provide post hoc. 
for i in assembly.composition.instances: for a in i.type.attributes: value = assembly.configuration[i.name].get(a.name) if value is not None: if a.type == 'string' and not \ isinstance(value, six.string_types): log.warning('attribute %s.%s has type string but is ' 'set to a value that is not a string' % (i.name, a.name)) elif a.type == 'int' and not \ isinstance(value, numbers.Number): log.warning('attribute %s.%s has type int but is set ' 'to a value that is not an integer' % (i.name, a.name)) obj_space = ObjectAllocator() obj_space.spec.arch = options.architecture cspaces = {} pds = {} conf = assembly.configuration shmem = collections.defaultdict(ShmemFactory()) kept_symbols = {} fill_frames = {} templates = Templates(options.platform) [templates.add_root(t) for t in options.templates] try: r = Renderer(templates, options.cache, options.cache_dir) except jinja2.exceptions.TemplateSyntaxError as e: die('template syntax error: %s' % e) # The user may have provided their own connector definitions (with # associated) templates, in which case they won't be in the built-in lookup # dictionary. Let's add them now. Note, definitions here that conflict with # existing lookup entries will overwrite the existing entries. Note that # the extra check that the connector has some templates is just an # optimisation; the templates module handles connectors without templates # just fine. extra_templates = set() for c in (x for x in ast.items if isinstance(x, Connector) and ( x.from_template is not None or x.to_template is not None)): try: # Find a connection that uses this type. connection = next(x for x in ast if isinstance(x, Connection) and x.type == c) # Add the custom templates and update our collection of read # inputs. It is necessary to update the read set here to avoid # false compilation cache hits when the source of a custom template # has changed. 
extra_templates |= templates.add(c, connection) except TemplateError as e: die('while adding connector %s: %s' % (c.name, e)) except StopIteration: # No connections use this type. There's no point adding it to the # template lookup dictionary. pass # Check if our current target is in the level B cache. The level A cache # will 'miss' and this one will 'hit' when the input spec is identical to # some previously observed execution modulo a semantically irrelevant # element (e.g. an introduced comment). ast_hash = None if cacheb is not None: ast_hash = level_b_prime(ast) assert 'args' in locals() assert len(options.item) == 1, 'level B cache only supported when requesting ' \ 'single items' output = cacheb.load(ast_hash, args, set(options.elf) | extra_templates) if output is not None: log.debug('Retrieved %(platform)s/%(item)s from level B cache' % options.__dict__) done(output, options.outfile[0], options.item[0]) # Add custom templates. read |= extra_templates # Add the CAmkES sources themselves to the accumulated list of inputs. read |= set(path for path, _ in sources()) # Add any ELF files we were passed as inputs. read |= set(options.elf) # Write a Makefile dependency rule if requested. if options.makefile_dependencies is not None: options.makefile_dependencies.write( '%s: \\\n %s\n' % (filename, ' \\\n '.join(sorted(read)))) # If we have a cache, allow outputs to be saved to it. if options.cache: assert cachea is not None, 'level A cache not available, though the ' \ 'cache is enabled (bug in runner?)' # The logic of this cache currently only works when a single item is requested # on the command line assert len(options.item) == 1, 'level A cache only supported when requesting ' \ 'single items' # Calculate the input files to the level A cache. inputs = level_a_prime(read) # Work out the position of the --item argument in the command line # parameters. 
We will use this to cache not only outputs for this # execution, but also outputs for ones with a different target. item_index = None assert 'args' in locals() for index, arg in enumerate(args[:-1]): if arg in ('--item', '-T'): item_index = index + 1 break assert item_index is not None, 'failed to find required argument ' \ '--item (bug in runner?)' # We should already have the necessary inputs for the level B cache. assert cacheb is not None, 'level B cache not available, though the ' \ 'cache is enabled (bug in runner?)' assert ast_hash is not None, 'AST hash not pre-computed (bug in ' \ 'runner?)' def save(item, value): # Juggle the command line arguments to cache the predicted # arguments for a call that would generate this item. new_args = args[:item_index] + [item] + args[item_index + 1:] # Save entries in both caches. cachea.save(new_args, cwd, value, inputs) if item != 'Makefile' and item != 'camkes-gen.cmake': # We avoid caching the generated Makefile because it is not # safe. The inputs to generation of the Makefile are not only # the AST, but also the file names (`inputs`). If we cache it in # the level B cache we risk the following scenario: # # 1. Generate the Makefile, caching it in the level B cache; # 2. Modify the spec to import a file containing only white # space and/or comments; then # 3. Generate the Makefile, missing the level A cache, but # hitting the level B cache. # # At this point, the generated Makefile is incorrect because it # does not capture any dependencies on the imported file. We can # now introduce something semantically relevant into this file # (e.g. an Assembly block) and it will not be seen by the build # system. cacheb.save(ast_hash, new_args, set(options.elf) | extra_templates, value) else: def save(item, value): pass def apply_capdl_filters(): # Derive a set of usable ELF objects from the filenames we were passed. 
elfs = {} for e in options.elf: try: name = os.path.basename(e) if name in elfs: raise Exception( 'duplicate ELF files of name \'%s\' encountered' % name) elf = ELF(e, name, options.architecture) p = Perspective(phase=RUNNER, elf_name=name) group = p['group'] # Avoid inferring a TCB as we've already created our own. elf_spec = elf.get_spec(infer_tcb=False, infer_asid=False, pd=pds[group], use_large_frames=options.largeframe) obj_space.merge(elf_spec, label=group) elfs[name] = (e, elf) except Exception as inst: die('While opening \'%s\': %s' % (e, inst)) # It's only relevant to run these filters if the final target is CapDL. # Note, this will no longer be true if we add any other templates that # depend on a fully formed CapDL spec. Guarding this loop with an if # is just an optimisation and the conditional can be removed if # desired. filteroptions = FilterOptions( options.architecture, options.realtime, options.largeframe, options.largeframe_dma, options.default_priority, options.default_max_priority, options.default_criticality, options.default_max_criticality, options.default_affinity, options.default_period, options.default_budget, options.default_data, options.default_size_bits, options.debug_fault_handlers, options.fprovide_tcb_caps) for f in CAPDL_FILTERS: try: # Pass everything as named arguments to allow filters to # easily ignore what they don't want. 
f(ast=ast, obj_space=obj_space, cspaces=cspaces, elfs=elfs, options=filteroptions, shmem=shmem, fill_frames=fill_frames) except Exception as inst: die('While forming CapDL spec: %s' % inst) renderoptions = RenderOptions( options.file, options.verbosity, options.frpc_lock_elision, options.fspecialise_syscall_stubs, options.fprovide_tcb_caps, options.fsupport_init, options.largeframe, options.largeframe_dma, options.architecture, options.debug_fault_handlers, options.realtime) def instantiate_misc_template(): for (item, outfile) in (all_items - done_items): try: template = templates.lookup(item) if template: g = r.render(assembly, assembly, template, obj_space, None, shmem, kept_symbols, fill_frames, imported=read, options=renderoptions) save(item, g) done(g, outfile, item) except TemplateError as inst: die([ 'While rendering %s: %s' % (item, line) for line in inst.args ]) if options.item[0] in ('capdl', 'label-mapping') and options.data_structure_cache_dir is not None \ and len(options.outfile) == 1: # It's possible that data structures required to instantiate the capdl spec # were saved during a previous invocation of this script in the current build. cache_path = os.path.realpath(options.data_structure_cache_dir) pickle_path = os.path.join(cache_path, CAPDL_STATE_PICKLE) if os.path.isfile(pickle_path): with open(pickle_path, 'rb') as pickle_file: # Found a cached version of the necessary data structures obj_space, shmem, cspaces, pds, kept_symbols, fill_frames = pickle.load( pickle_file) apply_capdl_filters() instantiate_misc_template() # If a template wasn't instantiated, something went wrong, and we can't recover raise CAmkESError( "No template instantiated on capdl generation fastpath") # We're now ready to instantiate the template the user requested, but there # are a few wrinkles in the process. Namely, # 1. Template instantiation needs to be done in a deterministic order. 
The # runner is invoked multiple times and template code needs to be # allocated identical cap slots in each run. # 2. Components and connections need to be instantiated before any other # templates, regardless of whether they are the ones we are after. Some # other templates, such as the Makefile depend on the obj_space and # cspaces. # 3. All actual code templates, up to the template that was requested, # need to be instantiated. This is related to (1) in that the cap slots # allocated are dependent on what allocations have been done prior to a # given allocation call. # Instantiate the per-component source and header files. for i in assembly.composition.instances: # Don't generate any code for hardware components. if i.type.hardware: continue if i.address_space not in cspaces: p = Perspective(phase=RUNNER, instance=i.name, group=i.address_space) cnode = obj_space.alloc(seL4_CapTableObject, name=p['cnode'], label=i.address_space) cspaces[i.address_space] = CSpaceAllocator(cnode) pd = obj_space.alloc(lookup_architecture( options.architecture).vspace().object, name=p['pd'], label=i.address_space) pds[i.address_space] = pd for t in ('%s/source' % i.name, '%s/header' % i.name, '%s/linker' % i.name): try: template = templates.lookup(t, i) g = '' if template: g = r.render(i, assembly, template, obj_space, cspaces[i.address_space], shmem, kept_symbols, fill_frames, options=renderoptions, my_pd=pds[i.address_space]) save(t, g) for (item, outfile) in (all_items - done_items): if item == t: if not template: log.warning('Warning: no template for %s' % item) done(g, outfile, item) break except TemplateError as inst: die([ 'While rendering %s: %s' % (i.name, line) for line in inst.args ]) # Instantiate the per-connection files. 
for c in assembly.composition.connections: for t in (('%s/from/source' % c.name, c.from_ends), ('%s/from/header' % c.name, c.from_ends), ('%s/to/source' % c.name, c.to_ends), ('%s/to/header' % c.name, c.to_ends)): template = templates.lookup(t[0], c) if template is not None: for id, e in enumerate(t[1]): item = '%s/%d' % (t[0], id) g = '' try: g = r.render(e, assembly, template, obj_space, cspaces[e.instance.address_space], shmem, kept_symbols, fill_frames, options=renderoptions, my_pd=pds[e.instance.address_space]) except TemplateError as inst: die([ 'While rendering %s: %s' % (item, line) for line in inst.args ]) except jinja2.exceptions.TemplateNotFound: die('While rendering %s: missing template for %s' % (item, c.type.name)) save(item, g) for (target, outfile) in (all_items - done_items): if target == item: if not template: log.warning('Warning: no template for %s' % item) done(g, outfile, item) break # The following block handles instantiations of per-connection # templates that are neither a 'source' or a 'header', as handled # above. We assume that none of these need instantiation unless we are # actually currently looking for them (== options.item). That is, we # assume that following templates, like the CapDL spec, do not require # these templates to be rendered prior to themselves. # FIXME: This is a pretty ugly way of handling this. It would be nicer # for the runner to have a more general notion of per-'thing' templates # where the per-component templates, the per-connection template loop # above, and this loop could all be done in a single unified control # flow. for (item, outfile) in (all_items - done_items): for t in (('%s/from/' % c.name, c.from_ends), ('%s/to/' % c.name, c.to_ends)): if not item.startswith(t[0]): # This is not the item we're looking for. continue # If we've reached here then this is the exact item we're after. 
template = templates.lookup(item, c) if template is None: die('no registered template for %s' % item) for e in t[1]: try: g = r.render(e, assembly, template, obj_space, cspaces[e.instance.address_space], shmem, kept_symbols, fill_frames, options=renderoptions, my_pd=pds[e.instance.address_space]) save(item, g) done(g, outfile, item) except TemplateError as inst: die([ 'While rendering %s: %s' % (item, line) for line in inst.args ]) # Perform any per component special generation. This needs to happen last # as these template needs to run after all other capabilities have been # allocated for i in assembly.composition.instances: # Don't generate any code for hardware components. if i.type.hardware: continue assert i.address_space in cspaces SPECIAL_TEMPLATES = [('debug', 'debug'), ('simple', 'simple'), ('rump_config', 'rumprun')] for special in [ bl for bl in SPECIAL_TEMPLATES if conf[i.name].get(bl[0]) ]: for t in ('%s/%s' % (i.name, special[1]), ): try: template = templates.lookup(t, i) g = '' if template: g = r.render(i, assembly, template, obj_space, cspaces[i.address_space], shmem, kept_symbols, fill_frames, options=renderoptions, my_pd=pds[i.address_space]) save(t, g) for (item, outfile) in (all_items - done_items): if item == t: if not template: log.warning('Warning: no template for %s' % item) done(g, outfile, item) except TemplateError as inst: die([ 'While rendering %s: %s' % (i.name, line) for line in inst.args ]) if options.data_structure_cache_dir is not None: # At this point the capdl database is in the state required for applying capdl # filters and generating the capdl spec. In case the capdl spec isn't the current # target, we pickle the database here, so when the capdl spec is built, these # data structures don't need to be regenerated. 
cache_path = os.path.realpath(options.data_structure_cache_dir) pickle_path = os.path.join(cache_path, CAPDL_STATE_PICKLE) with open(pickle_path, 'wb') as pickle_file: pickle.dump( (obj_space, shmem, cspaces, pds, kept_symbols, fill_frames), pickle_file) for (item, outfile) in (all_items - done_items): if item in ('capdl', 'label-mapping'): apply_capdl_filters() # Instantiate any other, miscellaneous template. If we've reached this # point, we know the user did not request a code template. instantiate_misc_template() # Check if there are any remaining items not_done = all_items - done_items if len(not_done) > 0: for (item, outfile) in not_done: err.write('No valid element matching --item %s.\n' % item) return -1 return 0
def parse_args(argv, out, err):
    '''
    Parse the runner's command-line arguments.

    `argv` is the full argument vector (argv[0] is the program name and is
    skipped before parsing). `out` and `err` temporarily replace sys.stdout
    and sys.stderr while argparse runs, so argparse's own output (help text,
    errors) is routed to the caller-supplied streams.

    Returns the argparse Namespace of parsed options.
    '''
    parser = argparse.ArgumentParser(prog='python -m camkes.runner',
        description='instantiate templates based on a CAmkES specification')
    parser.add_argument('--file', '-f', help='Add this file to the list of '
        'input files to parse. Files are parsed in the order in which they are '
        'encountered on the command line.', type=argparse.FileType('r'),
        required=True)
    parser.add_argument('--cpp', action='store_true', help='Pre-process the '
        'source with CPP')
    parser.add_argument('--nocpp', action='store_false', dest='cpp',
        help='Do not pre-process the source with CPP')
    parser.add_argument('--cpp-flag', action='append', default=[],
        help='Specify a flag to pass to CPP')
    parser.add_argument('--import-path', '-I', help='Add this path to the list '
        'of paths to search for built-in imports. That is, add it to the list '
        'of directories that are searched to find the file "foo" when '
        'encountering an expression "import <foo>;".', action='append',
        default=[])
    # --quiet/--verbose/--debug all write the same 'verbosity' destination;
    # the last one given on the command line wins.
    parser.add_argument('--quiet', '-q', help='No output.', dest='verbosity',
        default=1, action='store_const', const=0)
    parser.add_argument('--verbose', '-v', help='Verbose output.',
        dest='verbosity', action='store_const', const=2)
    parser.add_argument('--debug', '-D', help='Extra verbose output.',
        dest='verbosity', action='store_const', const=3)
    parser.add_argument('--outfile', '-O', help='Output to the given file.',
        type=argparse.FileType('w'), required=True, action='append',
        default=[])
    parser.add_argument('--elf', '-E', help='ELF files to contribute to a '
        'CapDL specification.', action='append', default=[])
    parser.add_argument('--item', '-T', help='AST entity to produce code for.',
        required=True, action='append', default=[])
    parser.add_argument('--platform', '-p', help='Platform to produce code '
        'for. Pass \'help\' to see valid platforms.', default='seL4',
        choices=PLATFORMS)
    parser.add_argument('--templates', '-t', help='Extra directories to '
        'search for templates (before builtin templates).', action='append',
        default=[])
    parser.add_argument('--cache', '-c', action='store_true',
        help='Enable code generation cache.')
    parser.add_argument('--cache-dir',
        default=os.path.expanduser('~/.camkes/cache'),
        help='Set code generation cache location.')
    parser.add_argument('--version', action='version', version='%s %s' %
        (argv[0], version()))
    # The --f.../--fno-... pairs below share a dest, giving a default-on (or
    # default-off) feature flag with an explicit negation switch.
    parser.add_argument('--frpc-lock-elision', action='store_true',
        default=True, help='Enable lock elision optimisation in seL4RPC '
        'connector.')
    parser.add_argument('--fno-rpc-lock-elision', action='store_false',
        dest='frpc_lock_elision', help='Disable lock elision optimisation in '
        'seL4RPC connector.')
    parser.add_argument('--fspecialise-syscall-stubs', action='store_true',
        default=True, help='Generate inline syscall stubs to reduce overhead '
        'where possible.')
    parser.add_argument('--fno-specialise-syscall-stubs', action='store_false',
        dest='fspecialise_syscall_stubs', help='Always use the libsel4 syscall '
        'stubs.')
    parser.add_argument('--fprovide-tcb-caps', action='store_true',
        default=True, help='Hand out TCB caps to components, allowing them to '
        'exit cleanly.')
    parser.add_argument('--fno-provide-tcb-caps', action='store_false',
        dest='fprovide_tcb_caps', help='Do not hand out TCB caps, causing '
        'components to fault on exiting.')
    parser.add_argument('--fsupport-init', action='store_true', default=True,
        help='Support pre_init, post_init and friends.')
    parser.add_argument('--fno-support-init', action='store_false',
        dest='fsupport_init', help='Do not support pre_init, post_init and '
        'friends.')
    parser.add_argument('--default-priority', type=int, default=254,
        help='Default component thread priority.')
    parser.add_argument('--default-max-priority', type=int, default=254,
        help='Default component thread maximum priority.')
    parser.add_argument('--default-criticality', type=int, default=1,
        help='Default component thread criticality.')
    parser.add_argument('--default-max-criticality', type=int, default=1,
        help='Default component thread maximum criticality.')
    parser.add_argument('--default-affinity', type=int, default=0,
        help='Default component thread affinity.')
    parser.add_argument('--default-period', type=int, default=10000,
        help='Default component thread scheduling context period.')
    parser.add_argument('--default-budget', type=int, default=10000,
        help='Default component thread scheduling context budget.')
    parser.add_argument('--default-data', type=int, default=0,
        help='Default component thread scheduling context data.')
    parser.add_argument('--default-size_bits', type=int, default=8,
        help='Default scheduling context size bits.')
    parser.add_argument('--prune', action='store_true',
        help='Minimise the number of functions in generated C files.')
    parser.add_argument('--largeframe', action='store_true',
        help='Try to use large frames when possible.')
    parser.add_argument('--architecture', '--arch', default='aarch32',
        type=lambda x: type('')(x).lower(),
        choices=('aarch32', 'arm_hyp', 'ia32', 'x86_64'),
        help='Target architecture.')
    parser.add_argument('--makefile-dependencies', '-MD',
        type=argparse.FileType('w'), help='Write Makefile dependency rule to '
        'FILE')
    # Typo fixed in help text below: 'refering' -> 'referring'.
    parser.add_argument('--allow-forward-references', action='store_true',
        help='allow referring to objects in your specification that are '
        'defined after the point at which they are referenced')
    parser.add_argument('--disallow-forward-references', action='store_false',
        dest='allow_forward_references', help='only permit references in '
        'specifications to objects that have been defined before that point')
    parser.add_argument('--debug-fault-handlers', action='store_true',
        help='provide fault handlers to decode cap and VM faults for the '
        'purposes of debugging')
    parser.add_argument('--largeframe-dma', action='store_true',
        help='promote frames backing DMA pools to large frames where possible')
    parser.add_argument('--realtime', action='store_true',
        help='Target realtime seL4.')
    parser.add_argument('--data-structure-cache-dir', type=str,
        help='Directory for storing pickled datastructures for re-use between multiple '
        'invocations of the camkes tool in a single build. The user should delete '
        'this directory between builds.')

    # Juggle the standard streams either side of parsing command-line arguments
    # because argparse provides no mechanism to control this.
    old_out = sys.stdout
    old_err = sys.stderr
    sys.stdout = out
    sys.stderr = err
    options = parser.parse_args(argv[1:])
    sys.stdout = old_out
    sys.stderr = old_err

    return options
def main(argv, out, err):
    '''
    Entry point of the CAmkES runner.

    Parses the command line, optionally consults the level A (source-keyed)
    and level B (AST-keyed) compilation caches, parses the input spec, and
    renders the requested templates, writing each to its paired --outfile.

    `argv` is the raw argument vector; `out` and `err` are the streams used
    for normal and error output. Returns 0 on success, -1 on error (note that
    the nested `done` helper may also terminate the process via sys.exit once
    all requested items have been produced).
    '''
    # We need a UTF-8 locale, so bail out if we don't have one. More
    # specifically, things like the version() computation traverse the file
    # system and, if they hit a UTF-8 filename, they try to decode it into your
    # preferred encoding and trigger an exception.
    encoding = locale.getpreferredencoding().lower()
    if encoding not in ('utf-8', 'utf8'):
        err.write('CAmkES uses UTF-8 encoding, but your locale\'s preferred '
                  'encoding is %s. You can override your locale with the LANG '
                  'environment variable.\n' % encoding)
        return -1

    options = parse_args(argv, out, err)

    # Ensure we were supplied equal items and outfiles
    if len(options.outfile) != len(options.item):
        err.write('Different number of items and outfiles. Required one outfile location '
                  'per item requested.\n')
        return -1

    # No duplicates in items or outfiles
    if len(set(options.item)) != len(options.item):
        err.write('Duplicate items requested through --item.\n')
        return -1
    if len(set(options.outfile)) != len(options.outfile):
        # Typo fixed in the message below: 'requrested' -> 'requested'.
        err.write('Duplicate outfiles requested through --outfile.\n')
        return -1

    # Save us having to pass debugging everywhere.
    die = functools.partial(_die, options)

    log.set_verbosity(options.verbosity)

    cwd = os.getcwd()

    # Build a list of item/outfile pairs that we have yet to match and process
    all_items = set(zip(options.item, options.outfile))
    done_items = set([])

    # Construct the compilation caches if requested.
    cachea = None
    cacheb = None
    if options.cache:
        # Construct a modified version of the command line arguments that we'll
        # use in the keys to the caches. Essentially we elide --outfile and its
        # parameter under the assumption that this value is never used in code
        # generation. The purpose of this is to allow us to successfully cache
        # ancillary outputs that we generate along the way to the current
        # output. If we were to include --outfile in the key, future attempts
        # to generate these ancillary outputs would unnecessarily miss the
        # entries generated by this execution.
        args = []
        skip = False
        for index, arg in enumerate(argv[1:]):
            if skip:
                skip = False
                continue
            if arg in ('--outfile', '-O'):
                skip = True
                continue
            args.append(arg)
        cachea = LevelACache(os.path.join(options.cache_dir, version(),
                                          'cachea'))
        cacheb = LevelBCache(os.path.join(options.cache_dir, version(),
                                          'cacheb'))

    def done(s, file, item):
        # Write a rendered output `s` to `file`, flush the caches, and record
        # the item as completed. Exits the process when no items remain.
        ret = 0
        if s:
            file.write(s)
            file.close()
        if cachea is not None:
            try:
                cachea.flush()
            except sqlite3.OperationalError as e:
                # The following suppresses two spurious errors:
                # 1. The database is locked. In a large, parallel build, writes
                #    to the level A cache are heavily contended and this error
                #    can occur.
                # 2. The database structure is unexpected. If the CAmkES
                #    sources have changed *while* the runner was executing,
                #    the level A cache can be looking in a different place to
                #    where the cache was created.
                # Both of these are non-critical (will just result in a
                # potential future cache miss) so there's no need to alarm the
                # user.
                if re.search(r'database is locked', str(e)) is not None or \
                        re.search(r'no such table', str(e)) is not None:
                    log.debug('failed to flush level A cache: %s' % str(e))
                else:
                    raise
        if cacheb is not None:
            try:
                cacheb.flush()
            except sqlite3.OperationalError as e:
                # As above for the level B cache.
                if re.search(r'database is locked', str(e)):
                    log.debug('failed to flush level B cache: %s' % str(e))
                else:
                    raise
        done_items.add((item, file))
        if len(all_items - done_items) == 0:
            sys.exit(ret)

    # Try to find this output in the level A cache if possible. This check will
    # 'hit' if the source files representing the input spec are identical to
    # some previously observed execution.
    if cachea is not None:
        assert 'args' in locals()
        # Typo fixed in the message below: 'requestiong' -> 'requesting'.
        assert len(options.outfile) == 1, 'level A cache only supported when requesting ' \
            'single items'
        output = cachea.load(args, cwd)
        if output is not None:
            log.debug('Retrieved %(platform)s/%(item)s from level A cache' %
                      options.__dict__)
            done(output, options.outfile[0], options.item[0])

    filename = os.path.abspath(options.file.name)

    try:
        # Build the parser options
        parse_options = ParserOptions(options.cpp, options.cpp_flag,
                                      options.import_path, options.verbosity,
                                      options.allow_forward_references)
        ast, read = parse_file_cached(filename,
                                      options.data_structure_cache_dir,
                                      parse_options)
    except (ASTError, ParseError) as e:
        die(e.args)

    # Locate the assembly.
    assembly = ast.assembly
    if assembly is None:
        die('No assembly found')

    # Do some extra checks if the user asked for verbose output.
    if options.verbosity >= 2:
        # Try to catch type mismatches in attribute settings. Note that it is
        # not possible to conclusively evaluate type correctness because the
        # attributes' type system is (deliberately) too loose. That is, the
        # type of an attribute can be an uninterpreted C type the user will
        # provide post hoc.
        for i in assembly.composition.instances:
            for a in i.type.attributes:
                value = assembly.configuration[i.name].get(a.name)
                if value is not None:
                    if a.type == 'string' and not \
                            isinstance(value, six.string_types):
                        log.warning('attribute %s.%s has type string but is '
                                    'set to a value that is not a string' %
                                    (i.name, a.name))
                    elif a.type == 'int' and not \
                            isinstance(value, numbers.Number):
                        log.warning('attribute %s.%s has type int but is set '
                                    'to a value that is not an integer' %
                                    (i.name, a.name))

    obj_space = ObjectAllocator()
    obj_space.spec.arch = options.architecture
    cspaces = {}
    pds = {}
    conf = assembly.configuration
    shmem = collections.defaultdict(ShmemFactory())
    kept_symbols = {}
    fill_frames = {}

    templates = Templates(options.platform)
    [templates.add_root(t) for t in options.templates]
    try:
        r = Renderer(templates, options.cache, options.cache_dir)
    except jinja2.exceptions.TemplateSyntaxError as e:
        die('template syntax error: %s' % e)

    # The user may have provided their own connector definitions (with
    # associated) templates, in which case they won't be in the built-in lookup
    # dictionary. Let's add them now. Note, definitions here that conflict with
    # existing lookup entries will overwrite the existing entries. Note that
    # the extra check that the connector has some templates is just an
    # optimisation; the templates module handles connectors without templates
    # just fine.
    extra_templates = set()
    for c in (x for x in ast.items if isinstance(x, Connector) and
              (x.from_template is not None or x.to_template is not None)):
        try:
            # Find a connection that uses this type.
            connection = next(x for x in ast if isinstance(x, Connection) and
                              x.type == c)
            # Add the custom templates and update our collection of read
            # inputs. It is necessary to update the read set here to avoid
            # false compilation cache hits when the source of a custom template
            # has changed.
            extra_templates |= templates.add(c, connection)
        except TemplateError as e:
            die('while adding connector %s: %s' % (c.name, e))
        except StopIteration:
            # No connections use this type. There's no point adding it to the
            # template lookup dictionary.
            pass

    # Check if our current target is in the level B cache. The level A cache
    # will 'miss' and this one will 'hit' when the input spec is identical to
    # some previously observed execution modulo a semantically irrelevant
    # element (e.g. an introduced comment).
    ast_hash = None
    if cacheb is not None:
        ast_hash = level_b_prime(ast)
        assert 'args' in locals()
        assert len(options.item) == 1, 'level B cache only supported when requesting ' \
            'single items'
        output = cacheb.load(ast_hash, args,
                             set(options.elf) | extra_templates)
        if output is not None:
            log.debug('Retrieved %(platform)s/%(item)s from level B cache' %
                      options.__dict__)
            done(output, options.outfile[0], options.item[0])

    # Add custom templates.
    read |= extra_templates

    # Add the CAmkES sources themselves to the accumulated list of inputs.
    read |= set(path for path, _ in sources())

    # Add any ELF files we were passed as inputs.
    read |= set(options.elf)

    # Write a Makefile dependency rule if requested.
    if options.makefile_dependencies is not None:
        options.makefile_dependencies.write('%s: \\\n %s\n' %
                                            (filename, ' \\\n '.join(sorted(read))))

    # If we have a cache, allow outputs to be saved to it.
    if options.cache:
        assert cachea is not None, 'level A cache not available, though the ' \
            'cache is enabled (bug in runner?)'
        # The logic of this cache currently only works when a single item is requested
        # on the command line
        assert len(options.item) == 1, 'level A cache only supported when requesting ' \
            'single items'

        # Calculate the input files to the level A cache.
        inputs = level_a_prime(read)

        # Work out the position of the --item argument in the command line
        # parameters. We will use this to cache not only outputs for this
        # execution, but also outputs for ones with a different target.
        item_index = None
        assert 'args' in locals()
        for index, arg in enumerate(args[:-1]):
            if arg in ('--item', '-T'):
                item_index = index + 1
                break
        assert item_index is not None, 'failed to find required argument ' \
            '--item (bug in runner?)'

        # We should already have the necessary inputs for the level B cache.
        assert cacheb is not None, 'level B cache not available, though the ' \
            'cache is enabled (bug in runner?)'
        assert ast_hash is not None, 'AST hash not pre-computed (bug in ' \
            'runner?)'

        def save(item, value):
            # Juggle the command line arguments to cache the predicted
            # arguments for a call that would generate this item.
            new_args = args[:item_index] + [item] + args[item_index + 1:]

            # Save entries in both caches.
            cachea.save(new_args, cwd, value, inputs)
            if item != 'Makefile' and item != 'camkes-gen.cmake':
                # We avoid caching the generated Makefile because it is not
                # safe. The inputs to generation of the Makefile are not only
                # the AST, but also the file names (`inputs`). If we cache it in
                # the level B cache we risk the following scenario:
                #
                #  1. Generate the Makefile, caching it in the level B cache;
                #  2. Modify the spec to import a file containing only white
                #     space and/or comments; then
                #  3. Generate the Makefile, missing the level A cache, but
                #     hitting the level B cache.
                #
                # At this point, the generated Makefile is incorrect because it
                # does not capture any dependencies on the imported file. We can
                # now introduce something semantically relevant into this file
                # (e.g. an Assembly block) and it will not be seen by the build
                # system.
                cacheb.save(ast_hash, new_args,
                            set(options.elf) | extra_templates, value)
    else:
        def save(item, value):
            # Caching disabled: saving outputs is a no-op.
            pass

    def apply_capdl_filters():
        # Mutate obj_space/cspaces into their final form for CapDL generation
        # by merging in the supplied ELF files and running the CapDL filters.
        # Derive a set of usable ELF objects from the filenames we were passed.
        elfs = {}
        for e in options.elf:
            try:
                name = os.path.basename(e)
                if name in elfs:
                    raise Exception('duplicate ELF files of name \'%s\' encountered' % name)
                elf = ELF(e, name, options.architecture)
                p = Perspective(phase=RUNNER, elf_name=name)
                group = p['group']
                # Avoid inferring a TCB as we've already created our own.
                elf_spec = elf.get_spec(infer_tcb=False, infer_asid=False,
                                        pd=pds[group],
                                        use_large_frames=options.largeframe)
                obj_space.merge(elf_spec, label=group)
                elfs[name] = (e, elf)
            except Exception as inst:
                die('While opening \'%s\': %s' % (e, inst))

        filteroptions = FilterOptions(options.architecture, options.realtime,
                                      options.largeframe,
                                      options.largeframe_dma,
                                      options.default_priority,
                                      options.default_max_priority,
                                      options.default_affinity,
                                      options.default_period,
                                      options.default_budget,
                                      options.default_data,
                                      options.default_size_bits,
                                      options.debug_fault_handlers,
                                      options.fprovide_tcb_caps)
        for f in CAPDL_FILTERS:
            try:
                # Pass everything as named arguments to allow filters to
                # easily ignore what they don't want.
                f(ast=ast, obj_space=obj_space, cspaces=cspaces, elfs=elfs,
                  options=filteroptions, shmem=shmem, fill_frames=fill_frames)
            except Exception as inst:
                die('While forming CapDL spec: %s' % inst)

    renderoptions = RenderOptions(options.file, options.verbosity,
                                  options.frpc_lock_elision,
                                  options.fspecialise_syscall_stubs,
                                  options.fprovide_tcb_caps,
                                  options.fsupport_init, options.largeframe,
                                  options.largeframe_dma,
                                  options.architecture,
                                  options.debug_fault_handlers,
                                  options.realtime)

    def instantiate_misc_template():
        # Render any still-outstanding item whose template is a plain
        # (non per-component, non per-connection) template.
        for (item, outfile) in (all_items - done_items):
            try:
                template = templates.lookup(item)
                if template:
                    g = r.render(assembly, assembly, template, obj_space, None,
                                 shmem, kept_symbols, fill_frames,
                                 imported=read, options=renderoptions)
                    save(item, g)
                    done(g, outfile, item)
            except TemplateError as inst:
                die(rendering_error(item, inst))

    if options.item[0] in ('capdl', 'label-mapping') and options.data_structure_cache_dir is not None \
            and len(options.outfile) == 1:
        # It's possible that data structures required to instantiate the capdl spec
        # were saved during a previous invocation of this script in the current build.
        cache_path = os.path.realpath(options.data_structure_cache_dir)
        pickle_path = os.path.join(cache_path, CAPDL_STATE_PICKLE)

        if os.path.isfile(pickle_path):
            with open(pickle_path, 'rb') as pickle_file:
                # Found a cached version of the necessary data structures
                obj_space, shmem, cspaces, pds, kept_symbols, fill_frames = pickle.load(pickle_file)
                apply_capdl_filters()
                instantiate_misc_template()

                # If a template wasn't instantiated, something went wrong, and we can't recover
                raise CAmkESError("No template instantiated on capdl generation fastpath")

    # We're now ready to instantiate the template the user requested, but there
    # are a few wrinkles in the process. Namely,
    #  1. Template instantiation needs to be done in a deterministic order. The
    #     runner is invoked multiple times and template code needs to be
    #     allocated identical cap slots in each run.
    #  2. Components and connections need to be instantiated before any other
    #     templates, regardless of whether they are the ones we are after. Some
    #     other templates, such as the Makefile depend on the obj_space and
    #     cspaces.
    #  3. All actual code templates, up to the template that was requested,
    #     need to be instantiated. This is related to (1) in that the cap slots
    #     allocated are dependent on what allocations have been done prior to a
    #     given allocation call.

    # Instantiate the per-component source and header files.
    for i in assembly.composition.instances:
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue

        if i.address_space not in cspaces:
            p = Perspective(phase=RUNNER, instance=i.name,
                            group=i.address_space)
            cnode = obj_space.alloc(seL4_CapTableObject,
                                    name=p['cnode'], label=i.address_space)
            cspaces[i.address_space] = CSpaceAllocator(cnode)
            pd = obj_space.alloc(lookup_architecture(options.architecture).vspace().object,
                                 name=p['pd'], label=i.address_space)
            pds[i.address_space] = pd

        for t in ('%s/source' % i.name,
                  '%s/header' % i.name,
                  '%s/c_environment_source' % i.name,
                  '%s/cakeml_start_source' % i.name,
                  '%s/cakeml_end_source' % i.name,
                  '%s/linker' % i.name):
            try:
                template = templates.lookup(t, i)
                g = ''
                if template:
                    g = r.render(i, assembly, template, obj_space,
                                 cspaces[i.address_space], shmem, kept_symbols,
                                 fill_frames, options=renderoptions,
                                 my_pd=pds[i.address_space])
                save(t, g)
                for (item, outfile) in (all_items - done_items):
                    if item == t:
                        if not template:
                            log.warning('Warning: no template for %s' % item)
                        done(g, outfile, item)
                        break
            except TemplateError as inst:
                die(rendering_error(i.name, inst))

    # Instantiate the per-connection files.
    for c in assembly.composition.connections:
        for t in (('%s/from/source' % c.name, c.from_ends),
                  ('%s/from/header' % c.name, c.from_ends),
                  ('%s/to/source' % c.name, c.to_ends),
                  ('%s/to/header' % c.name, c.to_ends)):
            template = templates.lookup(t[0], c)
            if template is not None:
                for id, e in enumerate(t[1]):
                    item = '%s/%d' % (t[0], id)
                    g = ''
                    try:
                        g = r.render(e, assembly, template, obj_space,
                                     cspaces[e.instance.address_space], shmem,
                                     kept_symbols, fill_frames,
                                     options=renderoptions,
                                     my_pd=pds[e.instance.address_space])
                    except TemplateError as inst:
                        die(rendering_error(item, inst))
                    except jinja2.exceptions.TemplateNotFound:
                        die('While rendering %s: missing template for %s' %
                            (item, c.type.name))
                    save(item, g)
                    for (target, outfile) in (all_items - done_items):
                        if target == item:
                            if not template:
                                log.warning('Warning: no template for %s' % item)
                            done(g, outfile, item)
                            break

        # The following block handles instantiations of per-connection
        # templates that are neither a 'source' or a 'header', as handled
        # above. We assume that none of these need instantiation unless we are
        # actually currently looking for them (== options.item). That is, we
        # assume that following templates, like the CapDL spec, do not require
        # these templates to be rendered prior to themselves.
        # FIXME: This is a pretty ugly way of handling this. It would be nicer
        # for the runner to have a more general notion of per-'thing' templates
        # where the per-component templates, the per-connection template loop
        # above, and this loop could all be done in a single unified control
        # flow.
        for (item, outfile) in (all_items - done_items):
            for t in (('%s/from/' % c.name, c.from_ends),
                      ('%s/to/' % c.name, c.to_ends)):
                if not item.startswith(t[0]):
                    # This is not the item we're looking for.
                    continue
                # If we've reached here then this is the exact item we're after.
                template = templates.lookup(item, c)
                if template is None:
                    die('no registered template for %s' % item)
                for e in t[1]:
                    try:
                        g = r.render(e, assembly, template, obj_space,
                                     cspaces[e.instance.address_space], shmem,
                                     kept_symbols, fill_frames,
                                     options=renderoptions,
                                     my_pd=pds[e.instance.address_space])
                        save(item, g)
                        done(g, outfile, item)
                    except TemplateError as inst:
                        die(rendering_error(item, inst))

    # Perform any per component special generation. This needs to happen last
    # as these template needs to run after all other capabilities have been
    # allocated
    for i in assembly.composition.instances:
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue

        assert i.address_space in cspaces
        SPECIAL_TEMPLATES = [('debug', 'debug'), ('simple', 'simple'),
                             ('rump_config', 'rumprun')]
        for special in [bl for bl in SPECIAL_TEMPLATES if conf[i.name].get(bl[0])]:
            for t in ('%s/%s' % (i.name, special[1]),):
                try:
                    template = templates.lookup(t, i)
                    g = ''
                    if template:
                        g = r.render(i, assembly, template, obj_space,
                                     cspaces[i.address_space], shmem,
                                     kept_symbols, fill_frames,
                                     options=renderoptions,
                                     my_pd=pds[i.address_space])
                    save(t, g)
                    for (item, outfile) in (all_items - done_items):
                        if item == t:
                            if not template:
                                log.warning('Warning: no template for %s' % item)
                            done(g, outfile, item)
                except TemplateError as inst:
                    die(rendering_error(i.name, inst))

    if options.data_structure_cache_dir is not None:
        # At this point the capdl database is in the state required for applying capdl
        # filters and generating the capdl spec. In case the capdl spec isn't the current
        # target, we pickle the database here, so when the capdl spec is built, these
        # data structures don't need to be regenerated.
        cache_path = os.path.realpath(options.data_structure_cache_dir)
        pickle_path = os.path.join(cache_path, CAPDL_STATE_PICKLE)

        with open(pickle_path, 'wb') as pickle_file:
            pickle.dump((obj_space, shmem, cspaces, pds, kept_symbols,
                         fill_frames), pickle_file)

    for (item, outfile) in (all_items - done_items):
        if item in ('capdl', 'label-mapping'):
            apply_capdl_filters()

    # Instantiate any other, miscellaneous template. If we've reached this
    # point, we know the user did not request a code template.
    instantiate_misc_template()

    # Check if there are any remaining items
    not_done = all_items - done_items
    if len(not_done) > 0:
        for (item, outfile) in not_done:
            err.write('No valid element matching --item %s.\n' % item)
        return -1

    return 0
def parse_args(argv, out, err):
    '''Parse the runner's command line.

    `argv` is the full argument vector (argv[0] is the program name), and
    `out`/`err` are file-like objects temporarily substituted for the standard
    streams while argparse runs, because argparse writes its help and error
    output directly to sys.stdout/sys.stderr.

    Returns a tuple (options, queries, filteroptions) where `options` is the
    argparse namespace, `queries` comes from the separate query-parser pass
    over the leftover arguments, and `filteroptions` is a FilterOptions built
    from the parsed values.
    '''
    parser = argparse.ArgumentParser(
        prog='python -m camkes.runner',
        description='instantiate templates based on a CAmkES specification')
    parser.add_argument('--cpp', action='store_true', help='Pre-process the '
                        'source with CPP')
    parser.add_argument('--nocpp', action='store_false', dest='cpp',
                        help='Do not pre-process the source with CPP')
    parser.add_argument('--cpp-flag', action='append', default=[],
                        help='Specify a flag to pass to CPP')
    parser.add_argument(
        '--import-path', '-I', help='Add this path to the list '
        'of paths to search for built-in imports. That is, add it to the list '
        'of directories that are searched to find the file "foo" when '
        'encountering an expression "import <foo>;".', action='append',
        default=[])
    parser.add_argument('--quiet', '-q', help='No output.', dest='verbosity',
                        default=1, action='store_const', const=0)
    parser.add_argument('--verbose', '-v', help='Verbose output.',
                        dest='verbosity', action='store_const', const=2)
    parser.add_argument('--debug', '-D', help='Extra verbose output.',
                        dest='verbosity', action='store_const', const=3)
    parser.add_argument('--outfile', '-O', help='Output to the given file.',
                        type=argparse.FileType('w'), required=True,
                        action='append', default=[])
    parser.add_argument(
        '--verification-base-name', type=str,
        help='Identifier to use when generating Isabelle theory files')
    parser.add_argument('--elf', '-E', help='ELF files to contribute to a '
                        'CapDL specification.', action='append', default=[])
    parser.add_argument('--item', '-T', help='AST entity to produce code for.',
                        required=True, action='append', default=[])
    parser.add_argument('--platform', '-p', help='Platform to produce code '
                        'for. Pass \'help\' to see valid platforms.',
                        default='seL4', choices=PLATFORMS)
    parser.add_argument('--templates', '-t', help='Extra directories to '
                        'search for templates (before builtin templates).',
                        action='append', default=[])
    parser.add_argument('--cache', '-c', action='store_true',
                        help='Enable code generation cache.')
    parser.add_argument('--cache-dir',
                        default=os.path.expanduser('~/.camkes/cache'),
                        help='Set code generation cache location.')
    parser.add_argument('--version', action='version', version='%s %s' %
                        (argv[0], version()))
    parser.add_argument('--frpc-lock-elision', action='store_true',
                        default=True, help='Enable lock elision optimisation '
                        'in seL4RPC connector.')
    parser.add_argument('--fno-rpc-lock-elision', action='store_false',
                        dest='frpc_lock_elision', help='Disable lock elision '
                        'optimisation in seL4RPC connector.')
    parser.add_argument(
        '--fspecialise-syscall-stubs', action='store_true', default=True,
        help='Generate inline syscall stubs to reduce overhead '
        'where possible.')
    parser.add_argument('--fno-specialise-syscall-stubs', action='store_false',
                        dest='fspecialise_syscall_stubs', help='Always use '
                        'the libsel4 syscall stubs.')
    parser.add_argument(
        '--fprovide-tcb-caps', action='store_true', default=True,
        help='Hand out TCB caps to components, allowing them to '
        'exit cleanly.')
    parser.add_argument('--fno-provide-tcb-caps', action='store_false',
                        dest='fprovide_tcb_caps', help='Do not hand out TCB '
                        'caps, causing components to fault on exiting.')
    parser.add_argument('--fsupport-init', action='store_true', default=True,
                        help='Support pre_init, post_init and friends.')
    parser.add_argument('--fno-support-init', action='store_false',
                        dest='fsupport_init', help='Do not support pre_init, '
                        'post_init and friends.')
    parser.add_argument('--default-priority', type=int, default=254,
                        help='Default component thread priority.')
    parser.add_argument('--default-max-priority', type=int, default=254,
                        help='Default component thread maximum priority.')
    parser.add_argument('--default-affinity', type=int, default=0,
                        help='Default component thread affinity.')
    parser.add_argument(
        '--default-period', type=int, default=10000,
        help='Default component thread scheduling context period.')
    parser.add_argument(
        '--default-budget', type=int, default=10000,
        help='Default component thread scheduling context budget.')
    parser.add_argument(
        '--default-data', type=int, default=0,
        help='Default component thread scheduling context data.')
    parser.add_argument('--default-size_bits', type=int, default=8,
                        help='Default scheduling context size bits.')
    parser.add_argument('--default-stack-size', type=int, default=16384,
                        help='Default stack size of each thread.')
    parser.add_argument(
        '--prune', action='store_true',
        help='Minimise the number of functions in generated C files.')
    parser.add_argument('--largeframe', action='store_true',
                        help='Try to use large frames when possible.')
    parser.add_argument('--architecture', '--arch', default='aarch32',
                        type=lambda x: type('')(x).lower(),
                        choices=('aarch32', 'arm_hyp', 'ia32', 'x86_64',
                                 'aarch64'),
                        help='Target architecture.')
    parser.add_argument('--makefile-dependencies', '-MD',
                        type=argparse.FileType('w'),
                        help='Write Makefile dependency rule to FILE')
    parser.add_argument(
        '--allow-forward-references', action='store_true',
        help='allow refering to objects in your specification that are '
        'defined after the point at which they are referenced')
    parser.add_argument(
        '--disallow-forward-references', action='store_false',
        dest='allow_forward_references', help='only permit references in '
        'specifications to objects that have been defined before that point')
    parser.add_argument(
        '--debug-fault-handlers', action='store_true',
        help='provide fault handlers to decode cap and VM faults for the '
        'purposes of debugging')
    parser.add_argument(
        '--largeframe-dma', action='store_true',
        help='promote frames backing DMA pools to large frames where possible')
    parser.add_argument('--realtime', action='store_true',
                        help='Target realtime seL4.')

    object_state_group = parser.add_mutually_exclusive_group()
    object_state_group.add_argument(
        '--load-object-state', type=argparse.FileType('rb'),
        help='load previously-generated cap and object state')
    object_state_group.add_argument(
        '--save-object-state', type=argparse.FileType('wb'),
        help='save generated cap and object state to this file')

    parser.add_argument('--save-ast', type=argparse.FileType('wb'),
                        help='cache the ast during the build')

    # To get the AST, there should be either a pickled AST or a file to parse
    adl_group = parser.add_mutually_exclusive_group(required=True)
    adl_group.add_argument('--load-ast', type=argparse.FileType('rb'),
                           help='load the cached ast during the build')
    adl_group.add_argument(
        '--file', '-f', help='Add this file to the list of '
        'input files to parse. Files are parsed in the order in which they are '
        'encountered on the command line.', type=argparse.FileType('r'))

    # Juggle the standard streams either side of parsing command-line arguments
    # because argparse provides no mechanism to control this.
    old_out = sys.stdout
    old_err = sys.stderr
    sys.stdout = out
    sys.stderr = err
    options, argv = parser.parse_known_args(argv[1:])
    queries, argv = parse_query_parser_args(argv)
    sys.stdout = old_out
    sys.stderr = old_err

    # Anything left over after both parsers have had a go is a usage error.
    if argv:
        print("Unparsed arguments present:\n{0}".format(argv))
        parser.print_help()
        print_query_parser_help()
        sys.exit(1)

    filteroptions = FilterOptions(
        options.architecture, options.realtime, options.largeframe,
        options.largeframe_dma, options.default_priority,
        options.default_max_priority, options.default_affinity,
        options.default_period, options.default_budget, options.default_data,
        options.default_size_bits, options.debug_fault_handlers,
        options.fprovide_tcb_caps)

    # Check that verification_base_name would be a valid identifer before
    # our templates try to use it.
    # Note: the pattern is anchored with \Z, not $, because $ also matches
    # just before a trailing newline and would let e.g. 'name\n' through.
    if options.verification_base_name is not None:
        if not re.match(r'[a-zA-Z][a-zA-Z0-9_]*\Z',
                        options.verification_base_name):
            parser.error(
                'Not a valid identifer for --verification-base-name: %r' %
                options.verification_base_name)

    return options, queries, filteroptions
def main():
    '''Entry point of the CAmkES runner.

    Parses the command line, reads and parses the input specification
    (consulting the code-generation cache where enabled), resolves the AST,
    then instantiates templates in a deterministic order until the item the
    user requested (--item) has been rendered and written out. Exits via
    done() on success or die() on any error.

    NOTE(review): this variant uses Python 2-only constructs
    ('print >>', 'raw_input', builtin 'reduce'); it cannot run under Python 3.
    '''
    options = parse_args(constants.TOOL_RUNNER)

    # Save us having to pass debugging everywhere.
    die = functools.partial(_die, options.verbosity >= 3)

    log.set_verbosity(options.verbosity)

    def done(s):
        # Write the rendered output `s` (if any) to the requested output file,
        # optionally drop the user into $EDITOR on it, and terminate the
        # process. This never returns.
        ret = 0
        if s:
            print >>options.outfile, s
            options.outfile.close()
            if options.post_render_edit and \
                    raw_input('Edit rendered template %s [y/N]? ' % \
                    options.outfile.name) == 'y':
                editor = os.environ.get('EDITOR', 'vim')
                ret = subprocess.call([editor, options.outfile.name])
        sys.exit(ret)

    if not options.platform or options.platform in ['?', 'help'] \
            or options.platform not in PLATFORMS:
        die('Valid --platform arguments are %s' % ', '.join(PLATFORMS))

    if not options.file or len(options.file) > 1:
        die('A single input file must be provided for this operation')

    try:
        profiler = get_profiler(options.profiler, options.profile_log)
    except Exception as inst:
        die('Failed to create profiler: %s' % str(inst))

    # Construct the compilation cache if requested.
    cache = None
    if options.cache in ['on', 'readonly', 'writeonly']:
        cache = Cache(options.cache_dir)

    f = options.file[0]
    try:
        with profiler('Reading input'):
            s = f.read()
        # Try to find this output in the compilation cache if possible. This is
        # one of two places that we check in the cache. This check will 'hit'
        # if the source files representing the input spec are identical to some
        # previous execution.
        if options.cache in ['on', 'readonly']:
            with profiler('Looking for a cached version of this output'):
                key = [version(), os.path.abspath(f.name), s,
                    cache_relevant_options(options), options.platform,
                    options.item]
                value = cache.get(key)
                if value is not None and value.valid():
                    # Cache hit.
                    assert isinstance(value, FileSet), \
                        'illegally cached a value for %s that is not a FileSet' % options.item
                    log.debug('Retrieved %(platform)s.%(item)s from cache' % \
                        options.__dict__)
                    done(value.output)
        with profiler('Parsing input'):
            ast = parser.parse_to_ast(s, options.cpp, options.cpp_flag,
                options.ply_optimise)
            parser.assign_filenames(ast, f.name)
    except parser.CAmkESSyntaxError as e:
        e.set_column(s)
        die('%s:%s' % (f.name, str(e)))
    except Exception as inst:
        die('While parsing \'%s\': %s' % (f.name, str(inst)))

    try:
        for t in AST_TRANSFORMS[PRE_RESOLUTION]:
            with profiler('Running AST transform %s' % t.__name__):
                ast = t(ast)
    except Exception as inst:
        die('While transforming AST: %s' % str(inst))

    try:
        with profiler('Resolving imports'):
            ast, imported = parser.resolve_imports(ast, \
                os.path.dirname(os.path.abspath(f.name)), options.import_path,
                options.cpp, options.cpp_flag, options.ply_optimise)
    except Exception as inst:
        die('While resolving imports of \'%s\': %s' % (f.name, str(inst)))

    try:
        with profiler('Combining assemblies'):
            # if there are multiple assemblies, combine them now
            compose_assemblies(ast)
    except Exception as inst:
        die('While combining assemblies: %s' % str(inst))

    # Keep a pre-dedupe copy of the AST: it forms part of the AST-keyed cache
    # key used below.
    with profiler('Caching original AST'):
        orig_ast = deepcopy(ast)
    with profiler('Deduping AST'):
        ast = parser.dedupe(ast)
    try:
        with profiler('Resolving references'):
            ast = parser.resolve_references(ast)
    except Exception as inst:
        die('While resolving references of \'%s\': %s' % (f.name, str(inst)))

    try:
        with profiler('Collapsing references'):
            parser.collapse_references(ast)
    except Exception as inst:
        die('While collapsing references of \'%s\': %s' % (f.name, str(inst)))

    try:
        for t in AST_TRANSFORMS[POST_RESOLUTION]:
            with profiler('Running AST transform %s' % t.__name__):
                ast = t(ast)
    except Exception as inst:
        die('While transforming AST: %s' % str(inst))

    try:
        with profiler('Resolving hierarchy'):
            resolve_hierarchy(ast)
    except Exception as inst:
        die('While resolving hierarchy: %s' % str(inst))

    # If we have a readable cache check if our current target is in the cache.
    # The previous check will 'miss' and this one will 'hit' when the input
    # spec is identical to some previous execution modulo a semantically
    # irrelevant element (e.g. an introduced comment). I.e. the previous check
    # matches when the input is exactly the same and this one matches when the
    # AST is unchanged.
    if options.cache in ['on', 'readonly']:
        with profiler('Looking for a cached version of this output'):
            key = [version(), orig_ast,
                cache_relevant_options(options), options.platform,
                options.item]
            value = cache.get(key)
            if value is not None:
                assert options.item not in NEVER_AST_CACHE, \
                    '%s, that is marked \'never cache\' is in your cache' % options.item
                log.debug('Retrieved %(platform)s.%(item)s from cache' % \
                    options.__dict__)
                done(value)

    # If we have a writable cache, allow outputs to be saved to it.
    if options.cache in ['on', 'writeonly']:
        fs = FileSet(imported)
        def save(item, value):
            # Save an input-keyed cache entry. This one is based on the
            # pre-parsed inputs to save having to derive the AST (parse the
            # input) in order to locate a cache entry in following passes.
            # This corresponds to the first cache check above.
            key = [version(), os.path.abspath(options.file[0].name), s,
                cache_relevant_options(options), options.platform, item]
            specialised = fs.specialise(value)
            if item == 'capdl':
                specialised.extend(options.elf or [])
            cache[key] = specialised
            if item not in NEVER_AST_CACHE:
                # Save an AST-keyed cache entry. This corresponds to the second
                # cache check above.
                cache[[version(), orig_ast,
                    cache_relevant_options(options), options.platform,
                    item]] = value
    else:
        # No writable cache: saving becomes a no-op so callers below don't
        # need to care whether caching is enabled.
        def save(item, value):
            pass

    # All references in the AST need to be resolved for us to continue.
    unresolved = reduce(lambda a, x: a.union(x),
        map(lambda x: x.unresolved(), ast), set())
    if unresolved:
        die('Unresolved references in input specification:\n %s' % \
            '\n '.join(map(lambda x: '%(filename)s:%(lineno)s:\'%(name)s\' of type %(type)s' % {
                'filename':x.filename or '<unnamed file>',
                'lineno':x.lineno,
                'name':x._symbol,
                'type':x._type.__name__,
            }, unresolved)))

    # Locate the assembly
    assembly = [x for x in ast if isinstance(x, AST.Assembly)]
    if len(assembly) > 1:
        die('Multiple assemblies found')
    elif len(assembly) == 1:
        assembly = assembly[0]
    else:
        die('No assembly found')

    obj_space = ObjectAllocator()
    cspaces = {}
    pds = {}
    conf = assembly.configuration
    shmem = defaultdict(dict)

    # We need to create a phony instance and connection to cope with cases
    # where the user has not defined any instances or connections (this would
    # be an arguably useless system, but we should still support it). We append
    # these to the template's view of the system below to ensure we always get
    # a usable template dictionary. Note that this doesn't cause any problems
    # because the phony items are named '' and thus unaddressable in ADL.
    dummy_instance = AST.Instance(AST.Reference('', AST.Instance), '')
    dummy_connection = AST.Connection(AST.Reference('', AST.Connector), '', \
        AST.Reference('', AST.Instance), AST.Reference('', AST.Interface), \
        AST.Reference('', AST.Instance), AST.Reference('', AST.Interface))

    templates = Templates(options.platform,
        instance=map(lambda x: x.name, assembly.composition.instances + \
            [dummy_instance]), \
        connection=map(lambda x: x.name, assembly.composition.connections + \
            [dummy_connection]))
    if options.templates:
        templates.add_root(options.templates)
    r = Renderer(templates.get_roots(), options)

    # The user may have provided their own connector definitions (with
    # associated) templates, in which case they won't be in the built-in lookup
    # dictionary. Let's add them now. Note, definitions here that conflict with
    # existing lookup entries will overwrite the existing entries.
    for c in [x for x in ast if isinstance(x, AST.Connector)]:
        if c.from_template:
            templates.add(c.name, 'from.source', c.from_template)
        if c.to_template:
            templates.add(c.name, 'to.source', c.to_template)

    # We're now ready to instantiate the template the user requested, but there
    # are a few wrinkles in the process. Namely,
    #  1. Template instantiation needs to be done in a deterministic order. The
    #     runner is invoked multiple times and template code needs to be
    #     allocated identical cap slots in each run.
    #  2. Components and connections need to be instantiated before any other
    #     templates, regardless of whether they are the ones we are after. Some
    #     other templates, such as the Makefile depend on the obj_space and
    #     cspaces.
    #  3. All actual code templates, up to the template that was requested,
    #     need to be instantiated. This is related to (1) in that the cap slots
    #     allocated are dependent on what allocations have been done prior to a
    #     given allocation call.

    # Instantiate the per-component source and header files.
    for id, i in enumerate(assembly.composition.instances):
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue

        if i.address_space not in cspaces:
            p = Perspective(phase=RUNNER, instance=i.name,
                group=i.address_space)
            cnode = obj_space.alloc(seL4_CapTableObject,
                name=p['cnode'], label=i.address_space)
            cspaces[i.address_space] = CSpaceAllocator(cnode)
            pd = obj_space.alloc(seL4_PageDirectoryObject, name=p['pd'],
                label=i.address_space)
            pds[i.address_space] = pd

        for t in ['%s.source' % i.name, '%s.header' % i.name,
                '%s.linker' % i.name]:
            try:
                template = templates.lookup(t, i)
                g = ''
                if template:
                    with profiler('Rendering %s' % t):
                        g = r.render(i, assembly, template, obj_space,
                            cspaces[i.address_space], \
                            shmem, options=options, id=id,
                            my_pd=pds[i.address_space])
                save(t, g)
                if options.item == t:
                    if not template:
                        log.warning('Warning: no template for %s' %
                            options.item)
                    done(g)
            except Exception as inst:
                die('While rendering %s: %s' % (i.name, str(inst)))

    # Instantiate the per-connection files.
    # Connections sharing an endpoint are renamed to a single synthetic
    # 'connN' name while their templates are rendered; the original name is
    # restored afterwards (c.name = tmp_name below).
    conn_dict = {}
    for id, c in enumerate(assembly.composition.connections):
        tmp_name = c.name
        key_from = (c.from_instance.name + '_' + c.from_interface.name) in conn_dict
        key_to = (c.to_instance.name + '_' + c.to_interface.name) in conn_dict
        if not key_from and not key_to:
            # We need a new connection name
            conn_name = 'conn' + str(id)
            c.name = conn_name
            conn_dict[c.from_instance.name + '_' + c.from_interface.name] = conn_name
            conn_dict[c.to_instance.name + '_' + c.to_interface.name] = conn_name
        elif not key_to:
            conn_name = conn_dict[c.from_instance.name + '_' + c.from_interface.name]
            c.name = conn_name
            conn_dict[c.to_instance.name + '_' + c.to_interface.name] = conn_name
        elif not key_from:
            conn_name = conn_dict[c.to_instance.name + '_' + c.to_interface.name]
            c.name = conn_name
            conn_dict[c.from_instance.name + '_' + c.from_interface.name] = conn_name
        else:
            continue

        for t in [('%s.from.source' % tmp_name, c.from_instance.address_space),
                  ('%s.from.header' % tmp_name, c.from_instance.address_space),
                  ('%s.to.source' % tmp_name, c.to_instance.address_space),
                  ('%s.to.header' % tmp_name, c.to_instance.address_space)]:
            try:
                template = templates.lookup(t[0], c)
                g = ''
                if template:
                    with profiler('Rendering %s' % t[0]):
                        g = r.render(c, assembly, template, obj_space,
                            cspaces[t[1]], \
                            shmem, options=options, id=id, my_pd=pds[t[1]])
                save(t[0], g)
                if options.item == t[0]:
                    if not template:
                        log.warning('Warning: no template for %s' %
                            options.item)
                    done(g)
            except Exception as inst:
                die('While rendering %s: %s' % (t[0], str(inst)))
        c.name = tmp_name

        # The following block handles instantiations of per-connection
        # templates that are neither a 'source' or a 'header', as handled
        # above. We assume that none of these need instantiation unless we are
        # actually currently looking for them (== options.item). That is, we
        # assume that following templates, like the CapDL spec, do not require
        # these templates to be rendered prior to themselves.
        # FIXME: This is a pretty ugly way of handling this. It would be nicer
        # for the runner to have a more general notion of per-'thing' templates
        # where the per-component templates, the per-connection template loop
        # above, and this loop could all be done in a single unified control
        # flow.
        for t in [('%s.from.' % c.name, c.from_instance.address_space),
                  ('%s.to.' % c.name, c.to_instance.address_space)]:
            if not options.item.startswith(t[0]):
                # This is not the item we're looking for.
                continue
            try:
                # If we've reached here then this is the exact item we're
                # after.
                template = templates.lookup(options.item, c)
                if template is None:
                    raise Exception('no registered template for %s' %
                        options.item)
                with profiler('Rendering %s' % options.item):
                    g = r.render(c, assembly, template, obj_space,
                        cspaces[t[1]], \
                        shmem, options=options, id=id, my_pd=pds[t[1]])
                save(options.item, g)
                done(g)
            except Exception as inst:
                die('While rendering %s: %s' % (options.item, str(inst)))

    # Perform any per component simple generation. This needs to happen last
    # as this template needs to run after all other capabilities have been
    # allocated
    for id, i in enumerate(assembly.composition.instances):
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue

        assert i.address_space in cspaces
        # Only instances whose configuration sets a truthy 'simple' attribute
        # get this template.
        if conf and conf.settings and [x for x in conf.settings if \
                x.instance == i.name and x.attribute == 'simple' and x.value]:
            for t in ['%s.simple' % i.name]:
                try:
                    template = templates.lookup(t, i)
                    g = ''
                    if template:
                        with profiler('Rendering %s' % t):
                            g = r.render(i, assembly, template, obj_space,
                                cspaces[i.address_space], \
                                shmem, options=options, id=id,
                                my_pd=pds[i.address_space])
                    save(t, g)
                    if options.item == t:
                        if not template:
                            log.warning('Warning: no template for %s' %
                                options.item)
                        done(g)
                except Exception as inst:
                    die('While rendering %s: %s' % (i.name, str(inst)))

    # Derive a set of usable ELF objects from the filenames we were passed.
    elfs = {}
    arch = None
    for e in options.elf or []:
        try:
            name = os.path.basename(e)
            if name in elfs:
                raise Exception('duplicate ELF files of name \'%s\' encountered' % name)
            elf = ELF(e, name)
            if not arch:
                # The spec's arch will have defaulted to ARM, but we want it to
                # be the same as whatever ELF format we're parsing.
                arch = elf.get_arch()
                if arch == 'ARM':
                    obj_space.spec.arch = 'arm11'
                elif arch == 'x86':
                    obj_space.spec.arch = 'ia32'
                else:
                    raise NotImplementedError
            else:
                # All ELF files we're parsing should be the same format.
                if arch != elf.get_arch():
                    raise Exception('ELF files are not all the same architecture')
            # Pass 'False' to avoid inferring a TCB as we've already created
            # our own.
            p = Perspective(phase=RUNNER, elf_name=name)
            group = p['group']
            with profiler('Deriving CapDL spec from %s' % e):
                elf_spec = elf.get_spec(infer_tcb=False, infer_asid=False,
                    pd=pds[group], use_large_frames=options.largeframe)
                obj_space.merge(elf_spec, label=group)
            elfs[name] = (e, elf)
        except Exception as inst:
            die('While opening \'%s\': %s' % (e, str(inst)))

    if options.item in ['capdl', 'label-mapping']:
        # It's only relevant to run these filters if the final target is CapDL.
        # Note, this will no longer be true if we add any other templates that
        # depend on a fully formed CapDL spec. Guarding this loop with an if
        # is just an optimisation and the conditional can be removed if
        # desired.
        for f in CAPDL_FILTERS:
            try:
                with profiler('Running CapDL filter %s' % f.__name__):
                    # Pass everything as named arguments to allow filters to
                    # easily ignore what they don't want.
                    f(ast=ast, obj_space=obj_space, cspaces=cspaces, elfs=elfs,
                        profiler=profiler, options=options, shmem=shmem)
            except Exception as inst:
                die('While forming CapDL spec: %s' % str(inst))

    # Instantiate any other, miscellaneous template. If we've reached this
    # point, we know the user did not request a code template.
    try:
        template = templates.lookup(options.item)
        g = ''
        if template:
            with profiler('Rendering %s' % options.item):
                g = r.render(assembly, assembly, template, obj_space, None, \
                    shmem, imported=imported, options=options)
            save(options.item, g)
            done(g)
    except Exception as inst:
        die('While rendering %s: %s' % (options.item, str(inst)))

    die('No valid element matching --item %s' % options.item)
def parse_args(argv, out, err):
    '''Parse the runner's command line and return the argparse namespace.

    `argv` is the full argument vector (argv[0] is the program name), and
    `out`/`err` are file-like objects temporarily substituted for the standard
    streams while argparse runs, because argparse writes its help and error
    output directly to sys.stdout/sys.stderr.
    '''
    parser = argparse.ArgumentParser(prog='python -m camkes.runner',
        description='instantiate templates based on a CAmkES specification')
    parser.add_argument('--file', '-f', help='Add this file to the list of '
        'input files to parse. Files are parsed in the order in which they are '
        'encountered on the command line.', type=argparse.FileType('r'),
        required=True)
    parser.add_argument('--cpp', action='store_true', help='Pre-process the '
        'source with CPP')
    parser.add_argument('--nocpp', action='store_false', dest='cpp',
        help='Do not pre-process the source with CPP')
    parser.add_argument('--cpp-flag', action='append', default=[],
        help='Specify a flag to pass to CPP')
    parser.add_argument('--import-path', '-I', help='Add this path to the list '
        'of paths to search for built-in imports. That is, add it to the list '
        'of directories that are searched to find the file "foo" when '
        'encountering an expression "import <foo>;".', action='append',
        default=[])
    parser.add_argument('--quiet', '-q', help='No output.', dest='verbosity',
        default=1, action='store_const', const=0)
    parser.add_argument('--verbose', '-v', help='Verbose output.',
        dest='verbosity', action='store_const', const=2)
    parser.add_argument('--debug', '-D', help='Extra verbose output.',
        dest='verbosity', action='store_const', const=3)
    parser.add_argument('--outfile', '-O', help='Output to the given file.',
        type=argparse.FileType('w'), required=True, action='append',
        default=[])
    parser.add_argument('--elf', '-E', help='ELF files to contribute to a '
        'CapDL specification.', action='append', default=[])
    parser.add_argument('--item', '-T', help='AST entity to produce code for.',
        required=True, action='append', default=[])
    parser.add_argument('--platform', '-p', help='Platform to produce code '
        'for. Pass \'help\' to see valid platforms.', default='seL4',
        choices=PLATFORMS)
    parser.add_argument('--templates', '-t', help='Extra directories to '
        'search for templates (before builtin templates).', action='append',
        default=[])
    parser.add_argument('--cache', '-c', action='store_true',
        help='Enable code generation cache.')
    parser.add_argument('--cache-dir',
        default=os.path.expanduser('~/.camkes/cache'),
        help='Set code generation cache location.')
    parser.add_argument('--version', action='version', version='%s %s' %
        (argv[0], version()))
    parser.add_argument('--frpc-lock-elision', action='store_true',
        default=True, help='Enable lock elision optimisation in seL4RPC '
        'connector.')
    parser.add_argument('--fno-rpc-lock-elision', action='store_false',
        dest='frpc_lock_elision', help='Disable lock elision optimisation in '
        'seL4RPC connector.')
    parser.add_argument('--fspecialise-syscall-stubs', action='store_true',
        default=True, help='Generate inline syscall stubs to reduce overhead '
        'where possible.')
    parser.add_argument('--fno-specialise-syscall-stubs', action='store_false',
        dest='fspecialise_syscall_stubs', help='Always use the libsel4 syscall '
        'stubs.')
    parser.add_argument('--fprovide-tcb-caps', action='store_true',
        default=True, help='Hand out TCB caps to components, allowing them to '
        'exit cleanly.')
    parser.add_argument('--fno-provide-tcb-caps', action='store_false',
        dest='fprovide_tcb_caps', help='Do not hand out TCB caps, causing '
        'components to fault on exiting.')
    parser.add_argument('--fsupport-init', action='store_true', default=True,
        help='Support pre_init, post_init and friends.')
    parser.add_argument('--fno-support-init', action='store_false',
        dest='fsupport_init', help='Do not support pre_init, post_init and '
        'friends.')
    parser.add_argument('--default-priority', type=int, default=254,
        help='Default component thread priority.')
    parser.add_argument('--default-max-priority', type=int, default=254,
        help='Default component thread maximum priority.')
    parser.add_argument('--default-affinity', type=int, default=0,
        help='Default component thread affinity.')
    parser.add_argument('--default-period', type=int, default=10000,
        help='Default component thread scheduling context period.')
    parser.add_argument('--default-budget', type=int, default=10000,
        help='Default component thread scheduling context budget.')
    parser.add_argument('--default-data', type=int, default=0,
        help='Default component thread scheduling context data.')
    parser.add_argument('--default-size_bits', type=int, default=8,
        help='Default scheduling context size bits.')
    parser.add_argument('--prune', action='store_true',
        help='Minimise the number of functions in generated C files.')
    parser.add_argument('--largeframe', action='store_true',
        help='Try to use large frames when possible.')
    parser.add_argument('--architecture', '--arch', default='aarch32',
        type=lambda x: type('')(x).lower(),
        choices=('aarch32', 'arm_hyp', 'ia32', 'x86_64'),
        help='Target architecture.')
    parser.add_argument('--makefile-dependencies', '-MD',
        type=argparse.FileType('w'), help='Write Makefile dependency rule to '
        'FILE')
    parser.add_argument('--allow-forward-references', action='store_true',
        help='allow refering to objects in your specification that are '
        'defined after the point at which they are referenced')
    parser.add_argument('--disallow-forward-references', action='store_false',
        dest='allow_forward_references', help='only permit references in '
        'specifications to objects that have been defined before that point')
    parser.add_argument('--debug-fault-handlers', action='store_true',
        help='provide fault handlers to decode cap and VM faults for the '
        'purposes of debugging')
    parser.add_argument('--largeframe-dma', action='store_true',
        help='promote frames backing DMA pools to large frames where possible')
    parser.add_argument('--realtime', action='store_true',
        help='Target realtime seL4.')
    parser.add_argument('--data-structure-cache-dir', type=str,
        help='Directory for storing pickled '
        'datastructures for re-use between multiple '
        'invocations of the camkes tool in a single build. The user should delete '
        'this directory between builds.')

    # Juggle the standard streams either side of parsing command-line arguments
    # because argparse provides no mechanism to control this.
    old_out = sys.stdout
    old_err = sys.stderr
    sys.stdout = out
    sys.stderr = err
    options = parser.parse_args(argv[1:])
    sys.stdout = old_out
    sys.stderr = old_err

    return options
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017, Data61
# Commonwealth Scientific and Industrial Research Organisation (CSIRO)
# ABN 41 687 119 230.
#
# This software may be distributed and modified according to the terms of
# the BSD 2-Clause license. Note that NO WARRANTY is provided.
# See "LICENSE_BSD2.txt" for details.
#
# @TAG(DATA61_BSD)
#

'''Emit a C header defining the current CAmkES version to stdout.'''

import os
import sys

# Make the camkes package importable when this script is run from its own
# directory: it lives two levels below the repository root.
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))

from camkes.internal.version import version

# Template for the generated header; the surrounding newlines are stripped
# before the version string is substituted in.
HEADER_TEMPLATE = '''
#ifndef VERSION_H_
#define VERSION_H_

/* This file is generated; do not edit */

#define VERSION "%s"

#endif
'''

sys.stdout.write(HEADER_TEMPLATE.strip() % version())
def main():
    '''Entry point of the CAmkES runner.

    Parses command-line options, parses and resolves the single input
    specification, then renders templates until the one requested via
    --item is produced, writing it to --outfile. Outputs may be fetched
    from, and saved to, a compilation cache. This function always
    terminates the process: done() on success, die() on failure.
    '''
    options = parse_args(constants.TOOL_RUNNER)

    # Save us having to pass debugging everywhere.
    die = functools.partial(_die, options.verbosity >= 3)

    log.set_verbosity(options.verbosity)

    def done(s):
        # Emit the rendered output (if any) and exit successfully.
        ret = 0
        if s:
            options.outfile.write(s)
            options.outfile.close()
        sys.exit(ret)

    if not options.platform or options.platform in ('?', 'help') \
            or options.platform not in PLATFORMS:
        die('Valid --platform arguments are %s' % ', '.join(PLATFORMS))

    if not options.file or len(options.file) > 1:
        die('A single input file must be provided for this operation')

    # Construct the compilation cache if requested.
    cache = None
    if options.cache in ('on', 'readonly', 'writeonly'):
        cache = Cache(options.cache_dir)

    f = options.file[0]
    try:
        s = f.read()
        # Try to find this output in the compilation cache if possible. This is
        # one of two places that we check in the cache. This check will 'hit'
        # if the source files representing the input spec are identical to some
        # previous execution.
        if options.cache in ('on', 'readonly'):
            key = [version(), os.path.abspath(f.name), s,
                cache_relevant_options(options), options.platform,
                options.item]
            value = cache.get(key)
            assert value is None or isinstance(value, FileSet), \
                'illegally cached a value for %s that is not a FileSet' % \
                options.item
            if value is not None and value.valid():
                # Cache hit.
                log.debug('Retrieved %(platform)s.%(item)s from cache' %
                    options.__dict__)
                done(value.output)
        ast = parser.parse_to_ast(s, options.cpp, options.cpp_flag,
            options.ply_optimise)
        parser.assign_filenames(ast, f.name)
    except parser.CAmkESSyntaxError as e:
        e.set_column(s)
        die('%s:%s' % (f.name, str(e)))
    except Exception as inst:
        die('While parsing \'%s\': %s' % (f.name, inst))

    try:
        for t in AST_TRANSFORMS[PRE_RESOLUTION]:
            ast = t(ast)
    except Exception as inst:
        die('While transforming AST: %s' % str(inst))

    try:
        ast, imported = parser.resolve_imports(ast,
            os.path.dirname(os.path.abspath(f.name)), options.import_path,
            options.cpp, options.cpp_flag, options.ply_optimise)
    except Exception as inst:
        die('While resolving imports of \'%s\': %s' % (f.name, inst))

    try:
        # if there are multiple assemblies, combine them now
        compose_assemblies(ast)
    except Exception as inst:
        die('While combining assemblies: %s' % str(inst))

    # If we have a readable cache check if our current target is in the cache.
    # The previous check will 'miss' and this one will 'hit' when the input
    # spec is identical to some previous execution modulo a semantically
    # irrelevant element (e.g. an introduced comment). I.e. the previous check
    # matches when the input is exactly the same and this one matches when the
    # AST is unchanged.
    if options.cache in ('on', 'readonly'):
        key = [version(), ast, cache_relevant_options(options),
            options.platform, options.item]
        value = cache.get(key)
        if value is not None:
            assert options.item not in NEVER_AST_CACHE, \
                '%s, that is marked \'never cache\' is in your cache' % \
                options.item
            log.debug('Retrieved %(platform)s.%(item)s from cache' %
                options.__dict__)
            done(value)

    # If we have a writable cache, allow outputs to be saved to it.
    if options.cache in ('on', 'writeonly'):
        orig_ast = deepcopy(ast)
        fs = FileSet(imported)
        def save(item, value):
            # Save an input-keyed cache entry. This one is based on the
            # pre-parsed inputs to save having to derive the AST (parse the
            # input) in order to locate a cache entry in following passes.
            # This corresponds to the first cache check above.
            key = [version(), os.path.abspath(options.file[0].name), s,
                cache_relevant_options(options), options.platform, item]
            specialised = fs.specialise(value)
            if item == 'capdl':
                # --elf may never have been given, in which case options.elf
                # is None; guard so we don't extend() with a non-iterable.
                specialised.extend(options.elf or [])
            cache[key] = specialised
            if item not in NEVER_AST_CACHE:
                # Save an AST-keyed cache entry. This corresponds to the
                # second cache check above.
                cache[[version(), orig_ast, cache_relevant_options(options),
                    options.platform, item]] = value
    else:
        def save(item, value):
            # No writable cache; saving outputs is a no-op.
            pass

    ast = parser.dedupe(ast)
    try:
        ast = parser.resolve_references(ast)
    except Exception as inst:
        die('While resolving references of \'%s\': %s' % (f.name, inst))

    try:
        parser.collapse_references(ast)
    except Exception as inst:
        die('While collapsing references of \'%s\': %s' % (f.name, inst))

    try:
        for t in AST_TRANSFORMS[POST_RESOLUTION]:
            ast = t(ast)
    except Exception as inst:
        die('While transforming AST: %s' % str(inst))

    try:
        resolve_hierarchy(ast)
    except Exception as inst:
        die('While resolving hierarchy: %s' % str(inst))

    # All references in the AST need to be resolved for us to continue.
    unresolved = reduce(lambda a, x: a.union(x),
        map(lambda x: x.unresolved(), ast), set())
    if unresolved:
        die('Unresolved references in input specification:\n %s' % \
            '\n '.join(map(lambda x: '%(filename)s:%(lineno)s:\'%(name)s\' of type %(type)s' % {
                'filename':x.filename or '<unnamed file>',
                'lineno':x.lineno,
                'name':x._symbol,
                'type':x._type.__name__,
            }, unresolved)))

    # Locate the assembly
    assembly = [x for x in ast if isinstance(x, AST.Assembly)]
    if len(assembly) > 1:
        die('Multiple assemblies found')
    elif len(assembly) == 1:
        assembly = assembly[0]
    else:
        die('No assembly found')

    obj_space = ObjectAllocator()
    cspaces = {}
    pds = {}
    conf = assembly.configuration
    shmem = defaultdict(dict)

    templates = Templates(options.platform)
    # NOTE: relies on Python 2's eager map() for the side effect of adding
    # each root; under Python 3 this would silently do nothing.
    map(templates.add_root, options.templates)
    r = Renderer(templates.get_roots(), options)

    # The user may have provided their own connector definitions (with
    # associated) templates, in which case they won't be in the built-in lookup
    # dictionary. Let's add them now. Note, definitions here that conflict with
    # existing lookup entries will overwrite the existing entries.
    for c in (x for x in ast if isinstance(x, AST.Connector)):
        if c.from_template:
            templates.add(c.name, 'from.source', c.from_template)
        if c.to_template:
            templates.add(c.name, 'to.source', c.to_template)

    # We're now ready to instantiate the template the user requested, but there
    # are a few wrinkles in the process. Namely,
    #  1. Template instantiation needs to be done in a deterministic order. The
    #     runner is invoked multiple times and template code needs to be
    #     allocated identical cap slots in each run.
    #  2. Components and connections need to be instantiated before any other
    #     templates, regardless of whether they are the ones we are after. Some
    #     other templates, such as the Makefile depend on the obj_space and
    #     cspaces.
    #  3. All actual code templates, up to the template that was requested,
    #     need to be instantiated. This is related to (1) in that the cap slots
    #     allocated are dependent on what allocations have been done prior to a
    #     given allocation call.

    # Instantiate the per-component source and header files.
    for id, i in enumerate(assembly.composition.instances):
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue

        if i.address_space not in cspaces:
            p = Perspective(phase=RUNNER, instance=i.name,
                group=i.address_space)
            cnode = obj_space.alloc(seL4_CapTableObject,
                name=p['cnode'], label=i.address_space)
            cspaces[i.address_space] = CSpaceAllocator(cnode)
            pd = obj_space.alloc(seL4_PageDirectoryObject, name=p['pd'],
                label=i.address_space)
            pds[i.address_space] = pd

        for t in ('%s.source' % i.name, '%s.header' % i.name,
                '%s.linker' % i.name):
            try:
                template = templates.lookup(t, i)
                g = ''
                if template:
                    g = r.render(i, assembly, template, obj_space,
                        cspaces[i.address_space], shmem, options=options,
                        id=id, my_pd=pds[i.address_space])
                save(t, g)
                if options.item == t:
                    if not template:
                        log.warning('Warning: no template for %s' % options.item)
                    done(g)
            except Exception as inst:
                die('While rendering %s: %s' % (i.name, inst))

    # Instantiate the per-connection files.
    conn_dict = {}
    for id, c in enumerate(assembly.composition.connections):
        tmp_name = c.name
        key_from = (c.from_instance.name + '_' + c.from_interface.name) in conn_dict
        key_to = (c.to_instance.name + '_' + c.to_interface.name) in conn_dict
        if not key_from and not key_to:
            # We need a new connection name
            conn_name = 'conn' + str(id)
            c.name = conn_name
            conn_dict[c.from_instance.name + '_' + c.from_interface.name] = conn_name
            conn_dict[c.to_instance.name + '_' + c.to_interface.name] = conn_name
        elif not key_to:
            conn_name = conn_dict[c.from_instance.name + '_' + c.from_interface.name]
            c.name = conn_name
            conn_dict[c.to_instance.name + '_' + c.to_interface.name] = conn_name
        elif not key_from:
            conn_name = conn_dict[c.to_instance.name + '_' + c.to_interface.name]
            c.name = conn_name
            conn_dict[c.from_instance.name + '_' + c.from_interface.name] = conn_name
        else:
            continue

        for t in (('%s.from.source' % tmp_name, c.from_instance.address_space),
                ('%s.from.header' % tmp_name, c.from_instance.address_space),
                ('%s.to.source' % tmp_name, c.to_instance.address_space),
                ('%s.to.header' % tmp_name, c.to_instance.address_space)):
            try:
                template = templates.lookup(t[0], c)
                g = ''
                if template:
                    g = r.render(c, assembly, template, obj_space,
                        cspaces[t[1]], shmem, options=options, id=id,
                        my_pd=pds[t[1]])
                save(t[0], g)
                if options.item == t[0]:
                    if not template:
                        log.warning('Warning: no template for %s' % options.item)
                    done(g)
            except Exception as inst:
                die('While rendering %s: %s' % (t[0], inst))
        c.name = tmp_name

        # The following block handles instantiations of per-connection
        # templates that are neither a 'source' or a 'header', as handled
        # above. We assume that none of these need instantiation unless we are
        # actually currently looking for them (== options.item). That is, we
        # assume that following templates, like the CapDL spec, do not require
        # these templates to be rendered prior to themselves.
        # FIXME: This is a pretty ugly way of handling this. It would be nicer
        # for the runner to have a more general notion of per-'thing' templates
        # where the per-component templates, the per-connection template loop
        # above, and this loop could all be done in a single unified control
        # flow.
        for t in (('%s.from.' % c.name, c.from_instance.address_space),
                ('%s.to.' % c.name, c.to_instance.address_space)):
            if not options.item.startswith(t[0]):
                # This is not the item we're looking for.
                continue
            try:
                # If we've reached here then this is the exact item we're
                # after.
                template = templates.lookup(options.item, c)
                if template is None:
                    raise Exception('no registered template for %s' % options.item)
                g = r.render(c, assembly, template, obj_space, cspaces[t[1]],
                    shmem, options=options, id=id, my_pd=pds[t[1]])
                save(options.item, g)
                done(g)
            except Exception as inst:
                die('While rendering %s: %s' % (options.item, inst))

    # Perform any per component simple generation. This needs to happen last
    # as this template needs to run after all other capabilities have been
    # allocated
    for id, i in enumerate(assembly.composition.instances):
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue
        assert i.address_space in cspaces
        if conf and conf.settings and [x for x in conf.settings if
                x.instance == i.name and x.attribute == 'simple' and x.value]:
            for t in ('%s.simple' % i.name,):
                try:
                    template = templates.lookup(t, i)
                    g = ''
                    if template:
                        g = r.render(i, assembly, template, obj_space,
                            cspaces[i.address_space], shmem, options=options,
                            id=id, my_pd=pds[i.address_space])
                    save(t, g)
                    if options.item == t:
                        if not template:
                            log.warning('Warning: no template for %s' % options.item)
                        done(g)
                except Exception as inst:
                    die('While rendering %s: %s' % (i.name, inst))

    # Derive a set of usable ELF objects from the filenames we were passed.
    elfs = {}
    arch = None
    # Same guard as in save(): options.elf is None when --elf was not given.
    for e in options.elf or []:
        try:
            name = os.path.basename(e)
            if name in elfs:
                raise Exception(
                    'duplicate ELF files of name \'%s\' encountered' % name)
            elf = ELF(e, name)
            if not arch:
                # The spec's arch will have defaulted to ARM, but we want it to
                # be the same as whatever ELF format we're parsing.
                arch = elf.get_arch()
                if arch == 'ARM':
                    obj_space.spec.arch = 'arm11'
                elif arch == 'x86':
                    obj_space.spec.arch = 'ia32'
                else:
                    raise NotImplementedError
            else:
                # All ELF files we're parsing should be the same format.
                if arch != elf.get_arch():
                    raise Exception(
                        'ELF files are not all the same architecture')
            p = Perspective(phase=RUNNER, elf_name=name)
            group = p['group']
            # Avoid inferring a TCB as we've already created our own.
            elf_spec = elf.get_spec(infer_tcb=False, infer_asid=False,
                pd=pds[group], use_large_frames=options.largeframe)
            obj_space.merge(elf_spec, label=group)
            elfs[name] = (e, elf)
        except Exception as inst:
            die('While opening \'%s\': %s' % (e, inst))

    if options.item in ('capdl', 'label-mapping'):
        # It's only relevant to run these filters if the final target is CapDL.
        # Note, this will no longer be true if we add any other templates that
        # depend on a fully formed CapDL spec. Guarding this loop with an if
        # is just an optimisation and the conditional can be removed if
        # desired.
        for f in CAPDL_FILTERS:
            try:
                # Pass everything as named arguments to allow filters to
                # easily ignore what they don't want.
                f(ast=ast, obj_space=obj_space, cspaces=cspaces, elfs=elfs,
                    options=options, shmem=shmem)
            except Exception as inst:
                die('While forming CapDL spec: %s' % str(inst))

    # Instantiate any other, miscellaneous template. If we've reached this
    # point, we know the user did not request a code template.
    try:
        template = templates.lookup(options.item)
        if template:
            g = r.render(assembly, assembly, template, obj_space, None,
                shmem, imported=imported, options=options)
            save(options.item, g)
            done(g)
    except Exception as inst:
        die('While rendering %s: %s' % (options.item, inst))

    die('No valid element matching --item %s' % options.item)