Example #1
    def test_timestamps(self):
        '''
        Ensure that modifying a timestamp on one of the inputs has no effect.
        '''
        root = self.mkdtemp()
        c = Cache(root)

        input = self.mkstemp()
        with open(input, 'wt') as f:
            f.write('foo bar')

        inputs = prime_inputs([input])

        cwd = os.getcwd()
        c.save(['arg1', 'arg2'], cwd, 'hello world', inputs)

        # Bump the timestamps on the input.
        st = os.stat(input)
        os.utime(input, (st[stat.ST_ATIME] + 3600, st[stat.ST_MTIME] + 3600))

        # Ensure we can find what we just saved.
        output = c.load(['arg1', 'arg2'], cwd)
        self.assertEqual(output, 'hello world')

        # And after a flush.
        c.flush()
        output = c.load(['arg1', 'arg2'], cwd)
        self.assertEqual(output, 'hello world')
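
The behaviour exercised here depends on the cache validating entries by input content rather than by metadata such as timestamps. A minimal sketch of such a check, assuming a hypothetical store of (path, digest) pairs rather than the real Cache internals:

import hashlib

def entry_still_valid(recorded_inputs):
    '''
    recorded_inputs: iterable of (path, hex digest) pairs captured when the
    entry was saved. The entry remains valid while every input file hashes
    to its recorded digest, no matter how its timestamps change.
    '''
    for path, digest in recorded_inputs:
        try:
            with open(path, 'rb') as f:
                current = hashlib.sha256(f.read()).hexdigest()
        except IOError:
            # A missing input also invalidates the entry.
            return False
        if current != digest:
            return False
    return True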
Example #2
    def test_miss_on_disk2(self):
        '''
        Same as the in-memory miss test except we flush the cache in-between,
        at a point at which the entry is already invalid. The invalidity is
        not detected at flush time, but the later lookup should still miss.
        '''
        root = self.mkdtemp()
        c = Cache(root)

        input = self.mkstemp()
        with open(input, 'wt') as f:
            f.write('foo bar')

        inputs = prime_inputs([input])

        cwd = os.getcwd()
        c.save(['arg1', 'arg2'], cwd, 'hello world', inputs)

        # Now cause the entry to be invalid by modifying inputs.
        with open(input, 'wt') as f:
            f.write('bar foo')

        # Flush the (now invalid) entry to disk.
        c.flush()

        # Ensure we miss when now performing a lookup.
        output = c.load(['arg1', 'arg2'], cwd)
        self.assertIsNone(output)
Example #3
    def test_basic(self):
        '''
        Test we can look up something we've just saved. Note that this test
        will not actually perform an on-disk lookup.
        '''
        root = self.mkdtemp()
        c = Cache(root)

        input = self.mkstemp()
        with open(input, 'wt') as f:
            f.write('foo bar')

        inputs = prime_inputs([input])

        cwd = os.getcwd()
        c.save(['arg1', 'arg2'], cwd, 'hello world', inputs)

        # Ensure we can find what we just saved.
        output = c.load(['arg1', 'arg2'], cwd)

        self.assertEqual(output, 'hello world')
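
prime_inputs is a helper from the test harness whose definition is not shown here. Assuming content-based validation as sketched under Example #1, a plausible implementation pairs each input path with a digest of its current contents, which the miss tests elsewhere in this file then invalidate by rewriting the files:

import hashlib

def prime_inputs(paths):
    # Hypothetical: record (path, digest) for each input so the cache can
    # later detect content changes.
    inputs = []
    for path in paths:
        with open(path, 'rb') as f:
            inputs.append((path, hashlib.sha256(f.read()).hexdigest()))
    return inputs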
Example #4
    def test_cache_hit_truncate(self):
        '''
        A previous accelerator bug resulted in the output file not being
        truncated before the accelerator wrote to it. The result was that an
        output resulting from a cache hit that was delivered via the
        accelerator would have trailing garbage if it was shorter than the
        existing file content. More specifically, the following sequence could
        occur:

          1. Build with cache A enabled and "short string" is output and
             cached;
          2. Build with different config and "a slightly longer string" is
             output (and cached);
          3. Build with original config and the accelerator enabled and "short
             string" is retrieved, but written without truncating the output.

        As a result, the final file content would end up as "short stringonger
        string". This test validates that this problem has not been
        reintroduced.
        '''

        root = self.mkdtemp()

        internal_root = os.path.join(root, version(), 'cachea')
        c = Cache(internal_root)

        # Set up a basic, single-input entry.
        input1 = self.mkstemp()
        with open(input1, 'wt') as f:
            f.write('hello world')
        inputs = prime_inputs([input1])

        cwd = self.mkdtemp()

        output = self.mkstemp()

        args = ['--cache-dir', root, '--outfile', output]

        # Write the entry to the cache with a specific short value.
        content = 'moo cow'
        c.save(args[:-2], cwd, content, inputs)
        c.flush()

        del c

        # Now write something *longer* into the output file.
        with open(output, 'wt') as f:
            f.write('some lengthier text')

        # Now run the accelerator to retrieve the original, shorter output.
        ret, stdout, stderr = self.execute([self.accelerator] + args, cwd=cwd)

        # It should have hit the cache and written the correct, shorter output.
        self.assertEqual(ret, 0)
        self.assertEqual(stdout, '')
        self.assertEqual(stderr, '')
        with open(output) as f:
            data = f.read()
        self.assertEqual(data, content)
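
The failure mode this test guards against is easy to reproduce in isolation: writing through a descriptor that was opened without truncation leaves the tail of any longer pre-existing content in place. A standalone illustration of both the bug and the fix:

import os
import tempfile

fd, path = tempfile.mkstemp()
os.close(fd)

with open(path, 'wt') as f:
    f.write('a slightly longer string')

# Opening with 'r+' positions at the start but does not truncate, mimicking
# the original accelerator bug.
with open(path, 'r+') as f:
    f.write('short string')
with open(path) as f:
    assert f.read() == 'short stringonger string'  # trailing garbage

# Truncating after the write (or simply opening with 'wt') is the fix.
with open(path, 'r+') as f:
    f.write('short string')
    f.truncate()
with open(path) as f:
    assert f.read() == 'short string'

os.remove(path)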
Example #5
    def test_miss_from_missing_file1(self):
        '''
        Ensure a missing input file correctly causes a cache miss.
        '''
        root = self.mkdtemp()
        c = Cache(root)

        _, input = tempfile.mkstemp()
        with open(input, 'wt') as f:
            f.write('foo bar')

        inputs = prime_inputs([input])

        cwd = os.getcwd()
        c.save(['arg1', 'arg2'], cwd, 'hello world', inputs)

        # Now cause the entry to be invalid by deleting its input.
        os.remove(input)

        # Ensure we miss when now performing a lookup.
        output = c.load(['arg1', 'arg2'], cwd)
        self.assertIsNone(output)
Example #6
    def test_miss_in_memory(self):
        '''
        Test that an induced cache miss while the cache entry is still in
        memory works correctly.
        '''
        root = self.mkdtemp()
        c = Cache(root)

        input = self.mkstemp()
        with open(input, 'wt') as f:
            f.write('foo bar')

        inputs = prime_inputs([input])

        cwd = os.getcwd()
        c.save(['arg1', 'arg2'], cwd, 'hello world', inputs)

        # Now cause the entry to be invalid by modifying inputs.
        with open(input, 'wt') as f:
            f.write('bar foo')

        # Ensure we miss when now performing a lookup.
        output = c.load(['arg1', 'arg2'], cwd)
        self.assertIsNone(output)
Example #7
    def test_no_inputs(self):
        '''
        Ensure we can handle an entry with no inputs.
        '''
        root = self.mkdtemp()
        c = Cache(root)

        inputs = prime_inputs([])

        cwd = os.getcwd()
        c.save(['arg1', 'arg2'], cwd, 'hello world', inputs)

        # Ensure we can find what we just saved.
        output = c.load(['arg1', 'arg2'], cwd)
        self.assertEqual(output, 'hello world')

        # Ensure it is preserved after a flush.
        c.flush()
        output = c.load(['arg1', 'arg2'], cwd)
        self.assertEqual(output, 'hello world')
Example #8
    def test_no_args(self):
        root = self.mkdtemp()
        c = Cache(root)

        input = self.mkstemp()
        with open(input, 'wt') as f:
            f.write('foo bar')

        inputs = prime_inputs([input])

        cwd = os.getcwd()
        c.save([], cwd, 'hello world', inputs)

        # Ensure we can find what we just saved.
        output = c.load([], cwd)
        self.assertEqual(output, 'hello world')

        # Ensure it is preserved after a flush.
        c.flush()
        output = c.load([], cwd)
        self.assertEqual(output, 'hello world')
Example #9
    def test_basic_valgrind(self):
        root = self.mkdtemp()

        # CAmkES internally suffixes the root with a couple of things to
        # namespace the cache.
        internal_root = os.path.join(root, version(), 'cachea')
        c = Cache(internal_root)

        # Construct some fake inputs.
        input1 = self.mkstemp()
        with open(input1, 'wt') as f:
            f.write('hello world')
        input2 = self.mkstemp()
        with open(input2, 'wt') as f:
            f.write('foo bar')
        inputs = prime_inputs([input1, input2])

        # And a fake working directory.
        cwd = self.mkdtemp()

        # Imagine we were saving the output from the following file.
        output = self.mkstemp()

        # So the command line arguments would be:
        args = ['--cache-dir', root, '--outfile', output]

        # Save the entry. Note that we truncate the args because the runner and
        # the accelerator strip --outfile arguments before interacting with the
        # cache.
        c.save(args[:-2], cwd, 'moo cow', inputs)
        c.flush()

        # We're done with the native cache.
        del c

        # Now let's try to read back the cache entry from the accelerator.
        _, _, stderr = self.execute(VALGRIND + [self.debug_accelerator] + args,
                                    cwd=cwd)
        if valgrind_found_leak(stderr):
            self.fail('camkes-accelerator %s leaks memory:\n%s' %
                      (' '.join(args), stderr))

        _, _, stderr = self.execute(VALGRIND + [self.accelerator] + args,
                                    cwd=cwd)
        if valgrind_found_leak(stderr):
            self.fail('camkes-accelerator %s leaks memory (not reproducible '
                      'in debug mode):\n%s' % (' '.join(args), stderr))
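
valgrind_found_leak is another harness helper whose implementation is not shown. One plausible sketch, assuming VALGRIND invokes valgrind with leak checking enabled, is to scan the leak summary on stderr for a non-zero 'definitely lost' figure:

import re

def valgrind_found_leak(stderr):
    # Valgrind's leak summary contains lines of the form:
    #   ==1234==    definitely lost: 4,096 bytes in 2 blocks
    # Treat any non-zero 'definitely lost' count as a leak.
    for m in re.finditer(r'definitely lost:\s*([\d,]+) bytes', stderr):
        if int(m.group(1).replace(',', '')) > 0:
            return True
    return False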
Example #10
    def test_basic(self):
        '''
        Test we can save and retrieve something (expected case).
        '''
        root = self.mkdtemp()

        # CAmkES internally suffixes the root with a couple of things to
        # namespace the cache.
        internal_root = os.path.join(root, version(), 'cachea')
        c = Cache(internal_root)

        # Construct some fake inputs.
        input1 = self.mkstemp()
        with open(input1, 'wt') as f:
            f.write('hello world')
        input2 = self.mkstemp()
        with open(input2, 'wt') as f:
            f.write('foo bar')
        inputs = prime_inputs([input1, input2])

        # And a fake working directory.
        cwd = self.mkdtemp()

        # Imagine we were saving the output from the following file.
        output = self.mkstemp()

        # So the command line arguments would be:
        args = ['--cache-dir', root, '--outfile', output]

        # Save the entry. Note that we truncate the args because the runner and
        # the accelerator strip --outfile arguments before interacting with the
        # cache.
        c.save(args[:-2], cwd, 'moo cow', inputs)
        c.flush()

        # We're done with the native cache.
        del c

        # Now let's try to read back the cache entry from the accelerator.
        ret, _, _ = self.execute([self.accelerator] + args, cwd=cwd)
        self.assertEqual(ret, 0)

        # If it worked, we should have the output in the expected place.
        with open(output, 'rt') as f:
            data = f.read()
        self.assertEqual(data, 'moo cow')
Example #11
    def test_directory_creation(self):
        '''
        The cache should be capable of creating necessary subdirectories under
        its root.
        '''
        root = os.path.join(self.mkdtemp(), 'non-existent')

        c = Cache(root)

        input = self.mkstemp()
        with open(input, 'wt') as f:
            f.write('foo bar')

        inputs = prime_inputs([input])

        c.save(['arg1', 'arg2'], os.getcwd(), 'hello world', inputs)
        c.flush()
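
The Cache internals are not shown here, but the behaviour under test amounts to creating the root (and any intermediate directories) on first use, along the lines of this sketch:

import errno
import os

def ensure_root(root):
    # Create the cache root, tolerating the case where it already exists.
    try:
        os.makedirs(root)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise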
Example #12
    def test_basic_with_flush(self):
        '''
        Same as the basic test, but we'll flush in-between to ensure we perform
        an on-disk lookup.
        '''
        root = self.mkdtemp()
        c = Cache(root)

        input = self.mkstemp()
        with open(input, 'wt') as f:
            f.write('foo bar')

        inputs = prime_inputs([input])

        cwd = os.getcwd()
        c.save(['arg1', 'arg2'], cwd, 'hello world', inputs)
        c.flush()

        # Ensure we can find what we just saved.
        output = c.load(['arg1', 'arg2'], cwd)

        self.assertEqual(output, 'hello world')
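
This in-memory/on-disk split is the crux of several tests in this file: load() is answered from memory until flush() persists entries, after which lookups fall back to the backing store (sqlite, judging by the error handling in the runner code below). A much-simplified sketch of the idea, using a single opaque key where the real Cache combines args, cwd and input digests:

import sqlite3

class TwoLayerCache(object):
    def __init__(self, path):
        self.mem = {}
        self.db = sqlite3.connect(path)
        self.db.execute('CREATE TABLE IF NOT EXISTS entries '
                        '(key TEXT PRIMARY KEY, output TEXT)')

    def save(self, key, output):
        # Cheap in-memory write; nothing touches the disk until flush().
        self.mem[key] = output

    def flush(self):
        for key, output in self.mem.items():
            self.db.execute('INSERT OR REPLACE INTO entries VALUES (?, ?)',
                            (key, output))
        self.db.commit()
        self.mem = {}

    def load(self, key):
        # Prefer the in-memory layer; fall back to the on-disk store.
        if key in self.mem:
            return self.mem[key]
        row = self.db.execute('SELECT output FROM entries WHERE key = ?',
                              (key,)).fetchone()
        return row[0] if row else None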
Example #13
    def test_miss_on_disk1(self):
        '''
        Same as the in-memory miss test except we flush the cache in-between.
        '''
        root = self.mkdtemp()
        c = Cache(root)

        input = self.mkstemp()
        with open(input, 'wt') as f:
            f.write('foo bar')

        inputs = prime_inputs([input])

        cwd = os.getcwd()
        c.save(['arg1', 'arg2'], cwd, 'hello world', inputs)
        c.flush()

        # Now cause the entry to be invalid by modifying inputs.
        with open(input, 'wt') as f:
            f.write('bar foo')

        # Ensure we miss when now performing a lookup.
        output = c.load(['arg1', 'arg2'], cwd)
        self.assertIsNone(output)
Example #14
    def test_cache_miss_inputs_valgrind(self):
        # As for the basic test case...
        root = self.mkdtemp()

        internal_root = os.path.join(root, version(), 'cachea')
        c = Cache(internal_root)

        input1 = self.mkstemp()
        with open(input1, 'wt') as f:
            f.write('hello world')
        input2 = self.mkstemp()
        with open(input2, 'wt') as f:
            f.write('foo bar')
        inputs = prime_inputs([input1, input2])

        cwd = self.mkdtemp()

        output = self.mkstemp()

        args = ['--cache-dir', root, '--outfile', output]

        c.save(args[:-2], cwd, 'moo cow', inputs)
        c.flush()

        del c

        # Now let's modify one of the inputs.
        with open(input2, 'at') as f:
            f.write('foo bar')

        _, _, stderr = self.execute(VALGRIND + [self.debug_accelerator] + args,
                                    cwd=cwd)
        if valgrind_found_leak(stderr):
            self.fail('camkes-accelerator %s leaks memory:\n%s' %
                      (' '.join(args), stderr))

        _, _, stderr = self.execute(VALGRIND + [self.accelerator] + args,
                                    cwd=cwd)
        if valgrind_found_leak(stderr):
            self.fail('camkes-accelerator %s leaks memory (not reproducible '
                      'in debug mode):\n%s' % (' '.join(args), stderr))
Example #15
    def test_cache_miss_inputs(self):
        '''
        Test that we correctly miss when one of the inputs has changed.
        '''
        # As for the basic test case...
        root = self.mkdtemp()

        internal_root = os.path.join(root, version(), 'cachea')
        c = Cache(internal_root)

        input1 = self.mkstemp()
        with open(input1, 'wt') as f:
            f.write('hello world')
        input2 = self.mkstemp()
        with open(input2, 'wt') as f:
            f.write('foo bar')
        inputs = prime_inputs([input1, input2])

        cwd = self.mkdtemp()

        output = self.mkstemp()

        args = ['--cache-dir', root, '--outfile', output]

        c.save(args[:-2], cwd, 'moo cow', inputs)
        c.flush()

        del c

        # Now let's modify one of the inputs.
        with open(input2, 'at') as f:
            f.write('foo bar')

        ret, stdout, stderr = self.execute([self.accelerator] + args, cwd=cwd)

        # It should have missed (== non-zero return value with no output).
        self.assertNotEqual(ret, 0)
        self.assertEqual(stdout, '')
        self.assertEqual(stderr, '')
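
The assertions encode the accelerator's miss protocol: exit non-zero, write nothing. A build wrapper is then expected to fall back to the full runner, along the lines of this hedged sketch (the camkes-accelerator and camkes-runner command names here are placeholders):

import subprocess

def generate(args, cwd):
    # Fast path: any non-zero exit status is treated as a cache miss.
    if subprocess.call(['camkes-accelerator'] + args, cwd=cwd) == 0:
        return
    # Miss: run the full (slow) runner, which also repopulates the cache
    # for subsequent builds.
    subprocess.check_call(['camkes-runner'] + args, cwd=cwd)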
Example #16
def main(argv, out, err):

    # We need a UTF-8 locale, so bail out if we don't have one. More
    # specifically, things like the version() computation traverse the file
    # system and, if they hit a UTF-8 filename, they try to decode it into your
    # preferred encoding and trigger an exception.
    encoding = locale.getpreferredencoding().lower()
    if encoding not in ('utf-8', 'utf8'):
        err.write('CAmkES uses UTF-8 encoding, but your locale\'s preferred '
                  'encoding is %s. You can override your locale with the LANG '
                  'environment variable.\n' % encoding)
        return -1

    options = parse_args(argv, out, err)

    # Ensure we were supplied an equal number of items and outfiles.
    if len(options.outfile) != len(options.item):
        err.write(
            'Different number of items and outfiles. Exactly one outfile '
            'location is required per item requested.\n')
        return -1

    # No duplicates in items or outfiles
    if len(set(options.item)) != len(options.item):
        err.write('Duplicate items requested through --item.\n')
        return -1
    if len(set(options.outfile)) != len(options.outfile):
        err.write('Duplicate outfiles requested through --outfile.\n')
        return -1

    # Save us having to pass debugging everywhere.
    die = functools.partial(_die, options)

    log.set_verbosity(options.verbosity)

    cwd = os.getcwd()

    # Build the set of item/outfile pairs that we have yet to match and process
    all_items = set(zip(options.item, options.outfile))
    done_items = set([])

    # Construct the compilation caches if requested.
    cachea = None
    cacheb = None
    if options.cache:

        # Construct a modified version of the command line arguments that we'll
        # use in the keys to the caches. Essentially we elide --outfile and its
        # parameter under the assumption that this value is never used in code
        # generation. The purpose of this is to allow us to successfully cache
        # ancillary outputs that we generate along the way to the current
        # output. If we were to include --outfile in the key, future attempts
        # to generate these ancillary outputs would unnecessarily miss the
        # entries generated by this execution.
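        # For example (hypothetical command line), the elision maps
        #   ['--item', 'x', '--outfile', 'x.out'] to ['--item', 'x'].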
        args = []
        skip = False
        for index, arg in enumerate(argv[1:]):
            if skip:
                skip = False
                continue
            if arg in ('--outfile', '-O'):
                skip = True
                continue
            args.append(arg)

        cachea = LevelACache(
            os.path.join(options.cache_dir, version(), 'cachea'))
        cacheb = LevelBCache(
            os.path.join(options.cache_dir, version(), 'cacheb'))

    def done(s, file, item):
        ret = 0
        if s:
            file.write(s)
            file.close()
        if cachea is not None:
            try:
                cachea.flush()
            except sqlite3.OperationalError as e:
                # The following suppresses two spurious errors:
                #  1. The database is locked. In a large, parallel build, writes
                #     to the level A cache are heavily contended and this error
                #     can occur.
                #  2. The database structure is unexpected. If the CAmkES
                #     sources have changed *while* the runner was executing,
                #     the level A cache can be looking in a different place to
                #     where the cache was created.
                # Both of these are non-critical (will just result in a
                # potential future cache miss) so there's no need to alarm the
                # user.
                if re.search(r'database is locked', str(e)) is not None or \
                   re.search(r'no such table', str(e)) is not None:
                    log.debug('failed to flush level A cache: %s' % str(e))
                else:
                    raise
        if cacheb is not None:
            try:
                cacheb.flush()
            except sqlite3.OperationalError as e:
                # As above for the level B cache.
                if re.search(r'database is locked', str(e)):
                    log.debug('failed to flush level B cache: %s' % str(e))
                else:
                    raise

        done_items.add((item, file))
        if len(all_items - done_items) == 0:
            sys.exit(ret)

    # Try to find this output in the level A cache if possible. This check will
    # 'hit' if the source files representing the input spec are identical to
    # those of some previously observed execution.
    if cachea is not None:
        assert 'args' in locals()
        assert len(options.outfile) == 1, 'level A cache only supported when requesting ' \
            'single items'
        output = cachea.load(args, cwd)
        if output is not None:
            log.debug('Retrieved %(platform)s/%(item)s from level A cache' %
                      options.__dict__)
            done(output, options.outfile[0], options.item[0])

    filename = os.path.abspath(options.file.name)

    try:
        # Build the parser options
        parse_options = ParserOptions(options.cpp, options.cpp_flag,
                                      options.import_path, options.verbosity,
                                      options.allow_forward_references)
        ast, read = parse_file_cached(filename,
                                      options.data_structure_cache_dir,
                                      parse_options)
    except (ASTError, ParseError) as e:
        die(e.args)

    # Locate the assembly.
    assembly = ast.assembly
    if assembly is None:
        die('No assembly found')

    # Do some extra checks if the user asked for verbose output.
    if options.verbosity >= 2:

        # Try to catch type mismatches in attribute settings. Note that it is
        # not possible to conclusively evaluate type correctness because the
        # attributes' type system is (deliberately) too loose. That is, the
        # type of an attribute can be an uninterpreted C type the user will
        # provide post hoc.
        for i in assembly.composition.instances:
            for a in i.type.attributes:
                value = assembly.configuration[i.name].get(a.name)
                if value is not None:
                    if a.type == 'string' and not \
                            isinstance(value, six.string_types):
                        log.warning('attribute %s.%s has type string but is '
                                    'set to a value that is not a string' %
                                    (i.name, a.name))
                    elif a.type == 'int' and not \
                            isinstance(value, numbers.Number):
                        log.warning('attribute %s.%s has type int but is set '
                                    'to a value that is not an integer' %
                                    (i.name, a.name))

    obj_space = ObjectAllocator()
    obj_space.spec.arch = options.architecture
    cspaces = {}
    pds = {}
    conf = assembly.configuration
    shmem = collections.defaultdict(ShmemFactory())
    kept_symbols = {}
    fill_frames = {}

    templates = Templates(options.platform)
    [templates.add_root(t) for t in options.templates]
    try:
        r = Renderer(templates, options.cache, options.cache_dir)
    except jinja2.exceptions.TemplateSyntaxError as e:
        die('template syntax error: %s' % e)

    # The user may have provided their own connector definitions (with
    # associated templates), in which case they won't be in the built-in lookup
    # dictionary. Let's add them now. Note, definitions here that conflict with
    # existing lookup entries will overwrite the existing entries. Note that
    # the extra check that the connector has some templates is just an
    # optimisation; the templates module handles connectors without templates
    # just fine.
    extra_templates = set()
    for c in (x for x in ast.items if isinstance(x, Connector) and (
            x.from_template is not None or x.to_template is not None)):
        try:
            # Find a connection that uses this type.
            connection = next(x for x in ast
                              if isinstance(x, Connection) and x.type == c)
            # Add the custom templates and update our collection of read
            # inputs. It is necessary to update the read set here to avoid
            # false compilation cache hits when the source of a custom template
            # has changed.
            extra_templates |= templates.add(c, connection)
        except TemplateError as e:
            die('while adding connector %s: %s' % (c.name, e))
        except StopIteration:
            # No connections use this type. There's no point adding it to the
            # template lookup dictionary.
            pass

    # Check if our current target is in the level B cache. The level A cache
    # will 'miss' and this one will 'hit' when the input spec is identical to
    # that of some previously observed execution modulo a semantically
    # irrelevant element (e.g. an introduced comment).
    ast_hash = None
    if cacheb is not None:
        ast_hash = level_b_prime(ast)
        assert 'args' in locals()
        assert len(options.item) == 1, 'level B cache only supported when requesting ' \
            'single items'
        output = cacheb.load(ast_hash, args,
                             set(options.elf) | extra_templates)
        if output is not None:
            log.debug('Retrieved %(platform)s/%(item)s from level B cache' %
                      options.__dict__)
            done(output, options.outfile[0], options.item[0])

    # Add custom templates.
    read |= extra_templates

    # Add the CAmkES sources themselves to the accumulated list of inputs.
    read |= set(path for path, _ in sources())

    # Add any ELF files we were passed as inputs.
    read |= set(options.elf)

    # Write a Makefile dependency rule if requested.
    if options.makefile_dependencies is not None:
        options.makefile_dependencies.write(
            '%s: \\\n  %s\n' % (filename, ' \\\n  '.join(sorted(read))))

    # If we have a cache, allow outputs to be saved to it.
    if options.cache:

        assert cachea is not None, 'level A cache not available, though the ' \
            'cache is enabled (bug in runner?)'
        # The logic of this cache currently only works when a single item is requested
        # on the command line
        assert len(options.item) == 1, 'level A cache only supported when requesting ' \
            'single items'

        # Calculate the input files to the level A cache.
        inputs = level_a_prime(read)

        # Work out the position of the --item argument in the command line
        # parameters. We will use this to cache not only outputs for this
        # execution, but also outputs for ones with a different target.
        item_index = None
        assert 'args' in locals()
        for index, arg in enumerate(args[:-1]):
            if arg in ('--item', '-T'):
                item_index = index + 1
                break
        assert item_index is not None, 'failed to find required argument ' \
            '--item (bug in runner?)'

        # We should already have the necessary inputs for the level B cache.
        assert cacheb is not None, 'level B cache not available, though the ' \
            'cache is enabled (bug in runner?)'
        assert ast_hash is not None, 'AST hash not pre-computed (bug in ' \
            'runner?)'

        def save(item, value):
            # Juggle the command line arguments to cache the predicted
            # arguments for a call that would generate this item.
            new_args = args[:item_index] + [item] + args[item_index + 1:]

            # Save entries in both caches.
            cachea.save(new_args, cwd, value, inputs)
            if item != 'Makefile' and item != 'camkes-gen.cmake':
                # We avoid caching the generated Makefile because it is not
                # safe. The inputs to generation of the Makefile are not only
                # the AST, but also the file names (`inputs`). If we cache it in
                # the level B cache we risk the following scenario:
                #
                #   1. Generate the Makefile, caching it in the level B cache;
                #   2. Modify the spec to import a file containing only white
                #      space and/or comments; then
                #   3. Generate the Makefile, missing the level A cache, but
                #      hitting the level B cache.
                #
                # At this point, the generated Makefile is incorrect because it
                # does not capture any dependencies on the imported file. We can
                # now introduce something semantically relevant into this file
                # (e.g. an Assembly block) and it will not be seen by the build
                # system.
                cacheb.save(ast_hash, new_args,
                            set(options.elf) | extra_templates, value)
    else:

        def save(item, value):
            pass

    def apply_capdl_filters():
        # Derive a set of usable ELF objects from the filenames we were passed.
        elfs = {}
        for e in options.elf:
            try:
                name = os.path.basename(e)
                if name in elfs:
                    raise Exception(
                        'duplicate ELF files of name \'%s\' encountered' %
                        name)
                elf = ELF(e, name, options.architecture)
                p = Perspective(phase=RUNNER, elf_name=name)
                group = p['group']
                # Avoid inferring a TCB as we've already created our own.
                elf_spec = elf.get_spec(infer_tcb=False,
                                        infer_asid=False,
                                        pd=pds[group],
                                        use_large_frames=options.largeframe)
                obj_space.merge(elf_spec, label=group)
                elfs[name] = (e, elf)
            except Exception as inst:
                die('While opening \'%s\': %s' % (e, inst))

        # It is only relevant to run these filters if the final target is
        # CapDL. Note, this will no longer be true if we add any other
        # templates that depend on a fully formed CapDL spec. Restricting the
        # filters to CapDL targets is just an optimisation and the
        # restriction can be removed if desired.
        filteroptions = FilterOptions(
            options.architecture, options.realtime, options.largeframe,
            options.largeframe_dma, options.default_priority,
            options.default_max_priority, options.default_criticality,
            options.default_max_criticality, options.default_affinity,
            options.default_period, options.default_budget,
            options.default_data, options.default_size_bits,
            options.debug_fault_handlers, options.fprovide_tcb_caps)
        for f in CAPDL_FILTERS:
            try:
                # Pass everything as named arguments to allow filters to
                # easily ignore what they don't want.
                f(ast=ast,
                  obj_space=obj_space,
                  cspaces=cspaces,
                  elfs=elfs,
                  options=filteroptions,
                  shmem=shmem,
                  fill_frames=fill_frames)
            except Exception as inst:
                die('While forming CapDL spec: %s' % inst)

    renderoptions = RenderOptions(
        options.file, options.verbosity, options.frpc_lock_elision,
        options.fspecialise_syscall_stubs, options.fprovide_tcb_caps,
        options.fsupport_init, options.largeframe, options.largeframe_dma,
        options.architecture, options.debug_fault_handlers, options.realtime)

    def instantiate_misc_template():
        for (item, outfile) in (all_items - done_items):
            try:
                template = templates.lookup(item)
                if template:
                    g = r.render(assembly,
                                 assembly,
                                 template,
                                 obj_space,
                                 None,
                                 shmem,
                                 kept_symbols,
                                 fill_frames,
                                 imported=read,
                                 options=renderoptions)
                    save(item, g)
                    done(g, outfile, item)
            except TemplateError as inst:
                die([
                    'While rendering %s: %s' % (item, line)
                    for line in inst.args
                ])

    if options.item[0] in ('capdl', 'label-mapping') and options.data_structure_cache_dir is not None \
            and len(options.outfile) == 1:
        # It's possible that data structures required to instantiate the capdl spec
        # were saved during a previous invocation of this script in the current build.
        cache_path = os.path.realpath(options.data_structure_cache_dir)
        pickle_path = os.path.join(cache_path, CAPDL_STATE_PICKLE)

        if os.path.isfile(pickle_path):
            with open(pickle_path, 'rb') as pickle_file:
                # Found a cached version of the necessary data structures
                obj_space, shmem, cspaces, pds, kept_symbols, fill_frames = pickle.load(
                    pickle_file)
                apply_capdl_filters()
                instantiate_misc_template()

                # If a template wasn't instantiated, something went wrong, and we can't recover
                raise CAmkESError(
                    "No template instantiated on capdl generation fastpath")

    # We're now ready to instantiate the template the user requested, but there
    # are a few wrinkles in the process. Namely,
    #  1. Template instantiation needs to be done in a deterministic order. The
    #     runner is invoked multiple times and template code needs to be
    #     allocated identical cap slots in each run.
    #  2. Components and connections need to be instantiated before any other
    #     templates, regardless of whether they are the ones we are after. Some
    #     other templates, such as the Makefile, depend on the obj_space and
    #     cspaces.
    #  3. All actual code templates, up to the template that was requested,
    #     need to be instantiated. This is related to (1) in that the cap slots
    #     allocated are dependent on what allocations have been done prior to a
    #     given allocation call.

    # Instantiate the per-component source and header files.
    for i in assembly.composition.instances:
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue

        if i.address_space not in cspaces:
            p = Perspective(phase=RUNNER,
                            instance=i.name,
                            group=i.address_space)
            cnode = obj_space.alloc(seL4_CapTableObject,
                                    name=p['cnode'],
                                    label=i.address_space)
            cspaces[i.address_space] = CSpaceAllocator(cnode)
            pd = obj_space.alloc(lookup_architecture(
                options.architecture).vspace().object,
                                 name=p['pd'],
                                 label=i.address_space)
            pds[i.address_space] = pd

        for t in ('%s/source' % i.name, '%s/header' % i.name,
                  '%s/linker' % i.name):
            try:
                template = templates.lookup(t, i)
                g = ''
                if template:
                    g = r.render(i,
                                 assembly,
                                 template,
                                 obj_space,
                                 cspaces[i.address_space],
                                 shmem,
                                 kept_symbols,
                                 fill_frames,
                                 options=renderoptions,
                                 my_pd=pds[i.address_space])
                save(t, g)
                for (item, outfile) in (all_items - done_items):
                    if item == t:
                        if not template:
                            log.warning('Warning: no template for %s' % item)
                        done(g, outfile, item)
                        break
            except TemplateError as inst:
                die([
                    'While rendering %s: %s' % (i.name, line)
                    for line in inst.args
                ])

    # Instantiate the per-connection files.
    for c in assembly.composition.connections:

        for t in (('%s/from/source' % c.name,
                   c.from_ends), ('%s/from/header' % c.name, c.from_ends),
                  ('%s/to/source' % c.name,
                   c.to_ends), ('%s/to/header' % c.name, c.to_ends)):

            template = templates.lookup(t[0], c)

            if template is not None:
                for id, e in enumerate(t[1]):
                    item = '%s/%d' % (t[0], id)
                    g = ''
                    try:
                        g = r.render(e,
                                     assembly,
                                     template,
                                     obj_space,
                                     cspaces[e.instance.address_space],
                                     shmem,
                                     kept_symbols,
                                     fill_frames,
                                     options=renderoptions,
                                     my_pd=pds[e.instance.address_space])
                    except TemplateError as inst:
                        die([
                            'While rendering %s: %s' % (item, line)
                            for line in inst.args
                        ])
                    except jinja2.exceptions.TemplateNotFound:
                        die('While rendering %s: missing template for %s' %
                            (item, c.type.name))
                    save(item, g)
                    for (target, outfile) in (all_items - done_items):
                        if target == item:
                            if not template:
                                log.warning('Warning: no template for %s' %
                                            item)
                            done(g, outfile, item)
                            break

        # The following block handles instantiations of per-connection
        # templates that are neither a 'source' nor a 'header', as handled
        # above. We assume that none of these need instantiation unless we are
        # actually currently looking for them (== options.item). That is, we
        # assume that following templates, like the CapDL spec, do not require
        # these templates to be rendered prior to themselves.
        # FIXME: This is a pretty ugly way of handling this. It would be nicer
        # for the runner to have a more general notion of per-'thing' templates
        # where the per-component templates, the per-connection template loop
        # above, and this loop could all be done in a single unified control
        # flow.
        for (item, outfile) in (all_items - done_items):
            for t in (('%s/from/' % c.name, c.from_ends), ('%s/to/' % c.name,
                                                           c.to_ends)):

                if not item.startswith(t[0]):
                    # This is not the item we're looking for.
                    continue

                # If we've reached here then this is the exact item we're after.
                template = templates.lookup(item, c)
                if template is None:
                    die('no registered template for %s' % item)

                for e in t[1]:
                    try:
                        g = r.render(e,
                                     assembly,
                                     template,
                                     obj_space,
                                     cspaces[e.instance.address_space],
                                     shmem,
                                     kept_symbols,
                                     fill_frames,
                                     options=renderoptions,
                                     my_pd=pds[e.instance.address_space])
                        save(item, g)
                        done(g, outfile, item)
                    except TemplateError as inst:
                        die([
                            'While rendering %s: %s' % (item, line)
                            for line in inst.args
                        ])

    # Perform any per-component special generation. This needs to happen last,
    # as these templates need to run after all other capabilities have been
    # allocated.
    for i in assembly.composition.instances:
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue
        assert i.address_space in cspaces
        SPECIAL_TEMPLATES = [('debug', 'debug'), ('simple', 'simple'),
                             ('rump_config', 'rumprun')]
        for special in [
                bl for bl in SPECIAL_TEMPLATES if conf[i.name].get(bl[0])
        ]:
            for t in ('%s/%s' % (i.name, special[1]), ):
                try:
                    template = templates.lookup(t, i)
                    g = ''
                    if template:
                        g = r.render(i,
                                     assembly,
                                     template,
                                     obj_space,
                                     cspaces[i.address_space],
                                     shmem,
                                     kept_symbols,
                                     fill_frames,
                                     options=renderoptions,
                                     my_pd=pds[i.address_space])
                    save(t, g)
                    for (item, outfile) in (all_items - done_items):
                        if item == t:
                            if not template:
                                log.warning('Warning: no template for %s' %
                                            item)
                            done(g, outfile, item)
                except TemplateError as inst:
                    die([
                        'While rendering %s: %s' % (i.name, line)
                        for line in inst.args
                    ])

    if options.data_structure_cache_dir is not None:
        # At this point the capdl database is in the state required for applying capdl
        # filters and generating the capdl spec. In case the capdl spec isn't the current
        # target, we pickle the database here, so when the capdl spec is built, these
        # data structures don't need to be regenerated.
        cache_path = os.path.realpath(options.data_structure_cache_dir)
        pickle_path = os.path.join(cache_path, CAPDL_STATE_PICKLE)
        with open(pickle_path, 'wb') as pickle_file:
            pickle.dump(
                (obj_space, shmem, cspaces, pds, kept_symbols, fill_frames),
                pickle_file)

    for (item, outfile) in (all_items - done_items):
        if item in ('capdl', 'label-mapping'):
            apply_capdl_filters()

    # Instantiate any other, miscellaneous template. If we've reached this
    # point, we know the user did not request a code template.
    instantiate_misc_template()

    # Check if there are any remaining items
    not_done = all_items - done_items
    if len(not_done) > 0:
        for (item, outfile) in not_done:
            err.write('No valid element matching --item %s.\n' % item)
        return -1
    return 0
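
Taken together, the two lookups above form a two-tier scheme: the level A cache keys on the (outfile-elided) command line plus the raw input files, while the level B cache keys on a hash of the parsed AST, so it can still hit when only semantically irrelevant bytes such as whitespace or comments change. A condensed, hypothetical sketch of that control flow, with the caches and helpers passed in as parameters:

def generate_with_caches(args, cwd, cachea, cacheb, parse, ast_hash_fn, render):
    # Tier 1 (level A): identical source bytes seen before, so reuse the
    # output without even parsing.
    output = cachea.load(args, cwd)
    if output is not None:
        return output

    # Tier 2 (level B): parse, then key on the AST hash, which is stable
    # across the comment/whitespace-only edits that defeat level A.
    ast, inputs = parse()
    ast_hash = ast_hash_fn(ast)
    output = cacheb.load(ast_hash, args, set())
    if output is not None:
        return output

    # Full miss: render, then prime both tiers for future runs.
    output = render(ast)
    cachea.save(args, cwd, output, inputs)
    cacheb.save(ast_hash, args, set(), output)
    return output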
Example #17
def main(argv, out, err):

    # We need a UTF-8 locale, so bail out if we don't have one. More
    # specifically, things like the version() computation traverse the file
    # system and, if they hit a UTF-8 filename, they try to decode it into your
    # preferred encoding and trigger an exception.
    encoding = locale.getpreferredencoding().lower()
    if encoding not in ('utf-8', 'utf8'):
        err.write('CAmkES uses UTF-8 encoding, but your locale\'s preferred '
            'encoding is %s. You can override your locale with the LANG '
            'environment variable.\n' % encoding)
        return -1

    options = parse_args(argv, out, err)

    # Ensure we were supplied an equal number of items and outfiles.
    if len(options.outfile) != len(options.item):
        err.write('Different number of items and outfiles. Exactly one outfile '
            'location is required per item requested.\n')
        return -1

    # No duplicates in items or outfiles
    if len(set(options.item)) != len(options.item):
        err.write('Duplicate items requested through --item.\n')
        return -1
    if len(set(options.outfile)) != len(options.outfile):
        err.write('Duplicate outfiles requested through --outfile.\n')
        return -1

    # Save us having to pass debugging everywhere.
    die = functools.partial(_die, options)

    log.set_verbosity(options.verbosity)

    cwd = os.getcwd()

    # Build the set of item/outfile pairs that we have yet to match and process
    all_items = set(zip(options.item, options.outfile))
    done_items = set([])

    # Construct the compilation caches if requested.
    cachea = None
    cacheb = None
    if options.cache:

        # Construct a modified version of the command line arguments that we'll
        # use in the keys to the caches. Essentially we elide --outfile and its
        # parameter under the assumption that this value is never used in code
        # generation. The purpose of this is to allow us to successfully cache
        # ancillary outputs that we generate along the way to the current
        # output. If we were to include --outfile in the key, future attempts
        # to generate these ancillary outputs would unnecessarily miss the
        # entries generated by this execution.
        args = []
        skip = False
        for index, arg in enumerate(argv[1:]):
            if skip:
                skip = False
                continue
            if arg in ('--outfile', '-O'):
                skip = True
                continue
            args.append(arg)

        cachea = LevelACache(os.path.join(options.cache_dir, version(), 'cachea'))
        cacheb = LevelBCache(os.path.join(options.cache_dir, version(), 'cacheb'))

    def done(s, file, item):
        ret = 0
        if s:
            file.write(s)
            file.close()
        if cachea is not None:
            try:
                cachea.flush()
            except sqlite3.OperationalError as e:
                # The following suppresses two spurious errors:
                #  1. The database is locked. In a large, parallel build, writes
                #     to the level A cache are heavily contended and this error
                #     can occur.
                #  2. The database structure is unexpected. If the CAmkES
                #     sources have changed *while* the runner was executing,
                #     the level A cache can be looking in a different place to
                #     where the cache was created.
                # Both of these are non-critical (will just result in a
                # potential future cache miss) so there's no need to alarm the
                # user.
                if re.search(r'database is locked', str(e)) is not None or \
                   re.search(r'no such table', str(e)) is not None:
                    log.debug('failed to flush level A cache: %s' % str(e))
                else:
                    raise
        if cacheb is not None:
            try:
                cacheb.flush()
            except sqlite3.OperationalError as e:
                # As above for the level B cache.
                if re.search(r'database is locked', str(e)):
                    log.debug('failed to flush level B cache: %s' % str(e))
                else:
                    raise

        done_items.add((item, file))
        if len(all_items - done_items) == 0:
            sys.exit(ret)

    # Try to find this output in the level A cache if possible. This check will
    # 'hit' if the source files representing the input spec are identical to
    # those of some previously observed execution.
    if cachea is not None:
        assert 'args' in locals()
        assert len(options.outfile) == 1, 'level A cache only supported when requesting ' \
            'single items'
        output = cachea.load(args, cwd)
        if output is not None:
            log.debug('Retrieved %(platform)s/%(item)s from level A cache' %
                options.__dict__)
            done(output, options.outfile[0], options.item[0])

    filename = os.path.abspath(options.file.name)

    try:
        # Build the parser options
        parse_options = ParserOptions(options.cpp, options.cpp_flag, options.import_path, options.verbosity, options.allow_forward_references)
        ast, read = parse_file_cached(filename, options.data_structure_cache_dir, parse_options)
    except (ASTError, ParseError) as e:
        die(e.args)

    # Locate the assembly.
    assembly = ast.assembly
    if assembly is None:
        die('No assembly found')

    # Do some extra checks if the user asked for verbose output.
    if options.verbosity >= 2:

        # Try to catch type mismatches in attribute settings. Note that it is
        # not possible to conclusively evaluate type correctness because the
        # attributes' type system is (deliberately) too loose. That is, the
        # type of an attribute can be an uninterpreted C type the user will
        # provide post hoc.
        for i in assembly.composition.instances:
            for a in i.type.attributes:
                value = assembly.configuration[i.name].get(a.name)
                if value is not None:
                    if a.type == 'string' and not \
                            isinstance(value, six.string_types):
                        log.warning('attribute %s.%s has type string but is '
                            'set to a value that is not a string' % (i.name,
                            a.name))
                    elif a.type == 'int' and not \
                            isinstance(value, numbers.Number):
                        log.warning('attribute %s.%s has type int but is set '
                            'to a value that is not an integer' % (i.name,
                                a.name))

    obj_space = ObjectAllocator()
    obj_space.spec.arch = options.architecture
    cspaces = {}
    pds = {}
    conf = assembly.configuration
    shmem = collections.defaultdict(ShmemFactory())
    kept_symbols = {}
    fill_frames = {}

    templates = Templates(options.platform)
    [templates.add_root(t) for t in options.templates]
    try:
        r = Renderer(templates, options.cache, options.cache_dir)
    except jinja2.exceptions.TemplateSyntaxError as e:
        die('template syntax error: %s' % e)

    # The user may have provided their own connector definitions (with
    # associated templates), in which case they won't be in the built-in lookup
    # dictionary. Let's add them now. Note, definitions here that conflict with
    # existing lookup entries will overwrite the existing entries. Note that
    # the extra check that the connector has some templates is just an
    # optimisation; the templates module handles connectors without templates
    # just fine.
    extra_templates = set()
    for c in (x for x in ast.items if isinstance(x, Connector) and
            (x.from_template is not None or x.to_template is not None)):
        try:
            # Find a connection that uses this type.
            connection = next(x for x in ast if isinstance(x, Connection) and
                x.type == c)
            # Add the custom templates and update our collection of read
            # inputs. It is necessary to update the read set here to avoid
            # false compilation cache hits when the source of a custom template
            # has changed.
            extra_templates |= templates.add(c, connection)
        except TemplateError as e:
            die('while adding connector %s: %s' % (c.name, e))
        except StopIteration:
            # No connections use this type. There's no point adding it to the
            # template lookup dictionary.
            pass

    # Check if our current target is in the level B cache. The level A cache
    # will 'miss' and this one will 'hit' when the input spec is identical to
    # that of some previously observed execution modulo a semantically
    # irrelevant element (e.g. an introduced comment).
    ast_hash = None
    if cacheb is not None:
        ast_hash = level_b_prime(ast)
        assert 'args' in locals()
        assert len(options.item) == 1, 'level B cache only supported when requesting ' \
            'single items'
        output = cacheb.load(ast_hash, args, set(options.elf) | extra_templates)
        if output is not None:
            log.debug('Retrieved %(platform)s/%(item)s from level B cache' %
                options.__dict__)
            done(output, options.outfile[0], options.item[0])

    # Add custom templates.
    read |= extra_templates

    # Add the CAmkES sources themselves to the accumulated list of inputs.
    read |= set(path for path, _ in sources())

    # Add any ELF files we were passed as inputs.
    read |= set(options.elf)

    # Write a Makefile dependency rule if requested.
    if options.makefile_dependencies is not None:
        options.makefile_dependencies.write('%s: \\\n  %s\n' %
            (filename, ' \\\n  '.join(sorted(read))))

    # If we have a cache, allow outputs to be saved to it.
    if options.cache:

        assert cachea is not None, 'level A cache not available, though the ' \
            'cache is enabled (bug in runner?)'
        # The logic of this cache currently only works when a single item is requested
        # on the command line
        assert len(options.item) == 1, 'level A cache only supported when requesting ' \
            'single items'

        # Calculate the input files to the level A cache.
        inputs = level_a_prime(read)

        # Work out the position of the --item argument in the command line
        # parameters. We will use this to cache not only outputs for this
        # execution, but also outputs for ones with a different target.
        item_index = None
        assert 'args' in locals()
        for index, arg in enumerate(args[:-1]):
            if arg in ('--item', '-T'):
                item_index = index + 1
                break
        assert item_index is not None, 'failed to find required argument ' \
            '--item (bug in runner?)'

        # We should already have the necessary inputs for the level B cache.
        assert cacheb is not None, 'level B cache not available, though the ' \
            'cache is enabled (bug in runner?)'
        assert ast_hash is not None, 'AST hash not pre-computed (bug in ' \
            'runner?)'

        def save(item, value):
            # Juggle the command line arguments to cache the predicted
            # arguments for a call that would generate this item.
            new_args = args[:item_index] + [item] + args[item_index + 1:]

            # Save entries in both caches.
            cachea.save(new_args, cwd, value, inputs)
            if item != 'Makefile' and item != 'camkes-gen.cmake':
                # We avoid caching the generated Makefile because it is not
                # safe. The inputs to generation of the Makefile are not only
                # the AST, but also the file names (`inputs`). If we cache it in
                # the level B cache we risk the following scenario:
                #
                #   1. Generate the Makefile, caching it in the level B cache;
                #   2. Modify the spec to import a file containing only white
                #      space and/or comments; then
                #   3. Generate the Makefile, missing the level A cache, but
                #      hitting the level B cache.
                #
                # At this point, the generated Makefile is incorrect because it
                # does not capture any dependencies on the imported file. We can
                # now introduce something semantically relevant into this file
                # (e.g. an Assembly block) and it will not be seen by the build
                # system.
                cacheb.save(ast_hash, new_args,
                    set(options.elf) | extra_templates, value)
    else:
        def save(item, value):
            pass

    def apply_capdl_filters():
        # Derive a set of usable ELF objects from the filenames we were passed.
        elfs = {}
        for e in options.elf:
            try:
                name = os.path.basename(e)
                if name in elfs:
                    raise Exception('duplicate ELF files of name \'%s\' encountered' % name)
                elf = ELF(e, name, options.architecture)
                p = Perspective(phase=RUNNER, elf_name=name)
                group = p['group']
                # Avoid inferring a TCB as we've already created our own.
                elf_spec = elf.get_spec(infer_tcb=False, infer_asid=False,
                    pd=pds[group], use_large_frames=options.largeframe)
                obj_space.merge(elf_spec, label=group)
                elfs[name] = (e, elf)
            except Exception as inst:
                die('While opening \'%s\': %s' % (e, inst))

        filteroptions = FilterOptions(options.architecture, options.realtime, options.largeframe,
            options.largeframe_dma, options.default_priority, options.default_max_priority,
            options.default_affinity, options.default_period, options.default_budget,
            options.default_data, options.default_size_bits,
            options.debug_fault_handlers, options.fprovide_tcb_caps)
        for f in CAPDL_FILTERS:
            try:
                # Pass everything as named arguments to allow filters to
                # easily ignore what they don't want.
                f(ast=ast, obj_space=obj_space, cspaces=cspaces, elfs=elfs,
                    options=filteroptions, shmem=shmem, fill_frames=fill_frames)
            except Exception as inst:
                die('While forming CapDL spec: %s' % inst)
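    # A minimal sketch of a conforming CapDL filter, assuming only the
    # keyword-argument convention used in apply_capdl_filters above. The name
    # and body are hypothetical; a real filter would be registered in
    # CAPDL_FILTERS rather than defined inline like this.
    def _example_noop_filter(ast=None, obj_space=None, cspaces=None, elfs=None,
            options=None, shmem=None, fill_frames=None):
        # A real filter would inspect or mutate obj_space here; accepting
        # everything as keywords lets it ignore the arguments it doesn't need.
        pass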

    renderoptions = RenderOptions(options.file, options.verbosity, options.frpc_lock_elision,
        options.fspecialise_syscall_stubs, options.fprovide_tcb_caps, options.fsupport_init,
        options.largeframe, options.largeframe_dma, options.architecture, options.debug_fault_handlers,
        options.realtime)

    def instantiate_misc_template():
        for (item, outfile) in (all_items - done_items):
            try:
                template = templates.lookup(item)
                if template:
                    g = r.render(assembly, assembly, template, obj_space, None,
                        shmem, kept_symbols, fill_frames, imported=read, options=renderoptions)
                    save(item, g)
                    done(g, outfile, item)
            except TemplateError as inst:
                die(rendering_error(item, inst))

    if options.item[0] in ('capdl', 'label-mapping') and options.data_structure_cache_dir is not None \
            and len(options.outfile) == 1:
        # It's possible that data structures required to instantiate the capdl spec
        # were saved during a previous invocation of this script in the current build.
        cache_path = os.path.realpath(options.data_structure_cache_dir)
        pickle_path = os.path.join(cache_path, CAPDL_STATE_PICKLE)

        if os.path.isfile(pickle_path):
            with open(pickle_path, 'rb') as pickle_file:
                # Found a cached version of the necessary data structures.
                obj_space, shmem, cspaces, pds, kept_symbols, fill_frames = pickle.load(pickle_file)
                apply_capdl_filters()
                instantiate_misc_template()

                # If no template was instantiated, something went wrong and we
                # can't recover.
                raise CAmkESError("No template instantiated on capdl generation fastpath")

    # We're now ready to instantiate the template the user requested, but there
    # are a few wrinkles in the process. Namely,
    #  1. Template instantiation needs to be done in a deterministic order. The
    #     runner is invoked multiple times and template code needs to be
    #     allocated identical cap slots in each run.
    #  2. Components and connections need to be instantiated before any other
    #     templates, regardless of whether they are the ones we are after. Some
    #     other templates, such as the Makefile, depend on the obj_space and
    #     cspaces.
    #  3. All actual code templates, up to the template that was requested,
    #     need to be instantiated. This is related to (1) in that the cap slots
    #     allocated are dependent on what allocations have been done prior to a
    #     given allocation call.
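    # As a (hypothetical) concrete example of wrinkles 1 and 3: if instance
    # 'foo' precedes 'bar' in the composition, a run requesting only
    # 'bar/source' must still render foo's templates first, so that 'bar'
    # receives the same cap slots it would in a run requesting 'foo/source'.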

    # Instantiate the per-component source and header files.
    for i in assembly.composition.instances:
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue

        if i.address_space not in cspaces:
            p = Perspective(phase=RUNNER, instance=i.name,
                group=i.address_space)
            cnode = obj_space.alloc(seL4_CapTableObject,
                name=p['cnode'], label=i.address_space)
            cspaces[i.address_space] = CSpaceAllocator(cnode)
            pd = obj_space.alloc(lookup_architecture(options.architecture).vspace().object, name=p['pd'],
                label=i.address_space)
            pds[i.address_space] = pd
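            # Note: at this point each address space owns exactly one CNode
            # (wrapped by the CSpaceAllocator, which presumably hands out cap
            # slots from it) and one top-level paging object for its VSpace.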

        for t in ('%s/source' % i.name, '%s/header' % i.name,
                '%s/c_environment_source' % i.name,
                '%s/cakeml_start_source' % i.name, '%s/cakeml_end_source' % i.name,
                '%s/linker' % i.name):
            try:
                template = templates.lookup(t, i)
                g = ''
                if template:
                    g = r.render(i, assembly, template, obj_space, cspaces[i.address_space],
                        shmem, kept_symbols, fill_frames, options=renderoptions, my_pd=pds[i.address_space])
                save(t, g)
                for (item, outfile) in (all_items - done_items):
                    if item == t:
                        if not template:
                            log.warning('Warning: no template for %s' % item)
                        done(g, outfile, item)
                        break
            except TemplateError as inst:
                die(rendering_error(i.name, inst))

    # Instantiate the per-connection files.
    for c in assembly.composition.connections:

        for t in (('%s/from/source' % c.name, c.from_ends),
                  ('%s/from/header' % c.name, c.from_ends),
                  ('%s/to/source' % c.name, c.to_ends),
                  ('%s/to/header' % c.name, c.to_ends)):

            template = templates.lookup(t[0], c)

            if template is not None:
                for end_index, e in enumerate(t[1]):
                    item = '%s/%d' % (t[0], end_index)
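                    # e.g. 'conn0/from/source/0' for the first 'from' end of a
                    # connection named 'conn0' (name hypothetical).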
                    g = ''
                    try:
                        g = r.render(e, assembly, template, obj_space,
                            cspaces[e.instance.address_space], shmem, kept_symbols, fill_frames,
                            options=renderoptions, my_pd=pds[e.instance.address_space])
                    except TemplateError as inst:
                        die(rendering_error(item, inst))
                    except jinja2.exceptions.TemplateNotFound:
                        die('While rendering %s: missing template for %s' %
                            (item, c.type.name))
                    save(item, g)
                    for (target, outfile) in (all_items - done_items):
                        if target == item:
                            if not template:
                                log.warning('Warning: no template for %s' % item)
                            done(g, outfile, item)
                            break

        # The following block handles instantiation of per-connection
        # templates that are neither a 'source' nor a 'header' (those were
        # handled above). We assume that none of these need instantiation
        # unless we are currently looking for them (== options.item). That is,
        # we assume that later templates, like the CapDL spec, do not require
        # these templates to have been rendered before them.
        # FIXME: This is a pretty ugly way of handling this. It would be nicer
        # for the runner to have a more general notion of per-'thing' templates
        # where the per-component templates, the per-connection template loop
        # above, and this loop could all be done in a single unified control
        # flow.
        for (item, outfile) in (all_items - done_items):
            for t in (('%s/from/' % c.name, c.from_ends),
                    ('%s/to/' % c.name, c.to_ends)):

                if not item.startswith(t[0]):
                    # This is not the item we're looking for.
                    continue

                # If we've reached here then this is the exact item we're after.
                template = templates.lookup(item, c)
                if template is None:
                    die('no registered template for %s' % item)

                for e in t[1]:
                    try:
                        g = r.render(e, assembly, template, obj_space,
                            cspaces[e.instance.address_space], shmem, kept_symbols, fill_frames,
                            options=renderoptions, my_pd=pds[e.instance.address_space])
                        save(item, g)
                        done(g, outfile, item)
                    except TemplateError as inst:
                        die(rendering_error(item, inst))

    # Perform any per-component special generation. This needs to happen last,
    # as these templates need to run after all other capabilities have been
    # allocated.
    SPECIAL_TEMPLATES = [('debug', 'debug'), ('simple', 'simple'),
        ('rump_config', 'rumprun')]
    for i in assembly.composition.instances:
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue
        assert i.address_space in cspaces
        for special in [bl for bl in SPECIAL_TEMPLATES if conf[i.name].get(bl[0])]:
            for t in ('%s/%s' % (i.name, special[1]),):
                try:
                    template = templates.lookup(t, i)
                    g = ''
                    if template:
                        g = r.render(i, assembly, template, obj_space, cspaces[i.address_space],
                            shmem, kept_symbols, fill_frames, options=renderoptions, my_pd=pds[i.address_space])
                    save(t, g)
                    for (item, outfile) in (all_items - done_items):
                        if item == t:
                            if not template:
                                log.warning('Warning: no template for %s' % item)
                            done(g, outfile, item)
                            break
                except TemplateError as inst:
                    die(rendering_error(i.name, inst))

    if options.data_structure_cache_dir is not None:
        # At this point the capdl database is in the state required for applying capdl
        # filters and generating the capdl spec. In case the capdl spec isn't the current
        # target, we pickle the database here, so when the capdl spec is built, these
        # data structures don't need to be regenerated.
        cache_path = os.path.realpath(options.data_structure_cache_dir)
        pickle_path = os.path.join(cache_path, CAPDL_STATE_PICKLE)
        with open(pickle_path, 'wb') as pickle_file:
            pickle.dump((obj_space, shmem, cspaces, pds, kept_symbols, fill_frames), pickle_file)
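        # A later invocation that requests 'capdl' or 'label-mapping' can then
        # take the fastpath near the top of this function, unpickling these
        # six structures instead of re-rendering every component template.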

    for (item, outfile) in (all_items - done_items):
        if item in ('capdl', 'label-mapping'):
            apply_capdl_filters()

    # Instantiate any other, miscellaneous template. If we've reached this
    # point, we know the user did not request a code template.
    instantiate_misc_template()

    # Check if there are any remaining items.
    not_done = all_items - done_items
    if not_done:
        for (item, outfile) in not_done:
            err.write('No valid element matching --item %s.\n' % item)
        return -1
    return 0