Example #1
def tcb_priorities(ast, cspaces, **_):
    ''' Override a TCB's default priority if the user has specified this in an
    attribute.'''

    assembly = find_assembly(ast)

    if assembly.configuration is None or \
            len(assembly.configuration.settings) == 0:
        # We have nothing to do if no priorities were set.
        return

    for group, space in cspaces.items():
        cnode = space.cnode
        for tcb in [v.referent for v in cnode.slots.values() \
                if v is not None and isinstance(v.referent, TCB)]:

            perspective = Perspective(group=group, tcb=tcb.name)

            # Find the priority if it was set.
            prio_attribute = perspective['priority_attribute']
            name = perspective['instance']
            prio = assembly.configuration[name].get(prio_attribute)
            if prio is not None:
                tcb.prio = prio
            else:
                # See if the user assigned a general priority to this component.
                prio = assembly.configuration[name].get('priority')
                if prio is not None:
                    tcb.prio = prio
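
The lookup above falls back from a per-thread priority attribute to a component-wide 'priority' setting. Below is a minimal standalone sketch of that fallback, using a plain dict in place of the CAmkES configuration object; the key names are hypothetical.

# Sketch only: 'settings' stands in for assembly.configuration[name] and the
# keys are illustrative, not the real attribute names.
def resolve_priority(settings, specific_key, default=None):
    '''Return the per-TCB priority if set, else the component-wide one.'''
    value = settings.get(specific_key)
    if value is None:
        value = settings.get('priority', default)
    return value

assert resolve_priority({'_0_priority': 254}, '_0_priority') == 254
assert resolve_priority({'priority': 100}, '_0_priority') == 100
assert resolve_priority({}, '_0_priority') is None
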
Example #2
def sc_properties(ast, obj_space, cspaces, elfs, options, shmem):
    ''' Override an SC's default properties if the user has specified this in an
    attribute.'''

    assembly = find_assembly(ast)

    if assembly.configuration is None or \
            len(assembly.configuration.settings) == 0:
        # We have nothing to do if no properties were set.
        return

    settings = assembly.configuration.settings

    for group, space in cspaces.items():
        cnode = space.cnode
        for cap in cnode.slots.values():

            if cap is None:
                continue
            sc = cap.referent
            if not isinstance(sc, SC):
                continue

            perspective = Perspective(group=group, sc=sc.name)

            # Find the period if it was set.
            period_attribute = perspective['period_attribute']
            name = perspective['instance']
            period = assembly.configuration[name].get(period_attribute)
            if period is not None:
                sc.period = period

            # Find the budget if it was set.
            budget_attribute = perspective['budget_attribute']
            name = perspective['instance']
            budget = assembly.configuration[name].get(budget_attribute)
            if budget is not None:
                sc.budget = budget

            # Find the data if it was set.
            data_attribute = perspective['data_attribute']
            name = perspective['instance']
            data = assembly.configuration[name].get(data_attribute)
            if data is not None:
                sc.data = data
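
The three property lookups in sc_properties all follow the same shape. Here is a condensed sketch of that pattern, using stand-in objects rather than the real SC and configuration types; the attribute names are illustrative.

# Sketch only: FakeSC and the perspective dict stand in for the real
# CAmkES objects.
class FakeSC(object):
    period = None
    budget = None
    data = None

def apply_sc_properties(sc, config, perspective):
    for field in ('period', 'budget', 'data'):
        value = config.get(perspective['%s_attribute' % field])
        if value is not None:
            setattr(sc, field, value)

sc = FakeSC()
perspective = {'period_attribute': 'inst_period',
               'budget_attribute': 'inst_budget',
               'data_attribute': 'inst_data'}
apply_sc_properties(sc, {'inst_period': 1000, 'inst_budget': 500}, perspective)
assert (sc.period, sc.budget, sc.data) == (1000, 500, None)
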
Example #3
def tcb_domains(ast, cspaces, **_):
    '''Set the domain of a TCB if the user has specified this in an
    attribute.'''

    assembly = find_assembly(ast)

    if assembly.configuration is None or \
            len(assembly.configuration.settings) == 0:
        # We have nothing to do if no domains were set.
        return

    for group, space in cspaces.items():
        cnode = space.cnode
        for tcb in [x.referent for x in cnode.slots.values() if \
                (x is not None and isinstance(x.referent, TCB))]:

            perspective = Perspective(group=group, tcb=tcb.name)

            # Find the domain if it was set.
            dom_attribute = perspective['domain_attribute']
            name = perspective['instance']
            dom = assembly.configuration[name].get(dom_attribute)
            if dom is not None:
                tcb.domain = dom
Example #4
def set_tcb_info(cspaces, elfs, **_):
    '''Set relevant extra info for TCB objects.'''

    for group, space in cspaces.items():
        cnode = space.cnode
        for index, tcb in [(k, v.referent) for (k, v) in cnode.slots.items() \
                if v is not None and isinstance(v.referent, TCB)]:

            perspective = Perspective(group=group, tcb=tcb.name)

            elf_name = perspective['elf_name']

            elf = elfs.get(elf_name)

            if elf is None:
                # We were not passed an ELF file for this CSpace. This will be
                # true in the first pass of the runner where no ELFs are passed.
                continue

            tcb.elf = elf_name
            tcb.ip = get_entry_point(elf)

            if perspective['pool']:
                # This TCB is part of the (cap allocator's) TCB pool.
                continue

            stack_symbol = perspective['stack_symbol']
            ipc_buffer_symbol = perspective['ipc_buffer_symbol']

            # The stack should be at least three pages and the IPC buffer
            # region should be exactly three pages. Note that both regions
            # include a guard page on either side of the used area. It is
            # assumed that the stack grows downwards.
            stack_size = get_symbol_size(elf, stack_symbol)
            assert stack_size is not None, 'Stack for %(name)s, ' \
                '\'%(symbol)s\', not found' % {
                    'name':tcb.name,
                    'symbol':stack_symbol,
                }
            assert stack_size >= PAGE_SIZE * 3, 'Stack for %(name)s, ' \
                '\'%(symbol)s\', is only %(size)d bytes and does not have ' \
                'room for guard pages' % {
                    'name':tcb.name,
                    'symbol':stack_symbol,
                    'size':stack_size,
                }
            assert get_symbol_size(elf, ipc_buffer_symbol) == PAGE_SIZE * 3

            # Move the stack pointer to the top of the stack. Extra page is
            # to account for the (upper) guard page.
            assert stack_size % PAGE_SIZE == 0
            tcb.sp = get_symbol_vaddr(elf, stack_symbol) + stack_size - PAGE_SIZE
            tcb.addr = get_symbol_vaddr(elf, ipc_buffer_symbol) + \
                2 * PAGE_SIZE - IPC_BUFFER_SIZE

            # Each TCB needs to be passed its 'thread_id', the value it
            # branches on in main(). This corresponds to the slot index of
            # the cap to it in the component's CNode.
            tcb.init.append(index)

            # The group's entry point expects to be passed a function pointer
            # as the second argument that is the instance's entry point.
            component_entry = perspective['entry_symbol']
            vaddr = get_symbol_vaddr(elf, component_entry)
            if vaddr is None:
                raise Exception('Entry point, %s, of %s not found' %
                    (component_entry, tcb.name))
            tcb.init.append(vaddr)

            # The group's entry point expects to be passed, as its third
            # argument, a function pointer that performs early TLS setup.
            tls_setup = perspective['tls_symbol']
            vaddr = get_symbol_vaddr(elf, tls_setup)
            if vaddr is None:
                raise Exception('TLS symbol, %s, of %s not found' % (tls_setup, tcb.name))
            tcb.init.append(vaddr)
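
The stack pointer and IPC buffer address computed by set_tcb_info follow directly from the guard-page layout described in the comments above. Below is a worked example with illustrative constants; the PAGE_SIZE and IPC_BUFFER_SIZE values and the symbol addresses are assumptions, not taken from the code.

# Worked example of the SP / IPC buffer address arithmetic (illustrative values).
PAGE_SIZE = 4096
IPC_BUFFER_SIZE = 512          # assumed IPC buffer size

stack_base = 0x00400000        # hypothetical vaddr of the stack symbol
stack_size = 4 * PAGE_SIZE     # two usable pages with a guard page either side

# The stack grows downwards, so the initial SP sits just below the upper
# guard page.
sp = stack_base + stack_size - PAGE_SIZE
assert sp == 0x00403000

ipc_base = 0x00500000          # hypothetical vaddr of the IPC buffer symbol
# The three-page IPC region is guard / buffer page / guard; the IPC buffer
# itself sits at the top of the middle page.
ipc_addr = ipc_base + 2 * PAGE_SIZE - IPC_BUFFER_SIZE
assert ipc_addr == 0x00501e00
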
Example #5
def guard_pages(obj_space, cspaces, elfs, options, **_):
    '''Introduce a guard page around each stack and IPC buffer. Note that the
    templates should have ensured a three page region for each stack in order to
    enable this.'''

    for group, space in cspaces.items():
        cnode = space.cnode
        for index, tcb in [(k, v.referent) for (k, v) in cnode.slots.items() \
                if v is not None and isinstance(v.referent, TCB)]:

            perspective = Perspective(group=group, tcb=tcb.name)

            if perspective['pool']:
                # This TCB is part of the (cap allocator's) TCB pool.
                continue

            elf_name = perspective['elf_name']

            # Find the page directory.
            pd = None
            pd_name = perspective['pd']
            pds = [x for x in obj_space.spec.objs if x.name == pd_name]
            if len(pds) > 1:
                raise Exception('Multiple PDs found for group %s' % group)
            elif len(pds) == 1:
                pd, = pds
                tcb['vspace'] = Cap(pd)
            # If no PD was found we were probably just not passed any ELF files
            # in this pass.

            elf = elfs.get(elf_name)

            if pd and elf:

                ipc_symbol = perspective['ipc_buffer_symbol']

                # Find the IPC buffer's preceding guard page's virtual address.
                assert get_symbol_size(elf, ipc_symbol) == PAGE_SIZE * 3
                pre_guard = get_symbol_vaddr(elf, ipc_symbol)

                # Relate this virtual address to a PT.
                pt_index = page_table_index(get_elf_arch(elf), pre_guard,
                    options.hyp)
                if pt_index not in pd:
                    raise Exception('IPC buffer region of TCB %s in ' \
                        'group %s does not appear to be backed by a frame' \
                        % (tcb.name, group))
                pt = pd[pt_index].referent

                # Continue on to infer the page.
                p_index = page_index(get_elf_arch(elf), pre_guard, options.hyp)
                if p_index not in pt:
                    raise Exception('IPC buffer region of TCB %s in ' \
                        'group %s does not appear to be backed by a frame' \
                        % (tcb.name, group))

                # Delete the page.
                frame = pt[p_index].referent
                del pt[p_index]
                obj_space.remove(frame)

                # Now do the same for the following guard page. We do this
                # calculation separately just in case the region crosses a PT
                # boundary and the two guard pages are in separate PTs.

                post_guard = pre_guard + 2 * PAGE_SIZE

                pt_index = page_table_index(get_elf_arch(elf), post_guard,
                    options.hyp)
                if pt_index not in pd:
                    raise Exception('IPC buffer region of TCB %s in ' \
                        'group %s does not appear to be backed by a frame' \
                        % (tcb.name, group))
                pt = pd[pt_index].referent

                p_index = page_index(get_elf_arch(elf), post_guard, options.hyp)
                if p_index not in pt:
                    raise Exception('IPC buffer region of TCB %s in ' \
                        'group %s does not appear to be backed by a frame' \
                        % (tcb.name, group))

                frame = pt[p_index].referent
                del pt[p_index]
                obj_space.remove(frame)

                # Now we do the same thing for the preceding guard page of the
                # thread's stack...

                stack_symbol = perspective['stack_symbol']

                pre_guard = get_symbol_vaddr(elf, stack_symbol)

                pt_index = page_table_index(get_elf_arch(elf), pre_guard,
                    options.hyp)
                if pt_index not in pd:
                    raise Exception('stack region of TCB %s in ' \
                        'group %s does not appear to be backed by a frame' \
                        % (tcb.name, group))
                pt = pd[pt_index].referent

                p_index = page_index(get_elf_arch(elf), pre_guard, options.hyp)
                if p_index not in pt:
                    raise Exception('stack region of TCB %s in ' \
                        'group %s does not appear to be backed by a frame' \
                        % (tcb.name, group))

                frame = pt[p_index].referent
                del pt[p_index]
                obj_space.remove(frame)

                # ...and the following guard page.

                stack_region_size = get_symbol_size(elf, stack_symbol)
                assert stack_region_size % PAGE_SIZE == 0, \
                    'stack region is not page-aligned'
                assert stack_region_size >= 3 * PAGE_SIZE, \
                    'stack region has no room for guard pages'
                post_guard = pre_guard + stack_region_size - PAGE_SIZE

                pt_index = page_table_index(get_elf_arch(elf), post_guard,
                    options.hyp)
                if pt_index not in pd:
                    raise Exception('stack region of TCB %s in ' \
                        'group %s does not appear to be backed by a frame' \
                        % (tcb.name, group))
                pt = pd[pt_index].referent

                p_index = page_index(get_elf_arch(elf), post_guard, options.hyp)
                if p_index not in pt:
                    raise Exception('stack region of TCB %s in ' \
                        'group %s does not appear to be backed by a frame' \
                        % (tcb.name, group))

                frame = pt[p_index].referent
                del pt[p_index]
                obj_space.remove(frame)
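
guard_pages relies on page_table_index and page_index to locate the frame backing a given virtual address. The sketch below shows how such an address split might work, assuming a classic 32-bit two-level layout with 4 KiB pages and 1024-entry page tables (x86 non-PAE style); the real helpers also take the ELF architecture and the hyp option into account.

# Minimal sketch of a virtual-address split under the stated assumptions.
PAGE_SIZE = 4096

def sketch_page_table_index(vaddr):
    return vaddr >> 22                  # which 4 MiB region / PD slot

def sketch_page_index(vaddr):
    return (vaddr >> 12) & 0x3ff        # which 4 KiB page within that PT

vaddr = 0x0804a000
assert sketch_page_table_index(vaddr) == 0x20
assert sketch_page_index(vaddr) == 0x4a
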
Example #6
def replace_dma_frames(ast, obj_space, elfs, options, **_):
    '''Locate the DMA pool (a region that needs to have frames whose mappings
    can be reversed) and replace its backing frames with pre-allocated,
    reversible ones.'''

    # TODO: Large parts of this function were copied from collapse_shared_frames; refactor.

    if not elfs:
        # If we haven't been passed any ELF files this step is not relevant yet.
        return

    assembly = find_assembly(ast)

    for i in (x for x in assembly.composition.instances
            if not x.type.hardware):

        perspective = Perspective(instance=i.name, group=i.address_space)

        elf_name = perspective['elf_name']
        assert elf_name in elfs
        elf = elfs[elf_name]

        # Find this instance's page directory.
        pd_name = perspective['pd']
        pds = [x for x in obj_space.spec.objs if x.name == pd_name]
        assert len(pds) == 1
        pd, = pds

        sym = perspective['dma_pool_symbol']
        base = get_symbol_vaddr(elf, sym)
        if base is None:
            # We don't have a DMA pool.
            continue
        assert base != 0
        sz = get_symbol_size(elf, sym)
        assert sz % PAGE_SIZE == 0 # DMA pool size should be a whole number of pages.

        # Generate a list of the base addresses of the pages we need to
        # replace.
        base_vaddrs = [PAGE_SIZE * x + base for x in
            range(int(sz / PAGE_SIZE))]

        for index, v in enumerate(base_vaddrs):
            # Locate the mapping.
            pt_index = page_table_index(get_elf_arch(elf), v, options.hyp)
            p_index = page_index(get_elf_arch(elf), v, options.hyp)

            # It should contain an existing frame.
            assert pt_index in pd
            pt = pd[pt_index].referent
            assert p_index in pt
            discard_frame = pt[p_index].referent

            # Locate the frame we're going to replace it with. The logic that
            # constructs this object name is in component.template.c. Note that
            # we need to account for the guard-prefix of the instance name
            # introduced by the template context.
            p = Perspective(instance=i.name, group=i.address_space,
                dma_frame_index=index)
            dma_frames = [x for x in obj_space.spec.objs if
                x.name == p['dma_frame_symbol']]
            assert len(dma_frames) == 1
            dma_frame, = dma_frames

            # Replace the existing mapping.
            c = Cap(dma_frame, True, True, False) # RW
            c.set_cached(False)
            pt.slots[p_index] = c

            # We can now remove the old frame as we know it's not referenced
            # anywhere else. TODO: assert this somehow.
            obj_space.remove(discard_frame)
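
The list comprehension that builds base_vaddrs simply enumerates the page-aligned addresses covering the DMA pool. A tiny worked example with illustrative numbers:

# Sketch of the DMA-pool page enumeration (illustrative base address and size).
PAGE_SIZE = 4096
base = 0x00600000            # hypothetical vaddr of the DMA pool symbol
sz = 4 * PAGE_SIZE           # pool size must be a multiple of PAGE_SIZE

base_vaddrs = [PAGE_SIZE * x + base for x in range(sz // PAGE_SIZE)]
assert base_vaddrs == [0x00600000, 0x00601000, 0x00602000, 0x00603000]
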
Example #7
def collapse_shared_frames(ast, obj_space, elfs, options, **_):
    """Find regions in virtual address spaces that are intended to be backed by
    shared frames and adjust the capability distribution to reflect this."""

    if not elfs:
        # If we haven't been passed any ELF files this step is not relevant yet.
        return

    assembly = find_assembly(ast)

    # Track the frame objects backing shared regions with a dict keyed on the
    # instance/interface endpoints of the connections linking the regions.
    shared_frames = {}

    for i in (x for x in assembly.composition.instances
            if not x.type.hardware):

        perspective = Perspective(instance=i.name, group=i.address_space)

        elf_name = perspective['elf_name']
        assert elf_name in elfs
        elf = elfs[elf_name]

        # Find this instance's page directory.
        pd_name = perspective['pd']
        pds = [x for x in obj_space.spec.objs if x.name == pd_name]
        assert len(pds) == 1
        pd, = pds

        large_frame_uid = 0

        for d in i.type.dataports:

            # Find the connection that associates this dataport with another.
            connections = [x for x in assembly.composition.connections if \
                ((x.from_instance == i and x.from_interface == d) or \
                (x.to_instance == i and x.to_interface == d))]
            if len(connections) == 0:
                # This dataport is unconnected.
                continue
            #assert len(connections) == 1
            conn_name = connections[0].name

            if connections[0].from_instance == i and \
                    connections[0].from_interface == d:
                direction = 'from'
            else:
                assert connections[0].to_instance == i
                assert connections[0].to_interface == d
                direction = 'to'

            # Reverse the logic in the Makefile template.
            p = Perspective(instance=i.name, dataport=d.name)
            sym = p['dataport_symbol']

            vaddr = get_symbol_vaddr(elf, sym)
            assert vaddr is not None, 'failed to find dataport symbol \'%s\'' \
                ' in ELF %s' % (sym, elf_name)
            assert vaddr != 0
            assert vaddr % PAGE_SIZE == 0, 'dataport not page-aligned'
            sz = get_symbol_size(elf, sym)
            assert sz != 0

            arch = get_elf_arch(elf)

            # Infer the page table(s) and page(s) that back this region.
            pts, p_indices = zip(*[\
                (pd[page_table_index(arch, v, options.hyp)].referent,
                 page_index(arch, v, options.hyp)) \
                for v in xrange(vaddr, vaddr + sz, PAGE_SIZE)])

            # Determine the rights this mapping should have. We use these to
            # recreate the mapping below. Technically we may not need to
            # recreate this mapping if it's already correct, but do it anyway
            # for simplicity.
            # FIXME: stop hard coding this name mangling.
            rights_setting = assembly.configuration[conn_name].get('%s_access' % direction)
            if rights_setting is not None and \
                    re.match(r'^"R?W?(G|X)?"$', rights_setting):
                read = 'R' in rights_setting
                write = 'W' in rights_setting
                execute = 'X' in rights_setting or 'G' in rights_setting
            else:
                # default
                read = True
                write = True
                execute = False

            # Check if the dataport is connected *TO* a hardware component.
            if connections[0].to_instance.type.hardware:
                p = Perspective(to_interface=connections[0].to_interface.name)
                hardware_attribute = p['hardware_attribute']
                conf = assembly.configuration[connections[0].to_instance.name].get(hardware_attribute)
                assert conf is not None
                paddr, size = conf.strip('"').split(':')
                # Parse the physical address and size from the attribute value.
                paddr = int(paddr, 0)
                size = int(size, 0)

                instance_name = connections[0].to_instance.name

                if size == 0:
                    raise Exception('Hardware dataport %s.%s has zero size!' % (instance_name,
                        connections[0].to_interface.name))

                # determine the size of a large frame, and the type of kernel
                # object that will be used, both of which depend on the architecture
                if get_elf_arch(elf) == 'ARM':
                    large_size = 1024 * 1024
                    large_object_type = seL4_ARM_SectionObject
                else:
                    large_size = 4 * 1024 * 1024
                    large_object_type = seL4_IA32_4M

                # Check whether the MMIO start and end are aligned to page table
                # coverage. If so, back the device region with frames that each
                # cover a whole page table's worth of memory, to be consistent
                # with the kernel.
                if paddr % large_size == 0 and size % large_size == 0:

                    # number of page tables backing device memory
                    n_pts = size / large_size

                    # index of first page table in page directory backing the device memory
                    base_pt_index = page_table_index(get_elf_arch(elf), vaddr)
                    pt_indices = xrange(base_pt_index, base_pt_index + n_pts)

                    # loop over all the page table indices and replace the page tables
                    # with large frames
                    for count, pt_index in enumerate(pt_indices):

                        # look up the page table at the current index
                        pt = pd[pt_index].referent

                        name = 'large_frame_%s_%d' % (instance_name, large_frame_uid)
                        large_frame_uid += 1

                        frame_paddr = paddr + large_size * count

                        # allocate a new large frame
                        frame = obj_space.alloc(large_object_type, name, paddr=frame_paddr)

                        # insert the frame cap into the page directory
                        frame_cap = Cap(frame, read, write, execute)
                        frame_cap.set_cached(False)
                        pd[pt_index] = frame_cap

                        # remove all the small frames from the spec
                        for p_index in pt:
                            small_frame = pt[p_index].referent
                            obj_space.remove(small_frame)

                        # remove the page table from the spec
                        obj_space.remove(pt)

                else:
                    # If the MMIO start and end are not aligned to page table coverage,
                    # loop over all the frames and set their paddrs based on the
                    # paddr in the spec.
                    for idx in xrange(0, (size + PAGE_SIZE - 1) / PAGE_SIZE):
                        try:
                            frame_obj = pts[idx][p_indices[idx]].referent
                        except IndexError:
                            raise Exception('MMIO attributes specify device ' \
                                'memory that is larger than the dataport it is ' \
                                'associated with')
                        frame_obj.paddr = paddr + PAGE_SIZE * idx
                        cap = Cap(frame_obj, read, write, execute)
                        cap.set_cached(False)
                        pts[idx].slots[p_indices[idx]] = cap
                        obj_space.relabel(conn_name, frame_obj)

                continue

            shm_keys = []
            for c in connections:
                shm_keys.append('%s_%s' % (c.from_instance.name, c.from_interface.name))
                shm_keys.append('%s_%s' % (c.to_instance.name, c.to_interface.name))

            mapped = [x for x in shm_keys if x in shared_frames]
            if mapped:
                # We've already encountered the other side of this dataport.

                # The region had better be the same size in all address spaces.
                for key in mapped:
                    assert len(shared_frames[key]) == sz / PAGE_SIZE

            # Save the frames backing this region. If we have already seen the
            # other side of this dataport, reuse its frames; otherwise record
            # the frames found in this address space.
            for key in shm_keys:
                if mapped:
                    shared_frames[key] = shared_frames[mapped[0]]
                else:
                    shared_frames[key] = [pt[p_index].referent \
                        for (pt, p_index) in zip(pts, p_indices)]

            # Overwrite the caps backing this region with caps to the shared
            # frames.
            for j, f in enumerate(shared_frames[shm_keys[0]]):
                existing = pts[j].slots[p_indices[j]].referent
                if existing != f:
                    # We're actually modifying this mapping. Delete the
                    # unneeded frame.
                    obj_space.remove(existing)
                pts[j].slots[p_indices[j]] = Cap(f, read, write, execute)
                obj_space.relabel(conn_name, f)
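
The access-rights handling above accepts a quoted string such as '"RW"' and falls back to read/write for anything it does not recognise. Here is a standalone sketch of that parsing, using the same regular expression as the filter; it is an illustration, not the filter's own helper.

# Standalone sketch of the access-rights parsing. The quoted string format
# (e.g. '"RW"') mirrors how the filter receives the configuration value.
import re

def parse_access(rights_setting):
    if rights_setting is not None and \
            re.match(r'^"R?W?(G|X)?"$', rights_setting):
        read = 'R' in rights_setting
        write = 'W' in rights_setting
        execute = 'X' in rights_setting or 'G' in rights_setting
    else:
        read, write, execute = True, True, False   # default
    return read, write, execute

assert parse_access('"R"') == (True, False, False)
assert parse_access('"RWX"') == (True, True, True)
assert parse_access(None) == (True, True, False)
assert parse_access('"bogus"') == (True, True, False)
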
Example #8
def set_tcb_caps(ast, obj_space, cspaces, elfs, options, **_):
    assembly = find_assembly(ast)

    for group, space in cspaces.items():
        cnode = space.cnode
        for index, tcb in [(k, v.referent) for (k, v) in cnode.slots.items() \
                if v is not None and isinstance(v.referent, TCB)]:

            perspective = Perspective(tcb=tcb.name, group=group)

            # Finalise the CNode so that we know what its absolute size will
            # be. Note that we are assuming no further caps will be added to
            # the CNode after this point.
            cnode.finalise_size()

            # Allow the user to override CNode sizes with the 'cnode_size_bits'
            # attribute.
            cnode_size = assembly.configuration[group].get('cnode_size_bits')
            if cnode_size is not None:
                try:
                    if isinstance(cnode_size, str):
                        size = int(cnode_size, 0)
                    else:
                        size = cnode_size
                except ValueError:
                    raise Exception('illegal value for CNode size for %s' % \
                        group)
                if size < cnode.size_bits:
                    raise Exception('%d-bit CNode specified for %s, but this ' \
                        'CSpace needs to be at least %d bits' % \
                        (size, group, cnode.size_bits))
                cnode.size_bits = size

            cspace = Cap(cnode)
            cspace.set_guard_size(32 - cnode.size_bits)
            tcb['cspace'] = cspace

            elf_name = perspective['elf_name']

            pd = None
            pd_name = perspective['pd']
            pds = [x for x in obj_space.spec.objs if x.name == pd_name]
            if len(pds) > 1:
                raise Exception('Multiple PDs found for %s' % group)
            elif len(pds) == 1:
                pd, = pds
                tcb['vspace'] = Cap(pd)
            # If no PD was found we were probably just not passed any ELF files
            # in this pass.

            if perspective['pool']:
                # This TCB is part of the (cap allocator's) TCB pool.
                continue

            elf = elfs.get(elf_name)

            if pd and elf:

                ipc_symbol = perspective['ipc_buffer_symbol']

                # Find the IPC buffer's virtual address.
                assert get_symbol_size(elf, ipc_symbol) == PAGE_SIZE * 3
                ipc_vaddr = get_symbol_vaddr(elf, ipc_symbol) + PAGE_SIZE

                # Relate this virtual address to a PT.
                pt_index = page_table_index(get_elf_arch(elf), ipc_vaddr,
                    options.hyp)
                if pt_index not in pd:
                    raise Exception('IPC buffer of TCB %s in group %s does ' \
                        'not appear to be backed by a frame' % (tcb.name, group))
                pt = pd[pt_index].referent

                # Continue on to infer the physical frame.
                p_index = page_index(get_elf_arch(elf), ipc_vaddr, options.hyp)
                if p_index not in pt:
                    raise Exception('IPC buffer of TCB %s in group %s does ' \
                        'not appear to be backed by a frame' % (tcb.name, group))
                frame = pt[p_index].referent

                tcb['ipc_buffer_slot'] = Cap(frame, True, True, False) # RW
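
Two details of set_tcb_caps are easy to miss: the CSpace guard consumes every bit of a 32-bit cap address not used by the CNode index, and the 'cnode_size_bits' attribute is parsed with int(x, 0), so it may be written in decimal or hex. A small sketch of both; the word size mirrors the code above, the example values are illustrative.

# Sketch of the guard-size arithmetic and the base-autodetecting parse.
WORD_BITS = 32

def guard_size_for(cnode_size_bits):
    # A cap address is resolved against the full word, so the guard consumes
    # whatever bits the CNode index does not.
    return WORD_BITS - cnode_size_bits

assert guard_size_for(12) == 20

# int(x, 0) accepts decimal and hex spellings of the attribute value.
assert int('12', 0) == 12
assert int('0x10', 0) == 16
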
Example #9
def collapse_shared_frames(ast, obj_space, cspaces, elfs, options, **_):
    """Find regions in virtual address spaces that are intended to be backed by
    shared frames and adjust the capability distribution to reflect this."""

    if not elfs:
        # If we haven't been passed any ELF files this step is not relevant yet.
        return

    assembly = find_assembly(ast)

    # Track the frame objects backing shared regions with a dict keyed on the
    # instance/interface endpoints of the connections linking the regions.
    shared_frames = {}

    for i in (x for x in assembly.composition.instances
            if not x.type.hardware):

        perspective = Perspective(instance=i.name, group=i.address_space)

        elf_name = perspective['elf_name']
        assert elf_name in elfs
        elf = elfs[elf_name]

        # Find this instance's page directory.
        pd_name = perspective['pd']
        pds = [x for x in obj_space.spec.objs if x.name == pd_name]
        assert len(pds) == 1
        pd, = pds

        for d in i.type.dataports:

            # Find the connection that associates this dataport with another.
            connections = [x for x in assembly.composition.connections if \
                ((x.from_instance == i and x.from_interface == d) or \
                (x.to_instance == i and x.to_interface == d))]
            if len(connections) == 0:
                # This dataport is unconnected.
                continue
            #assert len(connections) == 1
            conn_name = connections[0].name

            if connections[0].from_instance == i and \
                    connections[0].from_interface == d:
                direction = 'from'
            else:
                assert connections[0].to_instance == i
                assert connections[0].to_interface == d
                direction = 'to'

            # Reverse the logic in the Makefile template.
            p = Perspective(instance=i.name, dataport=d.name)
            sym = p['dataport_symbol']

            vaddr = get_symbol_vaddr(elf, sym)
            assert vaddr is not None, 'failed to find dataport symbol \'%s\'' \
                ' in ELF %s' % (sym, elf_name)
            assert vaddr != 0
            assert vaddr % PAGE_SIZE == 0, 'dataport %s not page-aligned' % sym
            sz = get_symbol_size(elf, sym)
            assert sz != 0

            # Infer the page table(s) and page(s) that back this region.
            pts, p_indices = zip(*[\
                (pd[page_table_index(options.architecture, v)].referent,
                 page_index(options.architecture, v)) \
                for v in xrange(vaddr, vaddr + sz, PAGE_SIZE)])

            # Determine the rights this mapping should have. We use these to
            # recreate the mapping below. Technically we may not need to
            # recreate this mapping if it's already correct, but do it anyway
            # for simplicity.
            # FIXME: stop hard coding this name mangling.
            rights_setting = assembly.configuration[conn_name].get('%s_access' % direction)
            if rights_setting is not None and \
                    re.match(r'^"R?W?(G|X)?"$', rights_setting):
                read = 'R' in rights_setting
                write = 'W' in rights_setting
                execute = 'X' in rights_setting or 'G' in rights_setting
            else:
                # default
                read = True
                write = True
                execute = False

            # Check if the dataport is connected *TO* a hardware component.
            if connections[0].to_instance.type.hardware:
                p = Perspective(to_interface=connections[0].to_interface.name)
                hardware_attribute = p['hardware_attribute']
                conf = assembly.configuration[connections[0].to_instance.name].get(hardware_attribute)
                assert conf is not None, "%s.%s not found in configuration" % \
                    (connections[0].to_instance.name, hardware_attribute)
                paddr, size = conf.strip('"').split(':')
                # Parse the physical address and size from the attribute value.
                try:
                    paddr = int(paddr, 0)
                except ValueError:
                    raise Exception("Invalid physical address specified for %s.%s: %s\n" %
                                    (me.to_instance.name, me.to_interface.name, paddr))

                try:
                    size = int(size, 0)
                except ValueError:
                    raise Exception("Invalid size specified for %s.%s: %s\n" %
                                    (me.to_instance.name, me.to_interface.name, size))

                hardware_cached = p['hardware_cached']
                cached = assembly.configuration[connections[0].to_instance.name].get(hardware_cached)
                if cached is None:
                    cached = False
                elif cached.lower() == 'true':
                    cached = True
                elif cached.lower() == 'false':
                    cached = False
                else:
                    raise Exception("Value of %s.%s_cached must be either 'true' or 'false'. Got '%s'." %
                                    (me.to_instance.name, me.to_interface.name, cached))

                instance_name = connections[0].to_instance.name

                if size == 0:
                    raise Exception('Hardware dataport %s.%s has zero size!' % (instance_name,
                        connections[0].to_interface.name))

                # determine the size of a large frame, and the type of kernel
                # object that will be used, both of which depend on the architecture
                if get_elf_arch(elf) == 'ARM':
                    large_size = 1024 * 1024
                    large_object_type = seL4_ARM_SectionObject
                else:
                    large_size = 4 * 1024 * 1024
                    large_object_type = seL4_IA32_4M

                # Check whether the MMIO start and end are aligned to page table
                # coverage. If so, back the device region with frames that each
                # cover a whole page table's worth of memory, to be consistent
                # with the kernel.
                if paddr % large_size == 0 and size % large_size == 0:

                    # number of page tables backing device memory
                    n_pts = size / large_size

                    # index of first page table in page directory backing the device memory
                    base_pt_index = page_table_index(options.architecture, vaddr)
                    pt_indices = xrange(base_pt_index, base_pt_index + n_pts)

                    # loop over all the page table indices and replace the page tables
                    # with large frames
                    for count, pt_index in enumerate(pt_indices):

                        # look up the page table at the current index
                        pt = pd[pt_index].referent

                        offset = count * large_size
                        frame_paddr = paddr + offset

                        # lookup the frame, already allocated by a template
                        frame_cap = find_hardware_frame_in_cspace(
                                        cspaces[i.address_space],
                                        frame_paddr,
                                        connections[0].to_instance.name,
                                        connections[0].to_interface.name)
                        frame_obj = frame_cap.referent

                        # create a new cap for the frame to use for its mapping
                        mapping_frame_cap = Cap(frame_obj, read, write, execute)
                        mapping_frame_cap.set_cached(cached)

                        # add the mapping to the spec
                        pd[pt_index] = mapping_frame_cap

                        # add the mapping information to the original cap
                        frame_cap.set_mapping(pd, pt_index)

                        # remove all the small frames from the spec
                        for p_index in pt:
                            small_frame = pt[p_index].referent
                            obj_space.remove(small_frame)

                        # remove the page table from the spec
                        obj_space.remove(pt)

                else:
                    # If the MMIO start and end are not aligned to page table coverage,
                    # loop over all the frames and set their paddrs based on the
                    # paddr in the spec.
                    for idx in xrange(0, (size + PAGE_SIZE - 1) / PAGE_SIZE):
                        try:
                            frame_obj = pts[idx][p_indices[idx]].referent
                        except IndexError:
                            raise Exception('MMIO attributes specify device ' \
                                'memory that is larger than the dataport it is ' \
                                'associated with')

                        offset = idx * PAGE_SIZE
                        frame_paddr = paddr + offset

                        # lookup the frame, already allocated by a template
                        frame_cap = find_hardware_frame_in_cspace(
                                        cspaces[i.address_space],
                                        frame_paddr,
                                        connections[0].to_instance.name,
                                        connections[0].to_interface.name)
                        frame_obj = frame_cap.referent

                        # create a new cap for the frame to use for its mapping
                        mapping_frame_cap = Cap(frame_obj, read, write, execute)
                        mapping_frame_cap.set_cached(cached)

                        # add the mapping to the spec
                        pt = pts[idx]
                        slot = p_indices[idx]
                        pt.slots[slot] = mapping_frame_cap

                        # add the mapping information to the original cap
                        frame_cap.set_mapping(pt, slot)

                        obj_space.relabel(conn_name, frame_obj)

                continue

            # If any objects still have names indicating they are part of a hardware
            # dataport, it means that dataport hasn't been given a paddr or size.
            # This indicates an error, and the object name is invalid in capdl,
            # so catch the error here rather than having the capdl translator fail.

            for cap in (v for v in cspaces[i.address_space].cnode.slots.values() if v is not None):

                obj = cap.referent

                match = HARDWARE_FRAME_NAME_PATTERN.match(obj.name)
                assert (match is None or match.group(2) != connections[0].to_instance.name), \
                    "Missing hardware attributes for %s.%s" % (match.group(2), match.group(3))

            shm_keys = []
            for c in connections:
                shm_keys.append('%s_%s' % (c.from_instance.name, c.from_interface.name))
                shm_keys.append('%s_%s' % (c.to_instance.name, c.to_interface.name))

            mapped = [x for x in shm_keys if x in shared_frames]
            if mapped:
                # We've already encountered the other side of this dataport.

                # The region had better be the same size in all address spaces.
                for key in mapped:
                    assert len(shared_frames[key]) == sz / PAGE_SIZE

            # Save the frames backing this region. If we have already seen the
            # other side of this dataport, reuse its frames; otherwise record
            # the frames found in this address space.
            for key in shm_keys:
                if mapped:
                    shared_frames[key] = shared_frames[mapped[0]]
                else:
                    shared_frames[key] = [pt[p_index].referent \
                        for (pt, p_index) in zip(pts, p_indices)]

            # Overwrite the caps backing this region with caps to the shared
            # frames.
            for j, f in enumerate(shared_frames[shm_keys[0]]):
                existing = pts[j].slots[p_indices[j]].referent
                if existing != f:
                    # We're actually modifying this mapping. Delete the
                    # unneeded frame.
                    obj_space.remove(existing)
                pts[j].slots[p_indices[j]] = Cap(f, read, write, execute)
                obj_space.relabel(conn_name, f)
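
The hardware-dataport branch above chooses between large frames and per-page mappings based on whether the MMIO region is aligned to page-table coverage. Below is a small worked example of that check, using the 1 MiB ARM section size from the code; the addresses are illustrative.

# Sketch of the large-frame alignment test (illustrative addresses).
large_size = 1024 * 1024

def covers_whole_page_tables(paddr, size):
    return paddr % large_size == 0 and size % large_size == 0

# A 2 MiB device region starting on a 1 MiB boundary: two large frames.
assert covers_whole_page_tables(0x10100000, 2 * large_size)
assert (2 * large_size) // large_size == 2      # n_pts

# A 4 KiB register block part-way into a section: fall back to small frames.
assert not covers_whole_page_tables(0x10101000, 0x1000)
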
Example #10
def main():
    options = parse_args(constants.TOOL_RUNNER)

    # Save us having to pass debugging everywhere.
    die = functools.partial(_die, options.verbosity >= 3)

    log.set_verbosity(options.verbosity)

    def done(s):
        ret = 0
        if s:
            options.outfile.write(s)
            options.outfile.close()
        sys.exit(ret)

    if not options.platform or options.platform in ('?', 'help') \
            or options.platform not in PLATFORMS:
        die('Valid --platform arguments are %s' % ', '.join(PLATFORMS))

    if not options.file or len(options.file) > 1:
        die('A single input file must be provided for this operation')

    # Construct the compilation cache if requested.
    cache = None
    if options.cache in ('on', 'readonly', 'writeonly'):
        cache = Cache(options.cache_dir)

    f = options.file[0]
    try:
        s = f.read()
        # Try to find this output in the compilation cache if possible. This is
        # one of two places that we check in the cache. This check will 'hit'
        # if the source files representing the input spec are identical to some
        # previous execution.
        if options.cache in ('on', 'readonly'):
            key = [
                version_hash(),
                os.path.abspath(f.name), s,
                cache_relevant_options(options), options.platform, options.item
            ]
            value = cache.get(key)
            assert value is None or isinstance(value, FileSet), \
                'illegally cached a value for %s that is not a FileSet' % options.item
            if value is not None and value.valid():
                # Cache hit.
                log.debug('Retrieved %(platform)s.%(item)s from cache' % \
                    options.__dict__)
                done(value.output)
        ast = parser.parse_to_ast(s, options.cpp, options.cpp_flag,
                                  options.ply_optimise)
        parser.assign_filenames(ast, f.name)
    except parser.CAmkESSyntaxError as e:
        e.set_column(s)
        die('%s:%s' % (f.name, str(e)))
    except Exception as inst:
        die('While parsing \'%s\': %s' % (f.name, inst))

    try:
        for t in AST_TRANSFORMS[PRE_RESOLUTION]:
            ast = t(ast)
    except Exception as inst:
        die('While transforming AST: %s' % str(inst))

    try:
        ast, imported = parser.resolve_imports(ast, \
            os.path.dirname(os.path.abspath(f.name)), options.import_path,
            options.cpp, options.cpp_flag, options.ply_optimise)
    except Exception as inst:
        die('While resolving imports of \'%s\': %s' % (f.name, inst))

    try:
        # if there are multiple assemblies, combine them now
        compose_assemblies(ast)
    except Exception as inst:
        die('While combining assemblies: %s' % str(inst))

    # If we have a readable cache check if our current target is in the cache.
    # The previous check will 'miss' and this one will 'hit' when the input
    # spec is identical to some previous execution modulo a semantically
    # irrelevant element (e.g. an introduced comment). I.e. the previous check
    # matches when the input is exactly the same and this one matches when the
    # AST is unchanged.
    if options.cache in ('on', 'readonly'):
        key = [
            version_hash(), ast,
            cache_relevant_options(options), options.platform, options.item
        ]
        value = cache.get(key)
        if value is not None:
            assert options.item not in NEVER_AST_CACHE, \
                '%s, that is marked \'never cache\' is in your cache' % options.item
            log.debug('Retrieved %(platform)s.%(item)s from cache' % \
                options.__dict__)
            done(value)

    # If we have a writable cache, allow outputs to be saved to it.
    if options.cache in ('on', 'writeonly'):
        orig_ast = deepcopy(ast)
        fs = FileSet(imported)

        def save(item, value):
            # Save an input-keyed cache entry. This one is based on the
            # pre-parsed inputs to save having to derive the AST (parse the
            # input) in order to locate a cache entry in following passes.
            # This corresponds to the first cache check above.
            key = [
                version_hash(),
                os.path.abspath(options.file[0].name), s,
                cache_relevant_options(options), options.platform, item
            ]
            specialised = fs.specialise(value)
            if item == 'capdl':
                specialised.extend(options.elf)
            cache[key] = specialised
            if item not in NEVER_AST_CACHE:
                # Save an AST-keyed cache entry. This corresponds to the second
                # cache check above.
                cache[[
                    version_hash(), orig_ast,
                    cache_relevant_options(options), options.platform, item
                ]] = value
    else:

        def save(item, value):
            pass

    ast = parser.dedupe(ast)
    try:
        ast = parser.resolve_references(ast)
    except Exception as inst:
        die('While resolving references of \'%s\': %s' % (f.name, inst))

    try:
        parser.collapse_references(ast)
    except Exception as inst:
        die('While collapsing references of \'%s\': %s' % (f.name, inst))

    try:
        for t in AST_TRANSFORMS[POST_RESOLUTION]:
            ast = t(ast)
    except Exception as inst:
        die('While transforming AST: %s' % str(inst))

    try:
        resolve_hierarchy(ast)
    except Exception as inst:
        die('While resolving hierarchy: %s' % str(inst))

    # All references in the AST need to be resolved for us to continue.
    unresolved = reduce(lambda a, x: a.union(x),
                        map(lambda x: x.unresolved(), ast), set())
    if unresolved:
        die('Unresolved references in input specification:\n %s' % \
            '\n '.join(map(lambda x: '%(filename)s:%(lineno)s:\'%(name)s\' of type %(type)s' % {
                'filename':x.filename or '<unnamed file>',
                'lineno':x.lineno,
                'name':x._symbol,
                'type':x._type.__name__,
            }, unresolved)))

    # Locate the assembly
    assembly = [x for x in ast if isinstance(x, AST.Assembly)]
    if len(assembly) > 1:
        die('Multiple assemblies found')
    elif len(assembly) == 1:
        assembly = assembly[0]
    else:
        die('No assembly found')

    obj_space = ObjectAllocator()
    obj_space.spec.arch = options.architecture
    cspaces = {}
    pds = {}
    conf = assembly.configuration
    shmem = defaultdict(dict)

    templates = Templates(options.platform)
    map(templates.add_root, options.templates)
    r = Renderer(templates.get_roots(), options)

    # The user may have provided their own connector definitions (with
    # associated templates), in which case they won't be in the built-in lookup
    # dictionary. Let's add them now. Note that definitions here that conflict
    # with existing lookup entries will overwrite the existing entries.
    for c in (x for x in ast if isinstance(x, AST.Connector)):
        if c.from_template:
            templates.add(c.name, 'from.source', c.from_template)
        if c.to_template:
            templates.add(c.name, 'to.source', c.to_template)

    # We're now ready to instantiate the template the user requested, but there
    # are a few wrinkles in the process. Namely,
    #  1. Template instantiation needs to be done in a deterministic order. The
    #     runner is invoked multiple times and template code needs to be
    #     allocated identical cap slots in each run.
    #  2. Components and connections need to be instantiated before any other
    #     templates, regardless of whether they are the ones we are after. Some
    #     other templates, such as the Makefile depend on the obj_space and
    #     cspaces.
    #  3. All actual code templates, up to the template that was requested,
    #     need to be instantiated. This is related to (1) in that the cap slots
    #     allocated are dependent on what allocations have been done prior to a
    #     given allocation call.

    # Instantiate the per-component source and header files.
    for id, i in enumerate(assembly.composition.instances):
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue

        if i.address_space not in cspaces:
            p = Perspective(phase=RUNNER,
                            instance=i.name,
                            group=i.address_space)
            cnode = obj_space.alloc(seL4_CapTableObject,
                                    name=p['cnode'],
                                    label=i.address_space)
            cspaces[i.address_space] = CSpaceAllocator(cnode)
            pd = obj_space.alloc(seL4_PageDirectoryObject,
                                 name=p['pd'],
                                 label=i.address_space)
            pds[i.address_space] = pd

        for t in ('%s.source' % i.name, '%s.header' % i.name,
                  '%s.linker' % i.name):
            try:
                template = templates.lookup(t, i)
                g = ''
                if template:
                    g = r.render(i, assembly, template, obj_space, cspaces[i.address_space], \
                        shmem, options=options, id=id, my_pd=pds[i.address_space])
                save(t, g)
                if options.item == t:
                    if not template:
                        log.warning('Warning: no template for %s' %
                                    options.item)
                    done(g)
            except Exception as inst:
                die('While rendering %s: %s' % (i.name, inst))

    # Instantiate the per-connection files.
    conn_dict = {}
    for id, c in enumerate(assembly.composition.connections):
        tmp_name = c.name
        key_from = (c.from_instance.name + '_' +
                    c.from_interface.name) in conn_dict
        key_to = (c.to_instance.name + '_' + c.to_interface.name) in conn_dict
        if not key_from and not key_to:
            # We need a new connection name
            conn_name = 'conn' + str(id)
            c.name = conn_name
            conn_dict[c.from_instance.name + '_' +
                      c.from_interface.name] = conn_name
            conn_dict[c.to_instance.name + '_' +
                      c.to_interface.name] = conn_name
        elif not key_to:
            conn_name = conn_dict[c.from_instance.name + '_' +
                                  c.from_interface.name]
            c.name = conn_name
            conn_dict[c.to_instance.name + '_' +
                      c.to_interface.name] = conn_name
        elif not key_from:
            conn_name = conn_dict[c.to_instance.name + '_' +
                                  c.to_interface.name]
            c.name = conn_name
            conn_dict[c.from_instance.name + '_' +
                      c.from_interface.name] = conn_name
        else:
            continue

        for t in (('%s.from.source' % tmp_name, c.from_instance.address_space),
                  ('%s.from.header' % tmp_name, c.from_instance.address_space),
                  ('%s.to.source' % tmp_name, c.to_instance.address_space),
                  ('%s.to.header' % tmp_name, c.to_instance.address_space)):
            try:
                template = templates.lookup(t[0], c)
                g = ''
                if template:
                    g = r.render(c, assembly, template, obj_space, cspaces[t[1]], \
                        shmem, options=options, id=id, my_pd=pds[t[1]])
                save(t[0], g)
                if options.item == t[0]:
                    if not template:
                        log.warning('Warning: no template for %s' %
                                    options.item)
                    done(g)
            except Exception as inst:
                die('While rendering %s: %s' % (t[0], inst))
        c.name = tmp_name

        # The following block handles instantiations of per-connection
        # templates that are neither a 'source' nor a 'header', as handled
        # above. We assume that none of these need instantiation unless we are
        # actually currently looking for them (== options.item). That is, we
        # assume that following templates, like the CapDL spec, do not require
        # these templates to be rendered prior to themselves.
        # FIXME: This is a pretty ugly way of handling this. It would be nicer
        # for the runner to have a more general notion of per-'thing' templates
        # where the per-component templates, the per-connection template loop
        # above, and this loop could all be done in a single unified control
        # flow.
        for t in (('%s.from.' % c.name, c.from_instance.address_space),
                  ('%s.to.' % c.name, c.to_instance.address_space)):
            if not options.item.startswith(t[0]):
                # This is not the item we're looking for.
                continue
            try:
                # If we've reached here then this is the exact item we're
                # after.
                template = templates.lookup(options.item, c)
                if template is None:
                    raise Exception('no registered template for %s' %
                                    options.item)
                g = r.render(c, assembly, template, obj_space, cspaces[t[1]], \
                    shmem, options=options, id=id, my_pd=pds[t[1]])
                save(options.item, g)
                done(g)
            except Exception as inst:
                die('While rendering %s: %s' % (options.item, inst))

    # Perform any per-component 'simple' generation. This needs to happen last,
    # as this template must run after all other capabilities have been
    # allocated.
    for id, i in enumerate(assembly.composition.instances):
        # Don't generate any code for hardware components.
        if i.type.hardware:
            continue
        assert i.address_space in cspaces
        if conf and conf.settings and [x for x in conf.settings if \
                x.instance == i.name and x.attribute == 'simple' and x.value]:
            for t in ('%s.simple' % i.name, ):
                try:
                    template = templates.lookup(t, i)
                    g = ''
                    if template:
                        g = r.render(i, assembly, template, obj_space, cspaces[i.address_space], \
                            shmem, options=options, id=id, my_pd=pds[i.address_space])
                    save(t, g)
                    if options.item == t:
                        if not template:
                            log.warning('Warning: no template for %s' %
                                        options.item)
                        done(g)
                except Exception as inst:
                    die('While rendering %s: %s' % (i.name, inst))

    # Derive a set of usable ELF objects from the filenames we were passed.
    elfs = {}
    for e in options.elf:
        try:
            name = os.path.basename(e)
            if name in elfs:
                raise Exception(
                    'duplicate ELF files of name \'%s\' encountered' % name)
            elf = ELF(e, name, options.architecture)
            p = Perspective(phase=RUNNER, elf_name=name)
            group = p['group']
            # Avoid inferring a TCB as we've already created our own.
            elf_spec = elf.get_spec(infer_tcb=False,
                                    infer_asid=False,
                                    pd=pds[group],
                                    use_large_frames=options.largeframe)
            obj_space.merge(elf_spec, label=group)
            elfs[name] = (e, elf)
        except Exception as inst:
            die('While opening \'%s\': %s' % (e, inst))

    if options.item in ('capdl', 'label-mapping'):
        # It's only relevant to run these filters if the final target is CapDL.
        # Note, this will no longer be true if we add any other templates that
        # depend on a fully formed CapDL spec. Guarding this loop with an if
        # is just an optimisation and the conditional can be removed if
        # desired.
        for f in CAPDL_FILTERS:
            try:
                # Pass everything as named arguments to allow filters to
                # easily ignore what they don't want.
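                # (A filter conforming to this convention might be declared,
                # illustratively, as `def my_filter(ast, obj_space, **_)`:
                # it accepts by name only the arguments it needs and discards
                # the rest, so new keyword arguments can be added at this call
                # site without touching every filter.)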
                f(ast=ast,
                  obj_space=obj_space,
                  cspaces=cspaces,
                  elfs=elfs,
                  options=options,
                  shmem=shmem)
            except Exception as inst:
                die('While forming CapDL spec: %s' % str(inst))

    # Instantiate any other miscellaneous template. If we've reached this
    # point, we know the user did not request a code template.
    try:
        template = templates.lookup(options.item)
        if template:
            g = r.render(assembly, assembly, template, obj_space, None, \
                shmem, imported=imported, options=options)
            save(options.item, g)
            done(g)
    except Exception as inst:
        die('While rendering %s: %s' % (options.item, inst))

    die('No valid element matching --item %s' % options.item)
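
A minimal sketch of the claim-or-die dispatch shape the function above ends with. Every name below is made up for illustration, and done() is assumed not to return (which is consistent with the unconditional die() at the end of the real function).

def dispatch(item, generators, save, done, die):
    '''Illustrative only: walk candidate generators, render the one that
    claims `item`, and fail loudly if nothing claims it.'''
    for name, render in generators.items():
        if name != item:
            # Not the item we were asked for; keep looking.
            continue
        output = render()
        save(name, output)
        done(output)  # assumed not to return, as in the runner above
    die('No valid element matching --item %s' % item)
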
Example No. 11
0
def new_context(entity, assembly, obj_space, cap_space, shmem, **kwargs):
    '''Create a new default context for rendering.'''
    return dict({
        # Kernel object allocator
        'alloc_obj':(lambda name, type, **kwargs:
            alloc_obj((entity, obj_space), obj_space,
                '%s_%s' % (entity.name, name), type, label=entity.name, **kwargs))
                    if obj_space else None,
        'seL4_EndpointObject':seL4_EndpointObject,
        'seL4_AsyncEndpointObject':seL4_AsyncEndpointObject,
        'seL4_TCBObject':seL4_TCBObject,
        'seL4_ARM_SmallPageObject':seL4_ARM_SmallPageObject,
        'seL4_ARM_SectionObject':seL4_ARM_SectionObject,
        'seL4_ARM_SuperSectionObject':seL4_ARM_SuperSectionObject,
        'seL4_FrameObject':seL4_FrameObject,
        'seL4_UntypedObject':seL4_UntypedObject,
        'seL4_IA32_IOPort':seL4_IA32_IOPort,
        'seL4_IA32_IOSpace':seL4_IA32_IOSpace,
        'seL4_ASID_Pool':seL4_ASID_Pool,

        # Cap allocator
        'alloc_cap':(lambda name, obj, **kwargs: \
            alloc_cap((entity, cap_space), cap_space, name, obj, **kwargs)) \
                if cap_space else None,
        'seL4_CanRead':seL4_CanRead,
        'seL4_CanWrite':seL4_CanWrite,
        'seL4_AllRights':seL4_AllRights,
        'seL4_IRQControl':seL4_IRQControl,

        # The CNode root of your CSpace. Should only be necessary in cases
        # where you need to allocate a cap to it.
        'my_cnode':cap_space.cnode if cap_space is not None else None,

        # Some C familiars.
        'PAGE_SIZE':4096,
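        # ROUND_UP rounds x up to the next multiple of y; for instance,
        # ROUND_UP(5000, PAGE_SIZE) evaluates to 8192.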
        'ROUND_UP':lambda x, y: int(int(math.ceil(int(x) / float(y))) * y),
        '__WORDSIZE':32,
        '__SIZEOF_POINTER__':4,
        # Calculate the size of a type at template instantiation time. In
        # general, you should prefer emitting a call to C's sizeof over this
        # because it leads to more readable and portable output. This is only
        # provided for cases where you explicitly need to know the size of a
        # type ahead of compilation time.
        'sizeof':sizeof,

        # Batched object and cap allocation for when you don't need a reference
        # to the object. Probably best not to look directly at this one. When
        # you see `set y = alloc('foo', bar, moo)` in template code, think:
        #  set x = alloc_obj('foo_obj', bar)
        #  set y = alloc_cap('foo_cap', x, moo)
        'alloc':(lambda name, type, **kwargs:
            alloc_cap((entity, cap_space), cap_space, name,
            alloc_obj((entity, obj_space), obj_space,
                '%s_%s' % (entity.name, name), type, label=entity.name,
                **kwargs),
                **kwargs)) if cap_space else None,

        # Functionality for templates to inform us that they've emitted a C
        # variable that's intended to map to a shared variable. It is
        # (deliberately) left to the template authors to ensure global names
        # (gnames) only collide when intended; i.e. when they should map to the
        # same shared variable. The local name (lname) will later be used by us
        # to locate the relevant ELF frame(s) to remap. Note that we assume
        # address spaces and CSpaces are 1-to-1.
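        # (Illustrative usage: a template that emits `char from_buf[4096];`
        # might then call
        #   /*- do register_shared_variable('shared_buf', 'from_buf') -*/
        # where 'shared_buf' is the global name and 'from_buf' the local C
        # symbol; both names are made up for this example.)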
        'register_shared_variable':None if cap_space is None else \
            (lambda gname, lname: register_shared_variable(shmem, gname,
                cap_space.cnode.name, lname)),

        # A `self`-like reference to the current AST object. It would be nice
        # to actually call this `self` to lead to more pythonic templates, but
        # `self` inside template blocks refers to the jinja2 parser.
        'me':entity,

        # The AST assembly's configuration.
        'configuration':assembly.configuration,

        # The AST assembly's composition
        'composition':assembly.composition,

        # Allow some AST objects to be printed trivially
        'show':show,

        # Cross-template variable passing helpers. These are quite low-level.
        # Avoid calling them unless necessary.
        'stash':partial(stash, entity),
        'pop':partial(pop, entity),
        'guard':partial(guard, entity),

        # If the previous group of functions are considered harmful, these are
        # to be considered completely off limits. These expose a mechanism for
        # passing data between unrelated templates (_stash and _pop) and a way
        # of running arbitrary Python statements and expressions. They come
        # with significant caveats. E.g. _stash and _pop will likely not behave
        # as expected with the template cache enabled.
        '_stash':partial(stash, ''),
        '_pop':partial(pop, ''),
        'exec':_exec,
        'eval':eval,

        # Helpers for creating unique symbols within templates.
        'c_symbol':partial(symbol, '_camkes_%(tag)s_%(counter)d'),
        'isabelle_symbol':partial(symbol, '%(tag)s%(counter)d\'', 's'),

        # Expose some library functions
        'assert':_assert,
        'bool':bool,
        'enumerate':enumerate,
        'Exception':Exception,
        'filter':filter,
        'float':float,
        'hex':hex,
        'int':int,
        'isinstance':isinstance,
        'lambda':lambda s: eval('lambda %s' % s),
        'len':len,
        'list':list,
        'map':map,
        'math':collections.namedtuple('math', ['pow'])(math.pow),
        'NotImplementedError':lambda x='NotImplementedError': NotImplementedError(x),
        'os':collections.namedtuple('os', ['path'])(os.path),
        'pdb':collections.namedtuple('pdb', ['set_trace'])(_set_trace),
        'raise':_raise,
        're':collections.namedtuple('re', ['sub', 'match'])(re.sub, re.match),
        'reduce':reduce,
        'reversed':reversed,
        'set':DeterministicSet,
        'str':str,
        'splitext':os.path.splitext,
        'arch':os.environ.get('ARCH', ''),
        'ord':ord,
        'chr':chr,
        'textwrap':collections.namedtuple('textwrap', ['wrap'])(textwrap.wrap),
        'copy':collections.namedtuple('copy', ['deepcopy'])(deepcopy),

        # Allocation pools. In general, do not touch these in templates, but
        # interact with them through the alloc* functions. They are only in the
        # context to allow unanticipated template extensions.
        'obj_space':obj_space,
        'cap_space':cap_space,

        # Debugging functions
        'breakpoint':_breakpoint,
        'print':lambda x: sys.stdout.write('%s\n' % x) or '',
        'sys':collections.namedtuple('sys', ['stdout', 'stderr'])(sys.stdout,
            sys.stderr),

        # Workaround for Jinja's bizarre scoping rules.
        'Counter':Counter,

        # Helper functions for generating apply-style Isabelle proof scripts.
        'apply':apply,
        'by':by,
        'done':done,
        'oops':oops,
        'sorry':sorry,

        # Support for name mangling in the templates. See existing usage for
        # examples.
        'Perspective':lambda **kwargs:Perspective(TEMPLATES, **kwargs),

        # Low-level access to name mangling. Should only be required when you
        # need to access both mangling phases.
        'NameMangling':collections.namedtuple('NameMangling',
            ['FILTERS', 'TEMPLATES', 'Perspective'])(FILTERS, TEMPLATES,
                Perspective),

        # Return a list of distinct elements. Normally you would just do this
        # as list(set(xs)), but this turns out to be non-deterministic in the
        # template environment for some reason.
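        # For instance, uniq([3, 1, 3, 2, 1]) yields [3, 1, 2].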
        'uniq':lambda xs: reduce(lambda ys, z: ys if z in ys else (ys + [z]), xs, []),

        # Functional helpers.
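        # For instance, flatMap(lambda x: [x, x], [1, 2]) gives [1, 1, 2, 2]
        # and flatten([[1], [2, 3]]) gives [1, 2, 3].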
        'flatMap':lambda f, xs: list(itertools.chain.from_iterable(map(f, xs))),
        'flatten':lambda xss: list(itertools.chain.from_iterable(xss)),

        # Macros for common operations.
        'macros':macros,

        # Give template authors access to AST types just in case. Templates
        # should never be constructing objects of these types, but they may
        # need to do `isinstance` testing.
        'camkes':collections.namedtuple('camkes', ['ast'])(AST),

        # When generating Isabelle apply-style proof scripts, the results can
        # sometimes be fragile in the face of changing code. In particular,
        # sometimes a generated proof can fail because the underlying code
        # changed, but inadvertently make progress beyond the actual divergence
        # point, concealing the source of the failure. This function allows you
        # to assert within an apply-style proof what the current subgoal looks
        # like. The idea is that this step will fail early when the code
        # changes, giving you a better idea of where to begin repairing the
        # proof.
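        # For instance, assert_current_goal('True') expands to
        #   apply (subgoal_tac "True", assumption)
        # when the verbosity option is at least 2, and to the empty string
        # otherwise.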
        'assert_current_goal':lambda prop:'apply (subgoal_tac "%s", assumption)' % prop \
            if kwargs['options'].verbosity >= 2 else '',

        # Give the template authors a mechanism for writing C-style include
        # guards. Use the following idiom to guard an include target:
        #  /*- if 'template.filename' not in included -*/
        #  /*- do included.add('template.filename') -*/
        #  ... my template ...
        #  /*- endif -*/
        'included':set(),
    }.items() + kwargs.items())
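
A minimal, self-contained sketch of the pattern new_context() serves: collect plain Python helpers into a dict and hand that dict to Jinja2 as the render context. The context below is hand-rolled for illustration (only PAGE_SIZE and the rounding idea come from the code above), uses Jinja2's default delimiters rather than CAmkES' own, and does not exercise the real templates.

import jinja2  # assumed available; the real renderer's setup is more involved

# A tiny stand-in context: one constant and one helper, as a dict.
context = {
    'PAGE_SIZE': 4096,
    'ROUND_UP': lambda x, y: ((x + y - 1) // y) * y,  # integer-only variant
}

# Render a one-line template against that context.
template = jinja2.Environment().from_string(
    'aligned = {{ ROUND_UP(5000, PAGE_SIZE) }}')
print(template.render(context))  # prints: aligned = 8192
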