def replace_frame_with_paging_structure(obj_space, vspace_root, frame_cap, bottom_level, indices):
    '''Swap a frame in a vspace for a paging structure of the same coverage.

    Given the root paging structure of a vspace (`vspace_root`), a cap to a
    frame in that vspace (`frame_cap`), the path of paging-structure indices
    from the root down to that frame (`indices`), and the level of the paging
    hierarchy containing the frame (`bottom_level`), replace the frame with a
    paging structure covering the same region, populated with appropriately
    sized frames that inherit the original frame's rights.'''
    assert len(indices) >= 1, "Empty list of indices"

    replacement = obj_space.alloc(bottom_level.object)
    child_size = min(p.size for p in bottom_level.pages)
    num_children = bottom_level.coverage // child_size

    # Fill every slot of the new structure with a freshly allocated frame,
    # mirroring the rights of the frame being replaced.
    for slot in range(num_children):
        child = obj_space.alloc(seL4_FrameObject, size=child_size)
        replacement[slot] = Cap(child, frame_cap.read, frame_cap.write,
                                frame_cap.grant)

    # Locate the structure that currently maps the frame: the root itself, or
    # whatever the path up to (but excluding) the final index resolves to.
    if len(indices) > 1:
        _, container = lookup_vspace_indices(vspace_root, indices[:-1])
    else:
        container = vspace_root

    # Point the frame's old slot at the new paging structure, then drop the
    # now-unreferenced frame from the spec.
    container[indices[-1]] = Cap(replacement, frame_cap.read, frame_cap.write,
                                 frame_cap.grant)
    obj_space.remove(frame_cap.referent)
def set_tcb_caps(ast, obj_space, cspaces, elfs, options, **_):
    '''Assign CSpace and VSpace caps to every TCB found in the CNodes of
    `cspaces`, finalising each CNode's size (honouring an optional
    'cnode_size_bits' configuration override) and setting its guard for the
    target architecture's word size.'''
    arch = lookup_architecture(options.architecture)
    assembly = ast.assembly
    for group, space in cspaces.items():
        cnode = space.cnode
        # Walk every slot that holds a cap to a TCB.
        for index, tcb in [(k, v.referent) for (k, v) in cnode.slots.items()
                           if v is not None and isinstance(v.referent, TCB)]:

            perspective = Perspective(tcb=tcb.name, group=group)

            # Finalise the CNode so that we know what its absolute size will
            # be. Note that we are assuming no further caps will be added to
            # the CNode after this point.
            cnode.finalise_size()

            # Allow the user to override CNode sizes with the 'cnode_size_bits'
            # attribute.
            cnode_size = assembly.configuration[group].get('cnode_size_bits')
            if cnode_size is not None:
                try:
                    if isinstance(cnode_size, six.string_types):
                        # Base 0: accepts decimal, hex (0x...) and octal forms.
                        size = int(cnode_size, 0)
                    else:
                        size = cnode_size
                except ValueError:
                    raise Exception('illegal value for CNode size for %s'
                                    % group)
                if size < cnode.size_bits:
                    raise Exception('%d-bit CNode specified for %s, but this '
                                    'CSpace needs to be at least %d bits'
                                    % (size, group, cnode.size_bits))
                cnode.size_bits = size

            cspace = Cap(cnode)
            # The guard consumes the cap-address bits not translated by the
            # CNode itself.
            cspace.set_guard_size(arch.word_size_bits() - cnode.size_bits)
            tcb['cspace'] = cspace

            pd = None
            pd_name = perspective['pd']
            pds = [x for x in obj_space.spec.objs if x.name == pd_name]
            if len(pds) > 1:
                raise Exception('Multiple PDs found for %s' % group)
            elif len(pds) == 1:
                pd, = pds
                tcb['vspace'] = Cap(pd)
            # If no PD was found we were probably just not passed any ELF files
            # in this pass.

            if perspective['pool']:
                # This TCB is part of the (cap allocator's) TCB pool.
                continue
def replace_frame_with_small_frames(obj_space, vspace_root, frame_cap, bottom_level, indices):
    '''Replace one large frame in a vspace with a run of smaller frames.

    `vspace_root` is the root paging structure, `frame_cap` a cap to the frame
    being replaced, `indices` the path of paging-structure indices from the
    root down to that frame, and `bottom_level` the level of the hierarchy
    containing it. The smaller frames are mapped into the same paging
    structure, in consecutive slots starting where the original frame sat,
    with the original frame's rights.'''
    assert len(indices) >= 1, "Empty list of indices"

    # Resolve the paging structure that currently maps the frame: the root
    # itself, or whatever the path short of the final index leads to.
    if len(indices) > 1:
        _, container = lookup_vspace_indices(vspace_root, indices[:-1])
    else:
        container = vspace_root

    first_slot = indices[-1]
    assert container[first_slot] == frame_cap, "Unexpected frame cap"

    big_size = frame_cap.referent.size
    small_size = min(p.size for p in bottom_level.pages)
    assert big_size % small_size == 0, "Small frame size does not evenly divide larger frame size"

    # Allocate one small frame per covered slot, inheriting the old rights.
    for offset in range(big_size // small_size):
        small = obj_space.alloc(seL4_FrameObject, size=small_size)
        container[first_slot + offset] = Cap(small, frame_cap.read,
                                             frame_cap.write, frame_cap.grant)

    # The original frame is no longer referenced; drop it from the spec.
    obj_space.remove(frame_cap.referent)
def capdl_declare_frame(context, cap_symbol, symbol, size=4096):
    '''Allocate a frame object and a cap to it, map the frame into the current
    address space under `symbol`, and return C declaration text for both.

    `size` defaults to 4096; the region is recorded with a "size_12bit" tag,
    so sizes other than one small page may not be fully honoured -- TODO
    confirm against add_symbol_with_caps/current_region_symbols consumers.'''
    state = context['state']
    stash = state.stash
    obj = TutorialFunctions.capdl_alloc_obj(context, ObjectType.seL4_FrameObject,
                                            cap_symbol, size=size)
    # NOTE(review): `cap_symbol` is rebound here to the value returned by
    # capdl_alloc_cap (presumably the cap's C declaration text, since it is
    # joined into the returned source below -- verify); the original symbol
    # name is passed in twice as arguments.
    cap_symbol = TutorialFunctions.capdl_alloc_cap(context, ObjectType.seL4_FrameObject,
                                                   cap_symbol, cap_symbol,
                                                   read=True, write=True, grant=True)
    # Map the frame read/write/grant at `symbol` in the current VSpace and
    # record the region.
    stash.current_addr_space.add_symbol_with_caps(
        symbol, [size], [Cap(obj, read=True, write=True, grant=True)])
    stash.current_region_symbols.append((symbol, size, "size_12bit"))
    return "\n".join([
        cap_symbol,
        "extern const char %s[%d];" % (symbol, size),
    ])
def set_tcb_sc(tcb, ast, perspective, obj_space, group):
    '''Attach a scheduling context (SC) cap to `tcb` unless the component
    instance is configured as passive via its passive attribute.

    Raises if the passive attribute is set more than once, is not a boolean
    string ('true'/'false', case-insensitive), or if zero or multiple SC
    objects exist for an active instance.'''
    # add SC
    assembly = find_assembly(ast)
    settings = assembly.configuration.settings if assembly.configuration is not None else []

    # first check if this thread has been configured to not have an SC
    passive_attribute_name = perspective['passive_attribute']
    instance_name = perspective['instance']
    passive_attributes = [
        x for x in settings
        if x.instance == instance_name and x.attribute == passive_attribute_name
    ]

    # Determine whether a passive component instance thread was specified
    if len(passive_attributes) == 0:
        # Attribute absent: default to an active (SC-bearing) thread.
        passive_instance = False
    elif len(passive_attributes) == 1:
        if isinstance(passive_attributes[0].value, str):
            passive_attribute = passive_attributes[0].value.lower()
            if passive_attribute == 'true':
                passive_instance = True
            elif passive_attribute == 'false':
                passive_instance = False
            else:
                raise Exception(
                    'Boolean string expected for %s.%s. Got "%s".' %
                    (instance_name, passive_attribute_name, passive_attribute))
        else:
            # Non-string setting value: reject rather than guess at truthiness.
            raise Exception('Boolean string expected for %s.%s. Got "%s".' %
                            (instance_name, passive_attribute_name,
                             passive_attributes[0].value))
    else:
        raise Exception('Multiple settings of attribute %s.%s.' %
                        (instance_name, passive_attribute_name))

    # Attach the SC to the component instance thread if it isn't passive
    if not passive_instance:
        sc_name = perspective['sc']
        scs = [x for x in obj_space.spec.objs if x.name == sc_name]
        if len(scs) == 0:
            raise Exception('No SC found for active component instance %s' % instance_name)
        elif len(scs) > 1:
            raise Exception('Multiple SCs found for %s' % group)
        else:
            assert len(scs) == 1
            sc, = scs
            tcb['sc_slot'] = Cap(sc)
def ELF(context, content, name, passive=False):
    """Declares an ELF object containing `content` with name `name`.

    When an output directory is configured (and not in docsite mode), writes
    the ELF's C source file and records its filename. Then allocates the capDL
    objects backing the ELF's main thread: a 16-page stack, an IPC buffer, a
    TCB, and (for non-passive threads when the RT kernel is in use) a
    scheduling context.

    Returns `content` unchanged so the template text is preserved.
    """
    state = context['state']
    args = context['args']
    stash = state.stash
    if args.out_dir and not args.docsite:
        filename = os.path.join(args.out_dir, "%s.c" % name)
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        # Use a context manager so the file is flushed and closed even on
        # error (the previous code leaked the open handle).
        with open(filename, 'w') as elf_file:
            print(filename, file=args.output_files)
            elf_file.write(content)

    # The following allocates objects for the main thread, its IPC buffer and stack.
    stack_name = "stack"
    ipc_name = "mainIpcBuffer"
    number_stack_frames = 16
    frames = [stash.objects.alloc(ObjectType.seL4_FrameObject,
                                  name='stack_%d_%s_obj' % (i, name),
                                  label=name, size=4096)
              for i in range(number_stack_frames)]
    sizes = [4096] * number_stack_frames
    caps = [Cap(frame, read=True, write=True, grant=False) for frame in frames]
    stash.current_addr_space.add_symbol_with_caps(stack_name, sizes, caps)
    stash.current_region_symbols.append((stack_name, sum(sizes), 'size_12bit'))

    ipc_frame = stash.objects.alloc(ObjectType.seL4_FrameObject,
                                    name='ipc_%s_obj' % (name), label=name,
                                    size=4096)
    caps = [Cap(ipc_frame, read=True, write=True, grant=False)]
    sizes = [4096]
    stash.current_addr_space.add_symbol_with_caps(ipc_name, sizes, caps)
    stash.current_region_symbols.append((ipc_name, sum(sizes), 'size_12bit'))

    tcb = stash.objects.alloc(ObjectType.seL4_TCBObject, name='tcb_%s' % (name))
    tcb['ipc_buffer_slot'] = Cap(ipc_frame, read=True, write=True, grant=False)
    cap = Cap(stash.current_cspace.cnode)
    tcb['cspace'] = cap
    # The CNode's final size isn't known yet; record this cap so its guard
    # size can be fixed up once the CSpace is finalised.
    stash.current_cspace.cnode.update_guard_size_caps.append(cap)
    tcb['vspace'] = Cap(stash.current_addr_space.vspace_root)
    tcb.elf = name
    if not passive and stash.rt:
        # Active threads on RT kernels need a scheduling context.
        sc = stash.objects.alloc(ObjectType.seL4_SchedContextObject,
                                 name='sc_%s_obj' % (name), label=name)
        tcb['sc_slot'] = Cap(sc)
    stash.finish_elf(name, "%s.c" % name, passive)
    print("end")
    return content
def collapse_shared_frames(ast, obj_space, cspaces, elfs, options, **_):
    """Find regions in virtual address spaces that are intended to be backed by
    shared frames and adjust the capability distribution to reflect this.

    Fixes relative to the previous revision:
      * error paths referenced an undefined name `me` (NameError); they now
        use `connections[0]` like the rest of the function;
      * `xrange` replaced with `six.moves.range` for consistency with the rest
        of the file and Python 3 compatibility;
      * `/` replaced with `//` where integer results are required.
    """

    if not elfs:
        # If we haven't been passed any ELF files this step is not relevant yet.
        return

    assembly = find_assembly(ast)

    # We want to track the frame objects backing shared regions with a dict
    # keyed on the name of the connection linking the regions.
    shared_frames = {}

    for i in (x for x in assembly.composition.instances if not x.type.hardware):

        perspective = Perspective(instance=i.name, group=i.address_space)

        elf_name = perspective['elf_name']
        assert elf_name in elfs
        elf = elfs[elf_name]

        # Find this instance's page directory.
        pd_name = perspective['pd']
        pds = [x for x in obj_space.spec.objs if x.name == pd_name]
        assert len(pds) == 1
        pd, = pds

        for d in i.type.dataports:

            # Find the connection that associates this dataport with another.
            connections = [x for x in assembly.composition.connections if
                           ((x.from_instance == i and x.from_interface == d) or
                            (x.to_instance == i and x.to_interface == d))]
            if len(connections) == 0:
                # This dataport is unconnected.
                continue
            #assert len(connections) == 1
            conn_name = connections[0].name

            if connections[0].from_instance == i and \
                    connections[0].from_interface == d:
                direction = 'from'
            else:
                assert connections[0].to_instance == i
                assert connections[0].to_interface == d
                direction = 'to'

            # Reverse the logic in the Makefile template.
            p = Perspective(instance=i.name, dataport=d.name)
            sym = p['dataport_symbol']

            vaddr = get_symbol_vaddr(elf, sym)
            assert vaddr is not None, 'failed to find dataport symbol \'%s\'' \
                ' in ELF %s' % (sym, elf_name)
            assert vaddr != 0
            assert vaddr % PAGE_SIZE == 0, 'dataport %s not page-aligned' % sym
            sz = get_symbol_size(elf, sym)
            assert sz != 0

            # Infer the page table(s) and page(s) that back this region.
            pts, p_indices = zip(*[
                (pd[page_table_index(options.architecture, v)].referent,
                 page_index(options.architecture, v))
                for v in six.moves.range(vaddr, vaddr + sz, PAGE_SIZE)])

            # Determine the rights this mapping should have. We use these to
            # recreate the mapping below. Technically we may not need to
            # recreate this mapping if it's already correct, but do it anyway
            # for simplicity.
            # FIXME: stop hard coding this name mangling.
            rights_setting = assembly.configuration[conn_name].get('%s_access' % direction)
            if rights_setting is not None and \
                    re.match(r'^"R?W?(G|X)?"$', rights_setting):
                read = 'R' in rights_setting
                write = 'W' in rights_setting
                execute = 'X' in rights_setting or 'G' in rights_setting
            else:
                # default
                read = True
                write = True
                execute = False

            # Check if the dataport is connected *TO* a hardware component.
            if connections[0].to_instance.type.hardware:
                p = Perspective(to_interface=connections[0].to_interface.name)
                hardware_attribute = p['hardware_attribute']
                conf = assembly.configuration[connections[0].to_instance.name] \
                    .get(hardware_attribute)
                assert conf is not None, "%s.%s not found in configuration" % \
                    (connections[0].to_instance.name, hardware_attribute)
                paddr, size = conf.strip('"').split(':')
                # Round up the MMIO size to PAGE_SIZE
                try:
                    paddr = int(paddr, 0)
                except ValueError:
                    # BUGFIX: this previously referenced the undefined name `me`.
                    raise Exception("Invalid physical address specified for %s.%s: %s\n" %
                                    (connections[0].to_instance.name,
                                     connections[0].to_interface.name, paddr))

                try:
                    size = int(size, 0)
                except ValueError:
                    raise Exception("Invalid size specified for %s.%s: %s\n" %
                                    (connections[0].to_instance.name,
                                     connections[0].to_interface.name, size))

                hardware_cached = p['hardware_cached']
                cached = assembly.configuration[connections[0].to_instance.name] \
                    .get(hardware_cached)
                if cached is None:
                    cached = False
                elif cached.lower() == 'true':
                    cached = True
                elif cached.lower() == 'false':
                    cached = False
                else:
                    raise Exception("Value of %s.%s_cached must be either 'true' or 'false'. Got '%s'." %
                                    (connections[0].to_instance.name,
                                     connections[0].to_interface.name, cached))

                instance_name = connections[0].to_instance.name

                if size == 0:
                    raise Exception('Hardware dataport %s.%s has zero size!' %
                                    (instance_name, connections[0].to_interface.name))

                # determine the size of a large frame, and the type of kernel
                # object that will be used, both of which depend on the architecture
                if get_elf_arch(elf) == 'ARM':
                    large_size = 1024 * 1024
                    large_object_type = seL4_ARM_SectionObject
                else:
                    large_size = 4 * 1024 * 1024
                    large_object_type = seL4_IA32_4M

                # Check if MMIO start and end is aligned to page table coverage.
                # This will indicate that we should use pagetable-sized pages
                # to back the device region to be consistent with the kernel.
                if paddr % large_size == 0 and size % large_size == 0:

                    # number of page tables backing device memory
                    n_pts = size // large_size

                    # index of first page table in page directory backing the device memory
                    base_pt_index = page_table_index(options.architecture, vaddr)
                    pt_indices = six.moves.range(base_pt_index, base_pt_index + n_pts)

                    # loop over all the page table indices and replace the page
                    # tables with large frames
                    for count, pt_index in enumerate(pt_indices):

                        # look up the page table at the current index
                        pt = pd[pt_index].referent

                        offset = count * large_size
                        frame_paddr = paddr + offset

                        # lookup the frame, already allocated by a template
                        frame_cap = find_hardware_frame_in_cspace(
                            cspaces[i.address_space],
                            frame_paddr,
                            connections[0].to_instance.name,
                            connections[0].to_interface.name)
                        frame_obj = frame_cap.referent

                        # create a new cap for the frame to use for its mapping
                        mapping_frame_cap = Cap(frame_obj, read, write, execute)
                        mapping_frame_cap.set_cached(cached)

                        # add the mapping to the spec
                        pd[pt_index] = mapping_frame_cap

                        # add the mapping information to the original cap
                        frame_cap.set_mapping(pd, pt_index)

                        # remove all the small frames from the spec
                        for p_index in pt:
                            small_frame = pt[p_index].referent
                            obj_space.remove(small_frame)

                        # remove the page table from the spec
                        obj_space.remove(pt)

                else:
                    # If the MMIO start and end are not aligned to page table
                    # coverage, loop over all the frames and set their paddrs
                    # based on the paddr in the spec.
                    for idx in six.moves.range(0, (size + PAGE_SIZE - 1) // PAGE_SIZE):
                        try:
                            frame_obj = pts[idx][p_indices[idx]].referent
                        except IndexError:
                            raise Exception('MMIO attributes specify device '
                                            'memory that is larger than the dataport it is '
                                            'associated with')

                        offset = idx * PAGE_SIZE
                        frame_paddr = paddr + offset

                        # lookup the frame, already allocated by a template
                        frame_cap = find_hardware_frame_in_cspace(
                            cspaces[i.address_space],
                            frame_paddr,
                            connections[0].to_instance.name,
                            connections[0].to_interface.name)
                        frame_obj = frame_cap.referent

                        # create a new cap for the frame to use for its mapping
                        mapping_frame_cap = Cap(frame_obj, read, write, execute)
                        mapping_frame_cap.set_cached(cached)

                        # add the mapping to the spec
                        pt = pts[idx]
                        slot = p_indices[idx]
                        pt.slots[slot] = mapping_frame_cap

                        # add the mapping information to the original cap
                        frame_cap.set_mapping(pt, slot)

                        obj_space.relabel(conn_name, frame_obj)

                continue

            # If any objects still have names indicating they are part of a hardware
            # dataport, it means that dataport hasn't been given a paddr or size.
            # This indicates an error, and the object name is invalid in capdl,
            # so catch the error here rather than having the capdl translator fail.
            for cap in (v for v in cspaces[i.address_space].cnode.slots.values()
                        if v is not None):
                obj = cap.referent
                match = HARDWARE_FRAME_NAME_PATTERN.match(obj.name)
                assert (match is None or
                        match.group(2) != connections[0].to_instance.name), \
                    "Missing hardware attributes for %s.%s" % (match.group(2), match.group(3))

            shm_keys = []
            for c in connections:
                shm_keys.append('%s_%s' % (c.from_instance.name, c.from_interface.name))
                shm_keys.append('%s_%s' % (c.to_instance.name, c.to_interface.name))

            mapped = [x for x in shm_keys if x in shared_frames]
            if mapped:
                # We've already encountered the other side of this dataport.
                # The region had better be the same size in all address spaces.
                for key in mapped:
                    assert len(shared_frames[key]) == sz // PAGE_SIZE

            # This is the first side of this dataport.
            # Save all the frames backing this region.
            for key in shm_keys:
                if mapped:
                    shared_frames[key] = shared_frames[mapped[0]]
                else:
                    shared_frames[key] = [pt[p_index].referent
                                          for (pt, p_index) in zip(pts, p_indices)]

            # Overwrite the caps backing this region with caps to the shared
            # frames.
            for j, f in enumerate(shared_frames[shm_keys[0]]):
                existing = pts[j].slots[p_indices[j]].referent
                if existing != f:
                    # We're actually modifying this mapping. Delete the
                    # unneeded frame.
                    obj_space.remove(existing)
                pts[j].slots[p_indices[j]] = Cap(f, read, write, execute)
                obj_space.relabel(conn_name, f)
def ELF(context, content, name, passive=False):
    """Declares an ELF object containing `content` with name `name`.

    When an output directory is configured (and not in docsite mode), writes
    the ELF's C source file and records its filename. Then allocates the
    objects backing the ELF's main thread: a 16-page stack, an IPC buffer, a
    TCB, and (for non-passive threads when the RT kernel is in use) a
    scheduling context. The stack pointer, IPC buffer address, entry point and
    initial stack words are set up so a normal libmuslc `_start` can run.

    Returns `content` unchanged so the template text is preserved.
    """
    state = context['state']
    args = context['args']
    stash = state.stash
    if args.out_dir and not args.docsite:
        filename = os.path.join(args.out_dir, "%s.c" % name)
        if not os.path.exists(os.path.dirname(filename)):
            os.makedirs(os.path.dirname(filename))
        # Use a context manager so the file is flushed and closed even on
        # error (the previous code leaked the open handle).
        with open(filename, 'w') as elf_file:
            print(filename, file=args.output_files)
            elf_file.write(content)

    objects = stash.allocator_state.obj_space

    # The following allocates objects for the main thread, its IPC buffer and stack.
    stack_name = "stack"
    ipc_name = "mainIpcBuffer"
    number_stack_frames = 16
    frames = [objects.alloc(ObjectType.seL4_FrameObject,
                            name='stack_%d_%s_obj' % (i, name),
                            label=name, size=4096)
              for i in range(number_stack_frames)]
    # Keep the stack and IPC size lists in separate variables: the previous
    # revision rebound a single `sizes` list, corrupting the sp computation
    # below.
    stack_sizes = [4096] * number_stack_frames
    caps = [Cap(frame, read=True, write=True, grant=False) for frame in frames]
    stash.current_addr_space.add_symbol_with_caps(stack_name, stack_sizes, caps)
    stash.current_region_symbols.append((stack_name, sum(stack_sizes), 'size_12bit'))

    ipc_frame = objects.alloc(ObjectType.seL4_FrameObject,
                              name='ipc_%s_obj' % (name), label=name, size=4096)
    ipc_sizes = [4096]
    stash.current_addr_space.add_symbol_with_caps(
        ipc_name, ipc_sizes, [Cap(ipc_frame, read=True, write=True, grant=False)])
    stash.current_region_symbols.append((ipc_name, sum(ipc_sizes), 'size_12bit'))

    tcb = objects.alloc(ObjectType.seL4_TCBObject, name='tcb_%s' % (name))
    tcb['ipc_buffer_slot'] = Cap(ipc_frame, read=True, write=True, grant=False)
    cap = Cap(stash.current_cspace.cnode)
    tcb['cspace'] = cap
    # The CNode's final size isn't known yet; record this cap so its guard
    # size can be fixed up once the CSpace is finalised.
    stash.current_cspace.cnode.update_guard_size_caps.append(cap)
    tcb['vspace'] = Cap(stash.current_addr_space.vspace_root)
    # The stack grows downwards, so sp starts at the top of the whole stack
    # region. (BUGFIX: this previously added sum(sizes) after `sizes` had been
    # rebound to the IPC buffer's [4096], leaving 15 of the 16 stack pages
    # above the initial stack pointer.)
    tcb.sp = "get_vaddr(\'%s\') + %d" % (stack_name, sum(stack_sizes))
    tcb.addr = "get_vaddr(\'%s\')" % (ipc_name)
    tcb.ip = "get_vaddr(\'%s\')" % ("_start")
    # This initialises the main thread's stack so that a normal _start routine
    # provided by libmuslc can be used.
    # The capdl loader app takes the first 4 arguments of .init and sets
    # registers to them,
    # argc = 2,
    # argv[0] = get_vaddr("progname") which is a string of the program name,
    # argv[1] = 1 This could be changed to anything,
    # 0, 0, null terminates the argument vector and null terminates the empty
    #       environment string vector
    # 32, is an aux vector key of AT_SYSINFO
    # get_vaddr("sel4_vsyscall") is the address of the SYSINFO table
    # 0, 0, null terminates the aux vectors.
    tcb.init = "[0,0,0,0,2,get_vaddr(\"progname\"),1,0,0,32,get_vaddr(\"sel4_vsyscall\"),0,0]"
    if not passive and stash.rt:
        # Active threads on RT kernels need a scheduling context.
        sc = objects.alloc(ObjectType.seL4_SchedContextObject,
                           name='sc_%s_obj' % (name), label=name)
        sc.size_bits = 8
        tcb['sc_slot'] = Cap(sc)
    stash.current_cspace.alloc(tcb)
    stash.finish_elf(name, "%s.c" % name)
    print("end")
    return content
def __init__(self, *args, **kwargs):
    '''Set up the virtio-mmio region, virtio shared-memory pool and virtio IRQ
    handlers for this allocator/component.

    Assumes args[0] carries the platform name in a `.plat` attribute -- TODO
    confirm against the base class's constructor signature. Raises for
    platforms absent from VIRTIO_PARAMS.'''
    super().__init__(*args, **kwargs)
    plat = args[0].plat
    if plat in VIRTIO_PARAMS:
        virtio = VIRTIO_PARAMS[plat]
    else:
        raise Exception("Unsupported platform '%s'" % (plat))

    # Page-align the physical extent of the virtio-mmio device blocks.
    virtio_region_start = align_down(virtio["paddr"], PAGE_SIZE)
    virtio_region_end = align_up(
        virtio["paddr"] + virtio["count"] * MMIO_BLOCK_SIZE, PAGE_SIZE)

    # first allocate vaddrs for the virtio region
    self.align(PAGE_SIZE)
    self.skip(PAGE_SIZE)
    virtio_vaddr = self.cur_vaddr
    virtio_size = virtio_region_end - virtio_region_start
    self.skip(virtio_size)
    # add some padding to catch out-of-bounds access
    self.skip(PAGE_SIZE)

    # allocate vaddrs for the virtio pool, these pages must be
    # shared with the host
    virtio_pool_vaddr = self.cur_vaddr
    self.skip(align_up(virtio["poolsize"], PAGE_SIZE))
    # more padding
    self.skip(PAGE_SIZE)

    # map in the virtio-mmio region
    # TODO do we need to do this a page at a time?
    for (vaddr, paddr) in zip(
            it.count(virtio_vaddr, PAGE_SIZE),
            range(virtio_region_start, virtio_region_end, PAGE_SIZE)):
        self.map_with_size(vaddr=vaddr, paddr=paddr, size=PAGE_SIZE,
                           read=True, write=True)

    # allocate some pages to put at a fixed address for communication over virtio
    virtio_pool_pages = []
    for vaddr in range(
            virtio_pool_vaddr,
            align_up(virtio_pool_vaddr + virtio["poolsize"], PAGE_SIZE),
            PAGE_SIZE):
        page = self.alloc(ObjectType.seL4_FrameObject,
                          name='virtio_page_{:#x}'.format(vaddr), size=4096)
        # One cap goes into our cspace; a second, uncached cap backs the
        # fixed-address mapping.
        cap = self.cspace().alloc(page, read=True, write=True, cached=False)
        virtio_pool_pages.append(cap)
        self.addr_space().add_hack_page(
            vaddr, PAGE_SIZE, Cap(page, read=True, write=True, cached=False))

    # create irq handler objects to catch all virtio IRQs
    self.event_nfn = self.alloc(ObjectType.seL4_NotificationObject,
                                name='event_nfn')
    virtio_irq_handlers = []
    for irq in range(virtio["irq"], virtio["irq"] + virtio["count"]):
        # Level-triggered handlers, all signalling the single event
        # notification with the IRQ badge.
        irq_handler = self.alloc(ObjectType.seL4_IRQHandler,
                                 name='irq_{}_handler'.format(irq),
                                 number=irq,
                                 trigger=ARMIRQMode.seL4_ARM_IRQ_LEVEL,
                                 notification=Cap(self.event_nfn, badge=BADGE_IRQ))
        cap = self.cspace().alloc(irq_handler)
        virtio_irq_handlers.append(cap)

    # Bundle everything consumers need (vaddr ranges, caps, badges).
    self._arg = {
        'virtio_region': (virtio_vaddr, virtio_vaddr + virtio_size),
        'virtio_irq_handlers': virtio_irq_handlers,
        'virtio_pool_region': (virtio_pool_vaddr,
                               align_up(virtio_pool_vaddr + virtio["poolsize"], PAGE_SIZE)),
        'virtio_pool_pages': virtio_pool_pages,
        'event_nfn': self.cspace().alloc(self.event_nfn, read=True),
        'badges': {
            'irq': BADGE_IRQ,
            'client': BADGE_CLIENT,
        },
    }
def set_tcb_info(cspaces, obj_space, elfs, options, **_):
    '''Set relevant extra info for TCB objects.'''
    arch = lookup_architecture(options.architecture)
    for group, space in cspaces.items():
        cnode = space.cnode
        # Walk every slot holding a cap to a TCB; `index` is the slot number.
        for index, tcb in [(k, v.referent) for (k, v) in cnode.slots.items()
                           if v is not None and isinstance(v.referent, TCB)]:

            perspective = Perspective(group=group, tcb=tcb.name)

            elf_name = perspective['elf_name']
            elf = elfs.get(elf_name)
            if elf is None:
                # We were not passed an ELF file for this CSpace. This will be
                # true in the first pass of the runner where no ELFs are passed.
                continue
            tcb.elf = elf_name
            tcb.ip = get_symbol_vaddr(elf, perspective['entry_symbol'])
            assert tcb.ip != 0, 'entry point \'%s\' of %s appears to be 0x0' \
                % (perspective['entry_symbol'], tcb.name)

            if perspective['pool']:
                # This TCB is part of the (cap allocator's) TCB pool.
                continue

            stack_symbol = perspective['stack_symbol']
            ipc_buffer_symbol = perspective['ipc_buffer_symbol']

            # The stack should be at least three pages and the IPC buffer
            # region should be exactly three pages. Note that these regions
            # both include a guard page either side of the used area. It is
            # assumed that the stack grows downwards.
            stack_size = get_symbol_size(elf, stack_symbol)
            assert stack_size is not None, 'Stack for %(name)s, ' \
                '\'%(symbol)s\', not found' % {
                    'name': tcb.name,
                    'symbol': stack_symbol,
                }
            assert stack_size >= PAGE_SIZE * 3, 'Stack for %(name)s, ' \
                '\'%(symbol)s\', is only %(size)d bytes and does not have ' \
                'room for guard pages' % {
                    'name': tcb.name,
                    'symbol': stack_symbol,
                    'size': stack_size,
                }
            assert get_symbol_size(elf, ipc_buffer_symbol) == PAGE_SIZE * 3

            # Move the stack pointer to the top of the stack. Extra page is
            # to account for the (upper) guard page.
            assert stack_size % PAGE_SIZE == 0
            tcb.sp = get_symbol_vaddr(elf, stack_symbol) + stack_size - PAGE_SIZE
            # IPC buffer lives at the end of the middle page of its
            # three-page (guarded) region.
            tcb.addr = get_symbol_vaddr(elf, ipc_buffer_symbol) + \
                2 * PAGE_SIZE - arch.ipc_buffer_size()

            # Each TCB needs to be passed its 'thread_id' that is the value
            # it branches on in main(). This corresponds to the slot index
            # to a cap to it in the component's CNode.
            tcb.init.append(index)

            if options.realtime:
                sc_name = perspective['sc']
                if sc_name in obj_space:
                    # For non-passive threads, associate the sc with the tcb
                    sc = obj_space[sc_name]
                    tcb['sc_slot'] = Cap(sc)
def replace_dma_frames(ast, obj_space, elfs, options, **_):
    '''Locate the DMA pool (a region that needs to have frames whose mappings
    can be reversed) and replace its backing frames with pre-allocated,
    reversible ones.'''

    if not elfs:
        # If we haven't been passed any ELF files this step is not relevant yet.
        return

    arch = lookup_architecture(options.architecture)
    assembly = ast.assembly

    # Only software instances have vspaces to fix up.
    for i in (x for x in assembly.composition.instances if not x.type.hardware):
        perspective = Perspective(instance=i.name, group=i.address_space)

        elf_name = perspective['elf_name']
        assert elf_name in elfs
        elf = elfs[elf_name]

        # Find this instance's page directory.
        pd_name = perspective['pd']
        pds = [x for x in obj_space.spec.objs if x.name == pd_name]
        assert len(pds) == 1
        pd, = pds

        sym = perspective['dma_pool_symbol']
        base = get_symbol_vaddr(elf, sym)
        if base is None:
            # We don't have a DMA pool.
            continue
        assert base != 0
        sz = get_symbol_size(elf, sym)
        assert sz % PAGE_SIZE == 0  # DMA pool should be at least page-aligned.

        # Replicate logic from the template to determine the page size used to
        # back the DMA pool.
        page_size = 4 * 1024
        if options.largeframe_dma:
            # Pick the largest architecture page size that fits in the pool.
            for size in reversed(page_sizes(options.architecture)):
                if sz >= size:
                    page_size = size
                    break

        assert sz % page_size == 0, 'DMA pool not rounded up to a multiple ' \
            'of page size %d (template bug?)' % page_size

        dma_frame_index = 0

        def get_dma_frame(index):
            '''
            Find the `index`-th DMA frame. Note that these are constructed in
            the component template itself.
            '''
            # Closes over `i` from the current iteration of the outer loop.
            p = Perspective(instance=i.name, group=i.address_space,
                            dma_frame_index=index)
            name = p['dma_frame_symbol']
            assert name in obj_space, "No such symbol in capdl spec %s" % name
            return obj_space[name]

        # Ensure paging structures are in place to map in dma frames
        replace_large_frames(obj_space, arch, pd, base, sz, page_size)

        for page_vaddr in six.moves.range(base, base + sz, page_size):
            # Read/write cap with caching disabled for the DMA mapping.
            cap = Cap(get_dma_frame(dma_frame_index), True, True, False)
            cap.set_cached(False)
            update_frame_in_vaddr(arch, pd, page_vaddr, page_size, cap)
            dma_frame_index = dma_frame_index + 1
def replace_dma_frames(ast, obj_space, elfs, options, **_):
    '''Locate the DMA pool (a region that needs to have frames whose mappings
    can be reversed) and replace its backing frames with pre-allocated,
    reversible ones.

    NOTE(review): legacy variant relying on Python-2 semantics
    (`len(filter(...))`, `map(...)` returning lists); it will not run
    unmodified on Python 3.'''
    # TODO: Large parts of this function clagged from collapse_shared_frames;
    # Refactor.

    if not elfs:
        # If we haven't been passed any ELF files this step is not relevant yet.
        return

    assembly = find_assembly(ast)

    for i in (x for x in assembly.composition.instances if not x.type.hardware):
        perspective = Perspective(instance=i.name, group=i.address_space)

        elf_name = perspective['elf_name']
        assert elf_name in elfs
        elf = elfs[elf_name]

        # Find this instance's page directory.
        pd_name = perspective['pd']
        pds = filter(lambda x: x.name == pd_name, obj_space.spec.objs)
        assert len(pds) == 1
        pd, = pds

        sym = perspective['dma_pool_symbol']
        base = get_symbol_vaddr(elf, sym)
        if base is None:
            # We don't have a DMA pool.
            continue
        assert base != 0
        sz = get_symbol_size(elf, sym)
        assert sz % PAGE_SIZE == 0  # DMA pool should be page-aligned.

        # Generate a list of the base addresses of the pages we need to
        # replace.
        base_vaddrs = [PAGE_SIZE * x + base for x in range(int(sz / PAGE_SIZE))]

        for index, v in enumerate(base_vaddrs):
            # Locate the mapping.
            pt_index = page_table_index(get_elf_arch(elf), v, options.hyp)
            p_index = page_index(get_elf_arch(elf), v, options.hyp)

            # It should contain an existing frame.
            assert pt_index in pd
            pt = pd[pt_index].referent
            assert p_index in pt
            discard_frame = pt[p_index].referent

            # Locate the frame we're going to replace it with. The logic that
            # constructs this object name is in component.template.c. Note that
            # we need to account for the guard-prefix of the instance name
            # introduced by the template context.
            p = Perspective(instance=i.name, group=i.address_space,
                            dma_frame_index=index)
            dma_frames = [x for x in obj_space.spec.objs
                          if x.name == p['dma_frame_symbol']]
            assert len(dma_frames) == 1
            dma_frame, = dma_frames

            # Replace the existing mapping.
            c = Cap(dma_frame, True, True, False)  # RW
            c.set_cached(False)
            pt.slots[p_index] = c

            # We can now remove the old frame as we know it's not referenced
            # anywhere else. TODO: assert this somehow.
            obj_space.remove(discard_frame)
def collapse_shared_frames(ast, obj_space, cspaces, elfs, *_):
    """Find regions in virtual address spaces that are intended to be backed by
    shared frames and adjust the capability distribution to reflect this.

    NOTE(review): legacy variant relying on Python-2 semantics throughout
    (`len(filter(...))`, `map(...)` returning a list, integer '/'); it will
    not run unmodified on Python 3."""

    if not elfs:
        # If we haven't been passed any ELF files this step is not relevant yet.
        return

    assembly = find_assembly(ast)

    settings = \
        assembly.configuration.settings if assembly.configuration is not None \
        else []

    # We want to track the frame objects backing shared regions with a dict
    # keyed on the name of the connection linking the regions.
    shared_frames = {}

    for i in assembly.composition.instances:
        if i.type.hardware:
            continue

        perspective = Perspective(instance=i.name, group=i.address_space)

        elf_name = perspective['elf_name']
        assert elf_name in elfs
        elf = elfs[elf_name]

        # Find this instance's page directory.
        pd_name = perspective['pd']
        pds = filter(lambda x: x.name == pd_name, obj_space.spec.objs)
        assert len(pds) == 1
        pd, = pds

        for d in i.type.dataports:

            # Find the connection that associates this dataport with another.
            connections = filter(lambda x: \
                (x.from_instance == i and x.from_interface == d) or \
                (x.to_instance == i and x.to_interface == d), \
                assembly.composition.connections)
            if len(connections) == 0:
                # This dataport is unconnected.
                continue
            #assert len(connections) == 1
            conn_name = connections[0].name

            if connections[0].from_instance == i and \
                    connections[0].from_interface == d:
                direction = 'from'
            else:
                assert connections[0].to_instance == i
                assert connections[0].to_interface == d
                direction = 'to'

            # Reverse the logic in the Makefile template.
            p = Perspective(instance=i.name, dataport=d.name)
            sym = p['dataport_symbol']

            vaddr = get_symbol_vaddr(elf, sym)
            assert vaddr is not None, 'failed to find dataport symbol \'%s\'' \
                ' in ELF %s' % (sym, elf_name)
            assert vaddr != 0
            sz = get_symbol_size(elf, sym)
            assert sz != 0

            # Infer the page table that backs this vaddr.
            pt_index = page_table_index(get_elf_arch(elf), vaddr)
            assert pt_index in pd
            pt = pd[pt_index].referent

            # Infer the starting page index of this vaddr.
            p_index = page_index(get_elf_arch(elf), vaddr)

            # TODO: If the following assertion fails it means that the shared
            # region crosses a PT boundary (i.e. it is backed by more than one
            # PT). In theory we could support this with a bit more cleverness
            # here.
            assert page_table_index(get_elf_arch(elf), vaddr + sz - 1) == pt_index

            # Determine the rights this mapping should have. We use these to
            # recreate the mapping below. Technically we may not need to
            # recreate this mapping if it's already correct, but do it anyway
            # for simplicity.
            # FIXME: stop hard coding this name mangling.
            rights_setting = filter(lambda x: x.instance == conn_name and \
                x.attribute == '%s_access' % direction, settings)
            if len(rights_setting) == 1 and \
                    re.match(r'^"R?W?(G|X)?"$', rights_setting[0].value):
                read = 'R' in rights_setting[0].value
                write = 'W' in rights_setting[0].value
                execute = 'X' in rights_setting[0].value or \
                    'G' in rights_setting[0].value
            else:
                # default
                read = True
                write = True
                execute = False

            # Check if the dataport is connected *TO* a hardware component.
            if connections[0].to_instance.type.hardware and \
                    assembly.configuration is not None:
                p = Perspective(to_interface=connections[0].to_interface.name)
                hardware_attribute = p['hardware_attribute']
                configurations = filter(lambda x: \
                    (x.instance == connections[0].to_instance.name and \
                    x.attribute == hardware_attribute), \
                    assembly.configuration.settings)
                assert len(configurations) == 1
                # Setting value has the form "paddr:size" (both hex).
                paddr, size = configurations[0].value.strip('"').split(':')

                # Round up the MMIO size to PAGE_SIZE
                size = int(size, 16)

                for idx in range(0, (size + PAGE_SIZE - 1) / PAGE_SIZE):
                    frame_obj = pt[p_index + idx].referent
                    frame_obj.paddr = int(paddr, 16) + PAGE_SIZE * idx
                    # Device memory: map uncached.
                    cap = Cap(frame_obj, read, write, execute)
                    cap.set_cached(False)
                    pt.slots[p_index + idx] = cap
                    obj_space.relabel(conn_name, frame_obj)

                continue

            shm_keys = []
            for c in connections:
                shm_keys.append('%s_%s' % (c.from_instance.name, c.from_interface.name))
                shm_keys.append('%s_%s' % (c.to_instance.name, c.to_interface.name))

            mapped = filter(lambda x: x in shared_frames, shm_keys)
            if mapped:
                # We've already encountered the other side of this dataport.
                # The region had better be the same size in all address spaces.
                for key in mapped:
                    assert len(shared_frames[key]) == sz / PAGE_SIZE

            # This is the first side of this dataport.
            # Save all the frames backing this region.
            for key in shm_keys:
                if mapped:
                    shared_frames[key] = shared_frames[mapped[0]]
                else:
                    shared_frames[key] = \
                        map(lambda x: pt[p_index + x].referent, \
                            range(0, sz / PAGE_SIZE))

            # Overwrite the caps backing this region with caps to the shared
            # frames. Again, note we may not need to do this, but doing it
            # unconditionally is simpler.
            for j in range(0, sz / PAGE_SIZE):
                f = shared_frames[shm_keys[0]][j]
                pt.slots[p_index + j] = Cap(f, read, write, execute)
                obj_space.relabel(conn_name, f)
def set_tcb_caps(ast, obj_space, cspaces, elfs, *_):
    '''Assign each TCB its CSpace, VSpace and IPC-buffer caps.

    For every cap in each group's CNode that refers to a TCB: finalise the
    CNode's size and install a guarded CSpace cap, attach the group's page
    directory (if one was found) as the VSpace, and wire up a cap to the
    frame backing the thread's IPC buffer.
    '''
    for group, space in cspaces.items():
        cnode = space.cnode

        for index, cap in cnode.slots.items():

            if cap is None:
                continue
            tcb = cap.referent
            if not isinstance(tcb, TCB):
                continue

            perspective = Perspective(tcb=tcb.name, group=group)

            # Finalise the CNode so that we know what its absolute size will
            # be. Note that we are assuming no further caps will be added to
            # the CNode after this point.
            cnode.finalise_size()

            cspace = Cap(cnode)
            # Guard out the bits of the word not translated by the CNode.
            # NOTE(review): hardcodes a 32-bit word size; other variants of
            # this pass derive it from the architecture -- confirm.
            cspace.set_guard_size(32 - cnode.size_bits)
            tcb['cspace'] = cspace

            elf_name = perspective['elf_name']

            pd = None
            pd_name = perspective['pd']
            # Use a list comprehension rather than filter(): under Python 3,
            # filter() returns an iterator with no len(), which would make the
            # checks below fail. This also matches the sibling passes in this
            # file.
            pds = [x for x in obj_space.spec.objs if x.name == pd_name]
            if len(pds) > 1:
                raise Exception('Multiple PDs found for %s' % group)
            elif len(pds) == 1:
                pd, = pds
                tcb['vspace'] = Cap(pd)
            # If no PD was found we were probably just not passed any ELF files
            # in this pass.

            if perspective['pool']:
                # This TCB is part of the (cap allocator's) TCB pool.
                continue

            elf = elfs.get(elf_name)

            if pd and elf:
                ipc_symbol = perspective['ipc_buffer_symbol']

                # Find the IPC buffer's virtual address. The symbol spans
                # three pages; the buffer itself is the middle one.
                assert get_symbol_size(elf, ipc_symbol) == PAGE_SIZE * 3
                ipc_vaddr = get_symbol_vaddr(elf, ipc_symbol) + PAGE_SIZE

                # Relate this virtual address to a PT.
                pt_index = page_table_index(get_elf_arch(elf), ipc_vaddr)
                if pt_index not in pd:
                    raise Exception('IPC buffer of TCB %s in group %s does ' \
                        'not appear to be backed by a frame' % (tcb.name, group))
                pt = pd[pt_index].referent

                # Continue on to infer the physical frame.
                p_index = page_index(get_elf_arch(elf), ipc_vaddr)
                if p_index not in pt:
                    raise Exception('IPC buffer of TCB %s in group %s does ' \
                        'not appear to be backed by a frame' % (tcb.name, group))
                frame = pt[p_index].referent

                tcb['ipc_buffer_slot'] = Cap(frame, True, True, True) # RWG
def set_tcb_caps(ast, obj_space, cspaces, elfs, options, **_):
    '''Assign each TCB its CSpace, VSpace and IPC-buffer caps.

    For every TCB referenced from a group's CNode: finalise the CNode's size
    (optionally overridden via the 'cnode_size_bits' configuration
    attribute), install a guarded CSpace cap, attach the group's page
    directory (if one was found) as the VSpace, and wire up a cap to the
    frame backing the thread's IPC buffer.
    '''
    assembly = find_assembly(ast)

    for group, space in cspaces.items():
        cnode = space.cnode

        # Walk only the slots whose caps refer to TCBs.
        for index, tcb in [(k, v.referent) for (k, v) in cnode.slots.items() \
                if v is not None and isinstance(v.referent, TCB)]:

            perspective = Perspective(tcb=tcb.name, group=group)

            # Finalise the CNode so that we know what its absolute size will
            # be. Note that we are assuming no further caps will be added to
            # the CNode after this point.
            cnode.finalise_size()

            # Allow the user to override CNode sizes with the 'cnode_size_bits'
            # attribute.
            cnode_size = assembly.configuration[group].get('cnode_size_bits')
            if cnode_size is not None:
                try:
                    # The attribute may arrive as a string (any base, hence
                    # base 0) or already as an int.
                    if isinstance(cnode_size, str):
                        size = int(cnode_size, 0)
                    else:
                        size = cnode_size
                except ValueError:
                    raise Exception('illegal value for CNode size for %s' % \
                        group)
                # The override may only grow the CNode, never shrink it below
                # what the caps already in it require.
                if size < cnode.size_bits:
                    raise Exception('%d-bit CNode specified for %s, but this ' \
                        'CSpace needs to be at least %d bits' % \
                        (size, group, cnode.size_bits))
                cnode.size_bits = size

            cspace = Cap(cnode)
            # Guard out the bits of the word not translated by the CNode.
            # NOTE(review): hardcodes a 32-bit word size; the variant in this
            # file that takes options.architecture derives this from the
            # architecture instead -- confirm this pass is 32-bit only.
            cspace.set_guard_size(32 - cnode.size_bits)
            tcb['cspace'] = cspace

            elf_name = perspective['elf_name']

            pd = None
            pd_name = perspective['pd']
            pds = [x for x in obj_space.spec.objs if x.name == pd_name]
            if len(pds) > 1:
                raise Exception('Multiple PDs found for %s' % group)
            elif len(pds) == 1:
                pd, = pds
                tcb['vspace'] = Cap(pd)
            # If no PD was found we were probably just not passed any ELF files
            # in this pass.

            if perspective['pool']:
                # This TCB is part of the (cap allocator's) TCB pool.
                continue

            elf = elfs.get(elf_name)

            if pd and elf:
                ipc_symbol = perspective['ipc_buffer_symbol']

                # Find the IPC buffer's virtual address. The symbol spans
                # three pages; the buffer itself is the middle one.
                assert get_symbol_size(elf, ipc_symbol) == PAGE_SIZE * 3
                ipc_vaddr = get_symbol_vaddr(elf, ipc_symbol) + PAGE_SIZE

                # Relate this virtual address to a PT.
                pt_index = page_table_index(get_elf_arch(elf), ipc_vaddr,
                    options.hyp)
                if pt_index not in pd:
                    raise Exception('IPC buffer of TCB %s in group %s does ' \
                        'not appear to be backed by a frame' % (tcb.name, group))
                pt = pd[pt_index].referent

                # Continue on to infer the physical frame.
                p_index = page_index(get_elf_arch(elf), ipc_vaddr, options.hyp)
                if p_index not in pt:
                    raise Exception('IPC buffer of TCB %s in group %s does ' \
                        'not appear to be backed by a frame' % (tcb.name, group))
                frame = pt[p_index].referent

                tcb['ipc_buffer_slot'] = Cap(frame, True, True, False) # RW
def collapse_shared_frames(ast, obj_space, elfs, options, **_):
    """Find regions in virtual address spaces that are intended to be backed by
    shared frames and adjust the capability distribution to reflect this."""

    if not elfs:
        # If we haven't been passed any ELF files this step is not relevant yet.
        return

    assembly = find_assembly(ast)

    # We want to track the frame objects backing shared regions with a dict
    # keyed on the name of the connection linking the regions.
    shared_frames = {}

    # Only software instances have an address space to patch.
    for i in (x for x in assembly.composition.instances
            if not x.type.hardware):
        perspective = Perspective(instance=i.name, group=i.address_space)

        elf_name = perspective['elf_name']
        assert elf_name in elfs
        elf = elfs[elf_name]

        # Find this instance's page directory.
        pd_name = perspective['pd']
        pds = [x for x in obj_space.spec.objs if x.name == pd_name]
        assert len(pds) == 1
        pd, = pds

        # Counter used to give each allocated large device frame a unique name.
        large_frame_uid = 0

        for d in i.type.dataports:

            # Find the connection that associates this dataport with another.
            connections = [x for x in assembly.composition.connections if \
                ((x.from_instance == i and x.from_interface == d) or \
                (x.to_instance == i and x.to_interface == d))]
            if len(connections) == 0:
                # This dataport is unconnected.
                continue
            #assert len(connections) == 1
            conn_name = connections[0].name

            # Work out which end of the (first) connection we are.
            if connections[0].from_instance == i and \
                    connections[0].from_interface == d:
                direction = 'from'
            else:
                assert connections[0].to_instance == i
                assert connections[0].to_interface == d
                direction = 'to'

            # Reverse the logic in the Makefile template.
            p = Perspective(instance=i.name, dataport=d.name)
            sym = p['dataport_symbol']

            vaddr = get_symbol_vaddr(elf, sym)
            assert vaddr is not None, 'failed to find dataport symbol \'%s\'' \
                ' in ELF %s' % (sym, elf_name)
            assert vaddr != 0
            assert vaddr % PAGE_SIZE == 0, 'dataport not page-aligned'
            sz = get_symbol_size(elf, sym)
            assert sz != 0

            arch = get_elf_arch(elf)

            # Infer the page table(s) and page(s) that back this region.
            pts, p_indices = zip(*[\
                (pd[page_table_index(arch, v, options.hyp)].referent,
                 page_index(arch, v, options.hyp)) \
                for v in xrange(vaddr, vaddr + sz, PAGE_SIZE)])

            # Determine the rights this mapping should have. We use these to
            # recreate the mapping below. Technically we may not need to
            # recreate this mapping if it's already correct, but do it anyway
            # for simplicity.
            # FIXME: stop hard coding this name mangling.
            rights_setting = assembly.configuration[conn_name].get('%s_access' % direction)
            if rights_setting is not None and \
                    re.match(r'^"R?W?(G|X)?"$', rights_setting):
                read = 'R' in rights_setting
                write = 'W' in rights_setting
                execute = 'X' in rights_setting or 'G' in rights_setting
            else:
                # default
                read = True
                write = True
                execute = False

            # Check if the dataport is connected *TO* a hardware component.
            if connections[0].to_instance.type.hardware:
                p = Perspective(to_interface=connections[0].to_interface.name)
                hardware_attribute = p['hardware_attribute']
                conf = assembly.configuration[connections[0].to_instance.name].get(hardware_attribute)
                assert conf is not None
                # The attribute value is a quoted 'paddr:size' pair.
                paddr, size = conf.strip('"').split(':')
                # Round up the MMIO size to PAGE_SIZE
                paddr = int(paddr, 0)
                size = int(size, 0)

                instance_name = connections[0].to_instance.name

                if size == 0:
                    raise Exception('Hardware dataport %s.%s has zero size!' %
                        (instance_name, connections[0].to_interface.name))

                # determine the size of a large frame, and the type of kernel
                # object that will be used, both of which depend on the architecture
                if get_elf_arch(elf) == 'ARM':
                    large_size = 1024 * 1024
                    large_object_type = seL4_ARM_SectionObject
                else:
                    large_size = 4 * 1024 * 1024
                    large_object_type = seL4_IA32_4M

                # Check if MMIO start and end is aligned to page table coverage.
                # This will indicate that we should use pagetable-sized pages
                # to back the device region to be consistent with the kernel.
                if paddr % large_size == 0 and size % large_size == 0:

                    # number of page tables backing device memory
                    n_pts = size / large_size

                    # index of first page table in page directory backing the device memory
                    # NOTE(review): unlike every other paging-index call in this
                    # function, this omits options.hyp -- confirm intentional.
                    base_pt_index = page_table_index(get_elf_arch(elf), vaddr)
                    pt_indices = xrange(base_pt_index, base_pt_index + n_pts)

                    # loop over all the page table indices and replace the page tables
                    # with large frames
                    for count, pt_index in enumerate(pt_indices):

                        # look up the page table at the current index
                        pt = pd[pt_index].referent

                        name = 'large_frame_%s_%d' % (instance_name, large_frame_uid)
                        large_frame_uid += 1

                        frame_paddr = paddr + large_size * count

                        # allocate a new large frame
                        frame = obj_space.alloc(large_object_type, name, paddr=frame_paddr)

                        # insert the frame cap into the page directory
                        frame_cap = Cap(frame, read, write, execute)
                        # Device memory must be mapped uncached.
                        frame_cap.set_cached(False)
                        pd[pt_index] = frame_cap

                        # remove all the small frames from the spec
                        for p_index in pt:
                            small_frame = pt[p_index].referent
                            obj_space.remove(small_frame)

                        # remove the page table from the spec
                        obj_space.remove(pt)

                else:
                    # If the MMIO start and end are not aligned to page table coverage,
                    # loop over all the frames and set their paddrs based on the
                    # paddr in the spec.
                    for idx in xrange(0, (size + PAGE_SIZE - 1) / PAGE_SIZE):
                        try:
                            frame_obj = pts[idx][p_indices[idx]].referent
                        except IndexError:
                            raise Exception('MMIO attributes specify device ' \
                                'memory that is larger than the dataport it is ' \
                                'associated with')
                        frame_obj.paddr = paddr + PAGE_SIZE * idx
                        cap = Cap(frame_obj, read, write, execute)
                        # Device memory must be mapped uncached.
                        cap.set_cached(False)
                        pts[idx].slots[p_indices[idx]] = cap
                        obj_space.relabel(conn_name, frame_obj)

                # Hardware dataports need no shared-frame collapsing.
                continue

            # Keys identifying both endpoints of every connection to this
            # dataport, used to find a previously-processed opposite side.
            shm_keys = []
            for c in connections:
                shm_keys.append('%s_%s' % (c.from_instance.name, c.from_interface.name))
                shm_keys.append('%s_%s' % (c.to_instance.name, c.to_interface.name))

            mapped = [x for x in shm_keys if x in shared_frames]
            if mapped:
                # We've already encountered the other side of this dataport.
                # The region had better be the same size in all address spaces.
                for key in mapped:
                    assert len(shared_frames[key]) == sz / PAGE_SIZE

            # This is the first side of this dataport.
            # Save all the frames backing this region.
            for key in shm_keys:
                if mapped:
                    shared_frames[key] = shared_frames[mapped[0]]
                else:
                    shared_frames[key] = [pt[p_index].referent \
                        for (pt, p_index) in zip(pts, p_indices)]

            # Overwrite the caps backing this region with caps to the shared
            # frames.
            for j, f in enumerate(shared_frames[shm_keys[0]]):
                existing = pts[j].slots[p_indices[j]].referent
                if existing != f:
                    # We're actually modifying this mapping. Delete the
                    # unneeded frame.
                    obj_space.remove(existing)
                pts[j].slots[p_indices[j]] = Cap(f, read, write, execute)
                obj_space.relabel(conn_name, f)
def collapse_shared_frames(ast, obj_space, elfs, shmem, options, **_):
    """Find regions in virtual address spaces that are intended to be backed by
    shared frames and adjust the capability distribution to reflect this."""

    if not elfs:
        # If we haven't been passed any ELF files this step is not relevant yet.
        return

    #for i in ast._items:
        #print ("rpc start")
        #print (i.__dict__)
        #print ("rpc end")

    arch = lookup_architecture(options.architecture)
    assembly = ast.assembly

    # `shmem` maps each shared-variable (window) name to a per-CNode list of
    # (symbol, permissions, paddr, preallocated frames, hardware-cached)
    # tuples, as destructured below.
    for window, mappings in shmem.items():
        frames = None
        exact_frames = False

        # If the shared variable has an associated set of backing frames
        # allocated already (ie. allocated in a template), look it up
        # before collapsing the shared variable.
        for mapping in mappings.values():
            for _, _, _, prealloc_frames, _ in mapping:
                if prealloc_frames is not None:
                    # NOTE(review): message is missing a space between "with"
                    # and "preallocated" when the literals are concatenated.
                    assert frames is None, 'Multiple sides of shared memory with' \
                        'preallocated frames for shared variable "%s"' % window
                    frames = prealloc_frames
                    exact_frames = True

        for cnode, local_mappings in mappings.items():
            for sym, permissions, paddr, _, cached_hw in local_mappings:
                perspective = Perspective(cnode=cnode)

                # Find this instance's ELF file.
                elf_name = perspective['elf_name']
                assert elf_name in elfs
                elf = elfs[elf_name]

                # Find this instance's page directory.
                pd_name = perspective['pd']
                pds = [x for x in obj_space.spec.objs if x.name == pd_name]
                assert len(pds) == 1
                pd = pds[0]

                # Look up the ELF-local version of this symbol.
                vaddr = get_symbol_vaddr(elf, sym)
                assert vaddr is not None, 'shared symbol \'%s\' not found in ' \
                    'ELF %s (template bug?)' % (sym, elf_name)
                assert vaddr != 0, 'shared symbol \'%s\' located at NULL in ELF ' \
                    '%s (template bug?)' % (sym, elf_name)
                assert vaddr % PAGE_SIZE == 0, 'shared symbol \'%s\' not ' \
                    'page-aligned in ELF %s (template bug?)' % (sym, elf_name)
                size = get_symbol_size(elf, sym)
                assert size != 0, 'shared symbol \'%s\' has size 0 in ELF %s ' \
                    '(template bug?)' % (sym, elf_name)
                assert size % PAGE_SIZE == 0, 'shared symbol \'%s\' in ELF %s ' \
                    'has a size that is not page-aligned (template bug?)' % \
                    (sym, elf_name)

                # Infer the page table(s) and small page(s) that currently back this
                # region.
                map_indices = [
                    make_indices(arch, v, PAGE_SIZE)
                    for v in six.moves.range(vaddr, vaddr + size, PAGE_SIZE)
                ]

                # Permissions that we will apply to the eventual mapping.
                read = 'R' in permissions
                write = 'W' in permissions
                execute = 'X' in permissions

                largest_frame_size, level_num = find_optimal_frame_size(
                    arch, vaddr, size)

                if frames is None:
                    # First iteration of the loop; we need to discover the backing
                    # frames for this region.
                    frames = []

                    # We want to derive large frames if (a) this window is device
                    # registers and large-frame-sized (in which case the kernel
                    # will have created it as large frames) or (b) the user has
                    # requested large frame promotion.
                    if largest_frame_size != PAGE_SIZE and (
                            options.largeframe or paddr is not None):

                        # Grab a copy of the frame for every entry we're going to end up making
                        new_frames = {}
                        for new_vaddr in six.moves.range(
                                vaddr, vaddr + size, largest_frame_size):
                            new_frames[new_vaddr] = obj_space.alloc(
                                seL4_FrameObject, size=largest_frame_size)

                        # Iterate over every unique index in every object below this one
                        delete_small_frames(arch, obj_space, pd, level_num,
                            map_indices)

                        # Now insert the new frames
                        for new_vaddr in six.moves.range(
                                vaddr, vaddr + size, largest_frame_size):
                            frame = new_frames[new_vaddr]
                            cap = Cap(frame, read, write, execute)
                            if paddr is not None:
                                # Device region: pin the physical address and
                                # apply the requested cacheability.
                                frame.paddr = paddr + (new_vaddr - vaddr)
                                cap.set_cached(cached_hw)
                            update_frame_in_vaddr(arch, pd, new_vaddr,
                                largest_frame_size, cap)
                            frames.append(frame)
                    else:
                        # We don't need to handle large frame promotion. Just tweak
                        # the permissions and optionally the physical address of
                        # all the current mappings.
                        for offset, indices in enumerate(map_indices):
                            (cap, frame) = lookup_vspace_indices(pd, indices)
                            cap.read = read
                            cap.write = write
                            cap.grant = execute
                            if paddr is not None:
                                frame.paddr = paddr + offset * PAGE_SIZE
                                cap.set_cached(cached_hw)
                            frames.append(frame)
                else:
                    # We have already discovered frames to back this region and now
                    # we just need to adjust page table mappings.
                    assert size == sum(f.size for f in frames), 'mismatched ' \
                        'sizes of shared region \'%s\' (template bug?)' % window

                    if not exact_frames:
                        # We do not need to preserve the exact same frames / frame sizings, so
                        # we can delete the entire region ready to put in our new frames
                        # Delete all the underlying frames / objects for this range
                        delete_small_frames(arch, obj_space, pd, level_num,
                            map_indices)

                    offset = 0
                    for frame in frames:
                        cap = Cap(frame, read, write, execute)
                        if paddr is not None:
                            cap.set_cached(cached_hw)
                        if exact_frames:
                            # If we need to preserve the exact frames then we need to clear
                            # the range for each frame individually, up to the required level
                            # for that frame. This is to allow for 'weird' shared memory regions
                            # that have preallocated frames with different sized frames in
                            # the one region.
                            frame_map_indices = [
                                make_indices(arch, v, PAGE_SIZE)
                                for v in six.moves.range(
                                    vaddr + offset,
                                    vaddr + offset + frame.size, PAGE_SIZE)
                            ]
                            _, frame_level_num = find_optimal_frame_size(
                                arch, 0, frame.size)
                            delete_small_frames(arch, obj_space, pd,
                                frame_level_num, frame_map_indices)
                        # Now, with exact_frames or not, we know that the slot for this frame is
                        # free and we can re-insert the correct frame
                        update_frame_in_vaddr(arch, pd, vaddr + offset,
                            frame.size, cap)
                        offset = offset + frame.size
def guard_pages(obj_space, cspaces, elfs, options, **_):
    '''Introduce a guard page around each stack and IPC buffer. Note that the
    templates should have ensured a three page region for each stack in order
    to enable this.'''

    def _delete_guard_frame(elf, pd, vaddr, region, tcb_name, group):
        '''Unmap and delete the frame backing `vaddr` in page directory `pd`,
        turning that page into a guard page. `region` names the surrounding
        region ('IPC buffer region' or 'stack region') for error messages.'''
        pt_index = page_table_index(get_elf_arch(elf), vaddr, options.hyp)
        if pt_index not in pd:
            raise Exception('%s of TCB %s in group %s does not appear to be '
                'backed by a frame' % (region, tcb_name, group))
        pt = pd[pt_index].referent

        p_index = page_index(get_elf_arch(elf), vaddr, options.hyp)
        if p_index not in pt:
            raise Exception('%s of TCB %s in group %s does not appear to be '
                'backed by a frame' % (region, tcb_name, group))

        # Delete the page.
        frame = pt[p_index].referent
        del pt[p_index]
        obj_space.remove(frame)

    for group, space in cspaces.items():
        cnode = space.cnode

        # Walk only the slots whose caps refer to TCBs.
        for index, tcb in [(k, v.referent) for (k, v) in cnode.slots.items()
                if v is not None and isinstance(v.referent, TCB)]:

            perspective = Perspective(group=group, tcb=tcb.name)

            if perspective['pool']:
                # This TCB is part of the (cap allocator's) TCB pool.
                continue

            elf_name = perspective['elf_name']

            # Find the page directory.
            pd = None
            pd_name = perspective['pd']
            pds = [x for x in obj_space.spec.objs if x.name == pd_name]
            if len(pds) > 1:
                raise Exception('Multiple PDs found for group %s' % group)
            elif len(pds) == 1:
                pd, = pds
                tcb['vspace'] = Cap(pd)
            # If no PD was found we were probably just not passed any ELF files
            # in this pass.

            elf = elfs.get(elf_name)

            if pd and elf:
                ipc_symbol = perspective['ipc_buffer_symbol']

                # Find the IPC buffer's preceding guard page's virtual address.
                # The symbol spans three pages: guard, buffer, guard.
                assert get_symbol_size(elf, ipc_symbol) == PAGE_SIZE * 3
                pre_guard = get_symbol_vaddr(elf, ipc_symbol)
                _delete_guard_frame(elf, pd, pre_guard, 'IPC buffer region',
                    tcb.name, group)

                # Now do the same for the following guard page. We compute its
                # address separately just in case the region crosses a PT
                # boundary and the two guard pages are in separate PTs.
                post_guard = pre_guard + 2 * PAGE_SIZE
                _delete_guard_frame(elf, pd, post_guard, 'IPC buffer region',
                    tcb.name, group)

                # Now we do the same thing for the preceding guard page of the
                # thread's stack...
                stack_symbol = perspective['stack_symbol']
                pre_guard = get_symbol_vaddr(elf, stack_symbol)
                _delete_guard_frame(elf, pd, pre_guard, 'stack region',
                    tcb.name, group)

                # ...and the following guard page.
                stack_region_size = get_symbol_size(elf, stack_symbol)
                assert stack_region_size % PAGE_SIZE == 0, \
                    'stack region is not page-aligned'
                assert stack_region_size >= 3 * PAGE_SIZE, \
                    'stack region has no room for guard pages'
                post_guard = pre_guard + stack_region_size - PAGE_SIZE
                _delete_guard_frame(elf, pd, post_guard, 'stack region',
                    tcb.name, group)
def collapse_shared_frames(ast, obj_space, cspaces, elfs, options, **_):
    """Find regions in virtual address spaces that are intended to be backed by
    shared frames and adjust the capability distribution to reflect this.

    Hardware dataports are rewired onto template-preallocated device frames
    (looked up via find_hardware_frame_in_cspace); software-to-software
    dataports are collapsed so both sides share one set of backing frames.

    Bug fix: the error paths for malformed 'paddr:size' / cached attributes
    previously referenced an undefined name `me`, raising NameError instead
    of the intended diagnostic; they now use connections[0].
    """

    if not elfs:
        # If we haven't been passed any ELF files this step is not relevant yet.
        return

    assembly = find_assembly(ast)

    # We want to track the frame objects backing shared regions with a dict
    # keyed on the name of the connection linking the regions.
    shared_frames = {}

    for i in (x for x in assembly.composition.instances
            if not x.type.hardware):
        perspective = Perspective(instance=i.name, group=i.address_space)

        elf_name = perspective['elf_name']
        assert elf_name in elfs
        elf = elfs[elf_name]

        # Find this instance's page directory.
        pd_name = perspective['pd']
        pds = [x for x in obj_space.spec.objs if x.name == pd_name]
        assert len(pds) == 1
        pd, = pds

        for d in i.type.dataports:

            # Find the connection that associates this dataport with another.
            connections = [x for x in assembly.composition.connections if \
                ((x.from_instance == i and x.from_interface == d) or \
                (x.to_instance == i and x.to_interface == d))]
            if len(connections) == 0:
                # This dataport is unconnected.
                continue
            #assert len(connections) == 1
            conn_name = connections[0].name

            # Work out which end of the (first) connection we are.
            if connections[0].from_instance == i and \
                    connections[0].from_interface == d:
                direction = 'from'
            else:
                assert connections[0].to_instance == i
                assert connections[0].to_interface == d
                direction = 'to'

            # Reverse the logic in the Makefile template.
            p = Perspective(instance=i.name, dataport=d.name)
            sym = p['dataport_symbol']

            vaddr = get_symbol_vaddr(elf, sym)
            assert vaddr is not None, 'failed to find dataport symbol \'%s\'' \
                ' in ELF %s' % (sym, elf_name)
            assert vaddr != 0
            assert vaddr % PAGE_SIZE == 0, 'dataport %s not page-aligned' % sym
            sz = get_symbol_size(elf, sym)
            assert sz != 0

            # Infer the page table(s) and page(s) that back this region.
            pts, p_indices = zip(*[\
                (pd[page_table_index(options.architecture, v)].referent,
                 page_index(options.architecture, v)) \
                for v in xrange(vaddr, vaddr + sz, PAGE_SIZE)])

            # Determine the rights this mapping should have. We use these to
            # recreate the mapping below. Technically we may not need to
            # recreate this mapping if it's already correct, but do it anyway
            # for simplicity.
            # FIXME: stop hard coding this name mangling.
            rights_setting = assembly.configuration[conn_name].get('%s_access' % direction)
            if rights_setting is not None and \
                    re.match(r'^"R?W?(G|X)?"$', rights_setting):
                read = 'R' in rights_setting
                write = 'W' in rights_setting
                execute = 'X' in rights_setting or 'G' in rights_setting
            else:
                # default
                read = True
                write = True
                execute = False

            # Check if the dataport is connected *TO* a hardware component.
            if connections[0].to_instance.type.hardware:
                p = Perspective(to_interface=connections[0].to_interface.name)
                hardware_attribute = p['hardware_attribute']
                conf = assembly.configuration[connections[0].to_instance.name].get(hardware_attribute)
                assert conf is not None, "%s.%s not found in configuration" % \
                    (connections[0].to_instance.name, hardware_attribute)
                # The attribute value is a quoted 'paddr:size' pair.
                paddr, size = conf.strip('"').split(':')
                try:
                    paddr = int(paddr, 0)
                except ValueError:
                    # Was `me.to_instance.name` (undefined name) before.
                    raise Exception("Invalid physical address specified for %s.%s: %s\n"
                        % (connections[0].to_instance.name,
                           connections[0].to_interface.name, paddr))

                try:
                    size = int(size, 0)
                except ValueError:
                    # Was `me.to_instance.name` (undefined name) before.
                    raise Exception("Invalid size specified for %s.%s: %s\n"
                        % (connections[0].to_instance.name,
                           connections[0].to_interface.name, size))

                # Optional cacheability attribute for the device mapping;
                # defaults to uncached.
                hardware_cached = p['hardware_cached']
                cached = assembly.configuration[connections[0].to_instance.name].get(hardware_cached)
                if cached is None:
                    cached = False
                elif cached.lower() == 'true':
                    cached = True
                elif cached.lower() == 'false':
                    cached = False
                else:
                    # Was `me.to_instance.name` (undefined name) before.
                    raise Exception("Value of %s.%s_cached must be either 'true' or 'false'. Got '%s'."
                        % (connections[0].to_instance.name,
                           connections[0].to_interface.name, cached))

                instance_name = connections[0].to_instance.name

                if size == 0:
                    raise Exception('Hardware dataport %s.%s has zero size!' %
                        (instance_name, connections[0].to_interface.name))

                # determine the size of a large frame, and the type of kernel
                # object that will be used, both of which depend on the architecture
                if get_elf_arch(elf) == 'ARM':
                    large_size = 1024 * 1024
                    large_object_type = seL4_ARM_SectionObject
                else:
                    large_size = 4 * 1024 * 1024
                    large_object_type = seL4_IA32_4M

                # Check if MMIO start and end is aligned to page table coverage.
                # This will indicate that we should use pagetable-sized pages
                # to back the device region to be consistent with the kernel.
                if paddr % large_size == 0 and size % large_size == 0:

                    # number of page tables backing device memory
                    n_pts = size / large_size

                    # index of first page table in page directory backing the device memory
                    base_pt_index = page_table_index(options.architecture, vaddr)
                    pt_indices = xrange(base_pt_index, base_pt_index + n_pts)

                    # loop over all the page table indices and replace the page tables
                    # with large frames
                    for count, pt_index in enumerate(pt_indices):

                        # look up the page table at the current index
                        pt = pd[pt_index].referent

                        offset = count * large_size
                        frame_paddr = paddr + offset

                        # lookup the frame, already allocated by a template
                        frame_cap = find_hardware_frame_in_cspace(
                            cspaces[i.address_space],
                            frame_paddr,
                            connections[0].to_instance.name,
                            connections[0].to_interface.name)
                        frame_obj = frame_cap.referent

                        # create a new cap for the frame to use for its mapping
                        mapping_frame_cap = Cap(frame_obj, read, write, execute)
                        mapping_frame_cap.set_cached(cached)

                        # add the mapping to the spec
                        pd[pt_index] = mapping_frame_cap

                        # add the mapping information to the original cap
                        frame_cap.set_mapping(pd, pt_index)

                        # remove all the small frames from the spec
                        for p_index in pt:
                            small_frame = pt[p_index].referent
                            obj_space.remove(small_frame)

                        # remove the page table from the spec
                        obj_space.remove(pt)

                else:
                    # If the MMIO start and end are not aligned to page table coverage,
                    # loop over all the frames and set their paddrs based on the
                    # paddr in the spec.
                    for idx in xrange(0, (size + PAGE_SIZE - 1) / PAGE_SIZE):
                        try:
                            frame_obj = pts[idx][p_indices[idx]].referent
                        except IndexError:
                            raise Exception('MMIO attributes specify device ' \
                                'memory that is larger than the dataport it is ' \
                                'associated with')

                        offset = idx * PAGE_SIZE
                        frame_paddr = paddr + offset

                        # lookup the frame, already allocated by a template
                        frame_cap = find_hardware_frame_in_cspace(
                            cspaces[i.address_space],
                            frame_paddr,
                            connections[0].to_instance.name,
                            connections[0].to_interface.name)
                        frame_obj = frame_cap.referent

                        # create a new cap for the frame to use for its mapping
                        mapping_frame_cap = Cap(frame_obj, read, write, execute)
                        mapping_frame_cap.set_cached(cached)

                        # add the mapping to the spec
                        pt = pts[idx]
                        slot = p_indices[idx]
                        pt.slots[slot] = mapping_frame_cap

                        # add the mapping information to the original cap
                        frame_cap.set_mapping(pt, slot)

                        obj_space.relabel(conn_name, frame_obj)

                # Hardware dataports need no shared-frame collapsing.
                continue

            # If any objects still have names indicating they are part of a hardware
            # dataport, it means that dataport hasn't been given a paddr or size.
            # This indicates an error, and the object name is invalid in capdl,
            # so catch the error here rather than having the capdl translator fail.
            for cap in (v for v in cspaces[i.address_space].cnode.slots.values()
                    if v is not None):
                obj = cap.referent
                match = HARDWARE_FRAME_NAME_PATTERN.match(obj.name)
                assert (match is None or
                        match.group(2) != connections[0].to_instance.name), \
                    "Missing hardware attributes for %s.%s" % (match.group(2), match.group(3))

            # Keys identifying both endpoints of every connection to this
            # dataport, used to find a previously-processed opposite side.
            shm_keys = []
            for c in connections:
                shm_keys.append('%s_%s' % (c.from_instance.name, c.from_interface.name))
                shm_keys.append('%s_%s' % (c.to_instance.name, c.to_interface.name))

            mapped = [x for x in shm_keys if x in shared_frames]
            if mapped:
                # We've already encountered the other side of this dataport.
                # The region had better be the same size in all address spaces.
                for key in mapped:
                    assert len(shared_frames[key]) == sz / PAGE_SIZE

            # This is the first side of this dataport.
            # Save all the frames backing this region.
            for key in shm_keys:
                if mapped:
                    shared_frames[key] = shared_frames[mapped[0]]
                else:
                    shared_frames[key] = [pt[p_index].referent \
                        for (pt, p_index) in zip(pts, p_indices)]

            # Overwrite the caps backing this region with caps to the shared
            # frames.
            for j, f in enumerate(shared_frames[shm_keys[0]]):
                existing = pts[j].slots[p_indices[j]].referent
                if existing != f:
                    # We're actually modifying this mapping. Delete the
                    # unneeded frame.
                    obj_space.remove(existing)
                pts[j].slots[p_indices[j]] = Cap(f, read, write, execute)
                obj_space.relabel(conn_name, f)