def root_mappings(self):
    if self.file_type == Image.ROOT_PROGRAM:
        # FIXME (benno) 0x1000 is hard coded, bad!
        virt_addr = align_down(self.attrs.virt_addr, 0x1000)
        phys_addr = align_down(self.attrs.phys_addr, 0x1000)

        if (self.attrs.virt_addr - virt_addr) != \
               (self.attrs.phys_addr - phys_addr):
            raise Exception("Non congruent segment mappings, can't deal")

        size = self.attrs.size + self.attrs.virt_addr - virt_addr

        return (virt_addr, phys_addr, size)
    else:
        return None
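# The snippets in this section lean on align_down()/align_up()
# throughout.  The real helpers are imported from elsewhere in the
# tree; this is a minimal sketch of their assumed behaviour, not the
# original implementation.
def align_down(value, size):
    """Round value down to the nearest multiple of size."""
    return value - (value % size)

def align_up(value, size):
    """Round value up to the nearest multiple of size."""
    return align_down(value + size - 1, size)

# The congruence property root_mappings() relies on: rounding virt and
# phys down by the same page size preserves their in-page offsets.
assert align_down(0x12345, 0x1000) == 0x12000
assert align_up(0x12345, 0x1000) == 0x13000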
def mark(self, base, end):
    """
    mark(base, end) -> base

    Remove the given range from the free list.  This is used to
    record where fixed address objects are located.  An exception is
    raised if the range crosses a free/used border.

    It is *not* an error if the range is wholly outside the freelist.
    """
    base = align_down(base, self.min_alloc)
    end = align_up(end, self.min_alloc) - 1

    # if self is phys_alloc:
    #     print "Mark: 0x%x 0x%x" % (base, end)

    for (free_start, free_end) in self.freelist:
        # If the used range is in this free range...
        if base >= free_start and base <= free_end:
            # Check that the used block does not run past the free range.
            if end > free_end:
                raise AllocatorException, \
                      "Used block (0x%x, 0x%x) outside avail " \
                      "range (0x%x, 0x%x)." % \
                      (base, end, free_start, free_end)

            index = self.freelist.index((free_start, free_end))
            self.freelist.remove((free_start, free_end))

            # Align the ranges to the page size.
            free_start = align_down(free_start, self.min_alloc)
            free_end = align_up(free_end, self.min_alloc)
            base = align_down(base, self.min_alloc)
            end = align_up(end, self.min_alloc)

            # Divide the free range into zero, one or two new ranges.
            if free_start < base:
                self.freelist.insert(index, (free_start, base - 1))
                index += 1
            if end < free_end:
                self.freelist.insert(index, (end, free_end - 1))

    # if self is phys_alloc:
    #     print " =free:", [(hex(x), hex(y)) for (x, y) in self.freelist]

    return base
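# A worked sketch (hypothetical numbers) of the intended effect of the
# rounding in mark() above, using the align_down()/align_up() sketch
# earlier in this section.  Marking 0x2100..0x2f00 with a 0x1000-byte
# page removes the whole page range 0x2000..0x2fff and leaves the rest
# of the free block on either side.
min_alloc = 0x1000
base = align_down(0x2100, min_alloc)
end = align_up(0x2f00, min_alloc) - 1
assert (base, end) == (0x2000, 0x2fff)

free_start, free_end = 0x0000, 0x7fff      # one inclusive free range
remainder = []
if free_start < base:
    remainder.append((free_start, base - 1))
if end < free_end:
    remainder.append((end + 1, free_end))
assert remainder == [(0x0000, 0x1fff), (0x3000, 0x7fff)]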
def write_struct(self, section):
    """Write the binary form of the segment mapping struct."""
    section.write_word(len(self.mappings))

    for (num, _, attrs) in self.mappings:
        #
        # Align the segments to the nearest page boundaries.  If we
        # have to move 'virt_addr' backwards, we need to compensate by
        # increasing 'size' by the same amount.
        #
        # This is also done in kernel.py when writing out kernel
        # mapping operations.
        #
        if attrs.virt_addr is None:
            virt_addr = -1
            size = _0(attrs.size)
        else:
            virt_addr = align_down(_0(attrs.virt_addr), self.min_page_size)
            alignment_slack = _0(attrs.virt_addr) - virt_addr
            size = _0(attrs.size) + alignment_slack

        size = align_up(size, self.min_page_size)

        section.write_word(virt_addr)
        section.write_word(num)  # seg
        section.write_word(0)    # offset
        section.write_word(size)
        section.write_word(attrs.attach)
        section.write_word(attrs.cache_policy)
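# Worked example (hypothetical numbers) of the alignment compensation
# in write_struct() above: rounding virt_addr down to a page boundary
# grows the mapping by the same slack so it still covers the original
# range.
min_page_size = 0x1000
virt = 0x20000234
size = 0x2000
aligned_virt = align_down(virt, min_page_size)      # 0x20000000
slack = virt - aligned_virt                         # 0x234
padded = align_up(size + slack, min_page_size)      # 0x3000
assert (aligned_virt, padded) == (0x20000000, 0x3000)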
def _prepare_segments(self, ofs, sh_string_table, all_sections):
    """Prepare the segments in the file for writing."""
    ph_class = ELF_PH_CLASSES[self.wordsize]
    ph_header_size = ph_class.size() * len(self.segments)
    ofs += ph_header_size

    for segment in self.segments:
        # Round down.
        new_ofs = align_down(ofs, segment.align)

        if segment.align != 0:
            new_ofs += segment.vaddr % segment.align  # add extra

        # Check + add pagesize.
        if new_ofs <= ofs:
            new_ofs += segment.align
        assert new_ofs >= ofs, "New offset must be greater " \
               "than the old offset"

        ofs = new_ofs

        if segment.align != 0:
            assert ofs % segment.align == segment.vaddr % segment.align, \
                   "Must be congruent"

        if segment.type == PT_PHDR:
            segment.prepare(ofs, ph_header_size)
        else:
            segment.prepare(ofs)

        # Now prepare the sections inside it, if it has any.
        if segment.has_sections():
            segment.prepare_sections(all_sections, sh_string_table)

        ofs += segment.get_filesz()

    return ofs
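# Sketch of the congruence rule _prepare_segments() enforces: a
# segment's file offset must equal its vaddr modulo its alignment, and
# may only move forward in the file.  congruent_offset() is a
# hypothetical helper written for illustration, not part of the
# original source.
def congruent_offset(ofs, vaddr, align):
    if align == 0:
        return ofs
    new_ofs = align_down(ofs, align) + vaddr % align
    if new_ofs <= ofs:          # never move backwards in the file
        new_ofs += align
    assert new_ofs % align == vaddr % align
    return new_ofs

assert congruent_offset(0x1080, 0x8234, 0x1000) == 0x1234
assert congruent_offset(0x1400, 0x8234, 0x1000) == 0x2234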
def __init__(self, segment, segment_index, file_type, attrs, pools):
    ImageObject.__init__(self, attrs)

    self.segment = segment
    self.segment_index = segment_index
    self.file_type = file_type
    self.attrs.size = segment.get_memsz()

    # Set direct addressing, if that's what's wanted.
    if self.attrs.direct:
        self.attrs.phys_addr = segment.vaddr

    # Declare the segment's physical memory range in use.
    marked = pools.mark_physical(self.attrs.abs_name(),
                                 self.attrs.phys_addr,
                                 self.attrs.size)

    if self.attrs.phys_addr is not None and not marked:
        raise MergeError, \
              'Segment "%s": Cannot reserve physical addresses ' \
              '%#x--%#x.' % \
              (self.attrs.abs_name(), self.attrs.phys_addr,
               self.attrs.phys_addr + self.attrs.size - 1)

    # If it's a protected segment, reserve everything in the same
    # SECTION.  Rounding the base down grows the size by the same
    # amount so the reservation still covers the segment.
    if self.attrs.protected:
        base = align_down(segment.vaddr, 1024 * 1024)
        size = align_up(self.attrs.size + segment.vaddr - base,
                        1024 * 1024)
    else:
        base = segment.vaddr
        size = self.attrs.size

    pools.mark_virtual(self.attrs.abs_name(), base, size)
def __init__(self, segment, segment_index, file_type, attrs, pools):
    ImageObject.__init__(self, attrs)

    self.segment = segment
    self.segment_index = segment_index
    self.file_type = file_type
    self.attrs.size = segment.get_memsz()

    # Set direct addressing, if that's what's wanted.
    if self.attrs.direct:
        self.attrs.phys_addr = segment.vaddr
        self.attrs.virtpool = 'direct'

    # Declare the segment's physical memory range in use.
    marked = pools.mark_physical(self.attrs.abs_name(),
                                 self.attrs.phys_addr,
                                 self.attrs.size,
                                 self.attrs.cache_policy)

    if self.attrs.phys_addr is not None and not marked:
        raise MergeError, \
              'Segment "%s": Cannot reserve physical addresses ' \
              '%#x--%#x.' % \
              (self.attrs.abs_name(), self.attrs.phys_addr,
               self.attrs.phys_addr + self.attrs.size - 1)

    # If it's a protected segment, reserve everything in the same
    # SECTION.  Rounding the base down grows the size by the same
    # amount so the reservation still covers the segment.
    if self.attrs.protected:
        base = align_down(segment.vaddr, 1024 * 1024)
        size = align_up(self.attrs.size + segment.vaddr - base,
                        1024 * 1024)
    else:
        base = segment.vaddr
        size = self.attrs.size

    pools.mark_virtual(self.attrs.abs_name(), base, size,
                       self.attrs.cache_policy)
def __simple_alloc(self, freelist, size, alignment, offset):
    """
    Allocate a single block of memory or raise an exception.

    Search for a place in the freelist to allocate the requested
    block of memory.  Returns a tuple with the following values:

    - before_free - The freelist up to the place where the memory
                    was allocated.
    - after_free  - The freelist after the place where the memory
                    was allocated.
    - alloc_start - The allocated address.
    - mem_type    - The memory type of the containing free region.

    The free lists are not necessarily page aligned.  Returning the
    tuple allows the abort and retry semantics of group allocation
    to be implemented.
    """
    before_free = []
    after_free = []

    free_iter = iter(freelist)

    # Search the freelist for a suitable block.
    for (free_start, free_end, mem_type) in free_iter:
        # Calculate the proposed address.
        alloc_start = align_down(free_start, alignment) + offset
        alloc_end = alloc_start + size - 1

        # If alignment adjustments push the block below the
        # start of the free region, bump it up.
        if alloc_start < free_start:
            alloc_start += alignment
            alloc_end += alignment

        # If the range is within free memory, we've found it.
        if alloc_start >= free_start and alloc_end <= free_end:
            # Put the remaining parts of the region back into the
            # correct freelists.
            if free_start < alloc_start - 1:
                before_free.append((free_start, alloc_start - 1,
                                    mem_type))
            if alloc_end + 1 < free_end:
                after_free.append((alloc_end + 1, free_end, mem_type))
            break
        else:
            # Not useful, so add to the before list.
            before_free.append((free_start, free_end, mem_type))
    else:
        # Abort if nothing suitable was found.
        raise AllocatorException, "Out of memory"

    # Copy any remaining free list records into the after_freelist.
    for curr_free in free_iter:
        after_free.append(curr_free)

    return (before_free, after_free, alloc_start, mem_type)
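# Demonstration (hypothetical numbers) of the first-fit candidate
# arithmetic in __simple_alloc() above: the candidate is aligned down
# from the start of the free block, shifted by 'offset', and bumped up
# by one alignment step if that pushed it below the block.
free_start, free_end = 0x5400, 0xffff          # inclusive free range
size, alignment, offset = 0x2000, 0x1000, 0x234

alloc_start = align_down(free_start, alignment) + offset   # 0x5234
if alloc_start < free_start:
    alloc_start += alignment                               # 0x6234
alloc_end = alloc_start + size - 1                         # 0x8233

assert free_start <= alloc_start and alloc_end <= free_end
assert alloc_start % alignment == offset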
def prime_windows(self, pool, ranges):
    # Sort ranges to find those that are in the same window.
    ranges.sort(key=lambda x: x[0])

    # Hack: Fails if there is more than one range in a single window.
    holes = []
    hole_base = None
    window_size = None

    for (base, size) in ranges:
        range_base = align_down(base, self.WINDOW_SIZE)

        if hole_base is None:
            # First window.
            assert len(holes) == 0

            hole_base = range_base
            window_size = max(align_up(size, self.WINDOW_SIZE),
                              self.WINDOW_SIZE)
            holes.append((base, size))
        elif range_base == hole_base:
            # Same window as the previous range.
            window_size = max(align_up(size, self.WINDOW_SIZE),
                              self.WINDOW_SIZE)
            holes.append((base, size))
        else:
            # A new window: flush the previous one first.
            assert len(holes) != 0

            free_mem = pool.mark_window(hole_base, window_size, holes)

            # Add the free parts of the window to our freelist.
            for (free_base, free_end, mem_type) in free_mem:
                self.add_memory(free_base, free_end - free_base + 1,
                                mem_type)

            # Add the window to the list of claimed windows.
            self.windows.append((hole_base,
                                 hole_base + self.WINDOW_SIZE - 1))

            hole_base = range_base
            window_size = max(align_up(size, self.WINDOW_SIZE),
                              self.WINDOW_SIZE)
            holes = [(base, size)]

    # Clean up the final window, if any.
    if hole_base is not None:
        assert len(holes) != 0

        free_mem = pool.mark_window(hole_base, window_size, holes)

        # Add the free parts of the window to our freelist.
        for (free_base, free_end, mem_type) in free_mem:
            self.add_memory(free_base, free_end - free_base + 1,
                            mem_type)

        # Add the window to the list of claimed windows.
        self.windows.append((hole_base,
                             hole_base + self.WINDOW_SIZE - 1))
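# Sketch of the window grouping prime_windows() performs: sorted
# (base, size) ranges are bucketed by the WINDOW_SIZE-aligned region
# their base falls into.  group_by_window() is a hypothetical helper
# for illustration, not part of the original source.
def group_by_window(ranges, window_size):
    buckets = {}
    for (base, size) in sorted(ranges):
        window = align_down(base, window_size)
        buckets.setdefault(window, []).append((base, size))
    return buckets

buckets = group_by_window([(0x100000, 0x100), (0x108000, 0x200),
                           (0x300000, 0x100)], 0x100000)
assert sorted(buckets.keys()) == [0x100000, 0x300000]
assert len(buckets[0x100000]) == 2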
def _prepare_segments(self, ofs, sh_string_table, wordsize, endianess):
    """Prepare the segments in the file for writing."""
    ph_class = ELF_PH_CLASSES[wordsize]
    ph_header_size = ph_class.size() * len(self.segments)
    ofs += ph_header_size

    segments = []
    all_sections = self.sections[:]

    for segment in self.segments:
        # Round down.
        new_ofs = align_down(ofs, segment.align)

        if segment.align != 0:
            new_ofs += segment.vaddr % segment.align  # add extra

        # Check + add pagesize.
        if new_ofs <= ofs:
            new_ofs += segment.align
        assert new_ofs >= ofs, "New offset must be greater " \
               "than the old offset"

        ofs = new_ofs

        if segment.align != 0:
            assert ofs % segment.align == segment.vaddr % segment.align, \
                   "Must be congruent"

        if segment.type == PT_PHDR:
            segment.prepare(ofs, ph_header_size)
        else:
            segment.prepare(ofs)

        # Now prepare the sections inside it, if it has any.
        # Lay out using offsets if the segment is a scatter-load
        # one, otherwise use virtual addresses.
        # Note: The sections are sorted by virtual address but
        # their segment offset is used.  This is probably safe.
        if segment.has_sections():
            sections = []

            for section in segment.sections:
                if section not in all_sections:
                    section = section.prepared_to
                else:
                    name_offset = sh_string_table.add_string(section.name)
                    sh_index = all_sections.index(section)

                    if segment.is_scatter_load():
                        if section.get_in_segment_offset() is None:
                            section.set_in_segment_offset(
                                section.address - segment.vaddr)
                        offset = ofs + \
                                 section.calc_in_segment_offset(segment)
                    else:
                        offset = ofs + (section.address - segment.vaddr)

                    section = section.prepare(offset, sh_index,
                                              name_offset, wordsize,
                                              endianess)
                    all_sections[sh_index] = section

                sections.append(section)

            segment.sections = sections

        segments.append(segment)
        ofs += segment.get_filesz()

    return ofs, segments, all_sections
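# Sketch of the two section-placement rules above, with hypothetical
# numbers: scatter-load segments lay sections out by their recorded
# in-segment offset (defaulting to address - vaddr), while ordinary
# segments derive the file offset from the virtual address directly.
# 'seg_ofs' stands in for the segment's file offset.
seg_ofs, seg_vaddr = 0x2000, 0x80000000
section_addr = 0x80000400

normal_offset = seg_ofs + (section_addr - seg_vaddr)     # 0x2400
in_segment_offset = section_addr - seg_vaddr             # default: 0x400
scatter_offset = seg_ofs + in_segment_offset             # 0x2400
assert normal_offset == scatter_offset == 0x2400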
def create_mapping(self, attrs, remaining_size, page_size, is_minimum):
    """
    Map as many pages as possible with the specified page size.
    Return the mapping n-tuple and the size of the mapping.

    If the page size is the minimum page size, we need to worry
    about some extra checks.
    """
    # If minimum page size, we need to consider some extra conditions.
    if is_minimum:
        phys_addr = align_down(_0(attrs.phys_addr), page_size)
        virt_addr = align_down(_0(attrs.virt_addr), page_size)

        # Calculate the shift caused by aligning the phys_addr.
        alignment_diff = 0
        if attrs.phys_addr is not None:
            alignment_diff = attrs.phys_addr - phys_addr

        size = 0
        num_pages = 0
        if attrs.size is not None:
            # In certain cases, phys alignments can leave us a
            # page short.  To account for this we add alignment
            # differences to the size.
            size = align_up(remaining_size + alignment_diff, page_size)
            num_pages = size / page_size
    else:
        # For all other page sizes, we map as many pages as we can.
        phys_addr = _0(attrs.phys_addr)
        virt_addr = _0(attrs.virt_addr)

        size = 0
        num_pages = 0
        if attrs.size is not None:
            num_pages = remaining_size / page_size
            size = num_pages * page_size

    # print "creating mapping: size %x, pages %x" % (size, num_pages)
    mapping = (virt_addr, phys_addr, page_size, num_pages,
               attrs.attach, attrs.cache_policy)

    return mapping, size
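# Worked example (hypothetical numbers) of the "page short" correction
# in create_mapping(): with a 0x1000-byte page, a region at phys
# 0x10234 of size 0x1000 straddles two pages, so the aligned mapping
# must cover both.
page_size = 0x1000
phys = 0x10234
req_size = 0x1000

phys_aligned = align_down(phys, page_size)              # 0x10000
alignment_diff = phys - phys_aligned                    # 0x234
size = align_up(req_size + alignment_diff, page_size)   # 0x2000
num_pages = size // page_size
assert (size, num_pages) == (0x2000, 2)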
            # Report the allocation to the tracker, if there is one.
            # For completely accurate coverage the pages that were
            # allocated should really be tracked, but that is:
            # a) really hard.
            # b) not what the original version did.
            if self.tracker is not None and addr != item.get_addr():
                self.tracker.track_alloc(item.get_name(), addr,
                                         addr + item.get_size() - 1)

            item.set_addr(addr)

        # Restore the invariant by removing all partial pages from the
        # freelist.
        self.freelist = []

        for (base, end, mem_type) in new_freelist:
            if base % self.min_alloc != 0:
                base = align_up(base, self.min_alloc)
            if ((end + 1) % self.min_alloc) != 0:
                end = align_down(end, self.min_alloc) - 1

            # Don't include pages that have been rounded out of
            # existence.
            if base < end:
                assert mem_type is not None
                self.freelist.append((base, end, mem_type))

    def next_avail(self):
        """
        Return the base address of the biggest block of memory in
        the free list.
        """
        if len(self.freelist) == 0:
            raise AllocatorException, "next_avail(): Free list empty."
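# next_avail() is documented as returning the base of the biggest free
# block.  Its full body is not shown above; with inclusive
# (base, end, mem_type) entries the selection is a one-liner, sketched
# here under that assumption rather than taken from the original.
freelist = [(0x0000, 0x0fff, 'ram'), (0x2000, 0x5fff, 'ram')]
biggest = max(freelist, key=lambda r: r[1] - r[0])
assert biggest[0] == 0x2000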
def mark(self, base, size, cache_policy):
    """
    mark(base, size, cache_policy) -> marked

    Remove the given range from the free list.  This is used to
    record where fixed address objects are located.  Returns whether
    or not the range was removed from the free list.

    It is *not* an error if the range is wholly outside the freelist.
    """
    end = base + size - 1

    if end < base:
        raise AllocatorException, \
              "Mark end address (0x%x) less than mark base " \
              "address (0x%x)" % (end, base)

    if base == end:
        end = end + 1

    # Remove all of the pages containing the region.
    base = align_down(base, self.min_alloc)
    end = align_up(end, self.min_alloc) - 1

    new_freelist = []
    marked = False

    for free_start, free_end, mem_type in self.freelist:
        assert free_start % self.min_alloc == 0
        assert (free_end + 1) % self.min_alloc == 0

        if (free_start <= base <= free_end) or \
               (base <= free_start <= end):
            marked = True

            if (self.tracker is None or
                not self.tracker.marking_direct) and \
                not self.machine.check_cache_permissions(cache_policy,
                                                         mem_type):
                raise AllocatorException(
                    'Mark: Cache policy "%s" is not allowed by the '
                    'cache rights "%s"' %
                    (self.machine.cache_policy_to_str(cache_policy),
                     self.machine.cache_perms_to_str(mem_type)))

            # Report the allocation to the tracker, if there is one.
            if self.tracker is not None:
                self.tracker.track_mark(base, end)

            # Insert into the freelist any remaining parts of the
            # free memory region.
            if free_start < base:
                new_freelist.append((free_start, base - 1, mem_type))
            if end < free_end:
                new_freelist.append((end + 1, free_end, mem_type))
        else:
            new_freelist.append((free_start, free_end, mem_type))

    self.freelist = new_freelist

    return marked
def create_ops(self, kernel, image, machine):
    """Create an init script for Micro kernel initialisation."""
    op_list = []
    offset = [0]

    def add_op(op_func, *args):
        op = op_func(None, None, None, (args), image, machine)
        op_list.append(op)

        my_offset = offset[0]
        offset[0] += op.sizeof()

        return my_offset

    f = StringIO()

    # We just use the cells in order; hopefully the first cell has a
    # large enough heap for soc/kernel.  No longer do sorting.
    cells = kernel.cells.values()

    ## PHASE ONE ##
    add_op(InitScriptHeader, [])
    add_op(InitScriptCreateHeap, _0(cells[0].heap_phys_base),
           cells[0].heap_size)

    # Declare total sizes.  These must be a minimum of 1.
    add_op(InitScriptInitIds,
           max(kernel.total_spaces, 1),
           max(kernel.total_clists, 1),
           max(kernel.total_mutexes, 1))

    needs_heap = False

    add_op(InitScriptCreateThreadHandles,
           _0(kernel.thread_array_base), kernel.thread_array_count)

    op_list[-1].set_eop()

    ## PHASE TWO ##
    for cell in cells:
        # No need to encode the heap of the first cell.
        if needs_heap:
            add_op(InitScriptCreateHeap, _0(cell.heap_phys_base),
                   cell.heap_size)
        else:
            needs_heap = True

        cell.clist_offset = add_op(InitScriptCreateClist,
                                   cell.clist_id, cell.max_caps)

        for space in cell.get_static_spaces():
            utcb_base = 0xdeadbeef  # something obvious if we ever use it!
            utcb_size = 0x11

            if space.utcb is not None:
                utcb_base = space.utcb.virt_addr
                if utcb_base is None:
                    utcb_base = 0
                    utcb_size = 0
                else:
                    utcb_size = int(log(space.utcb.size, 2))

            add_op(InitScriptCreateSpace,
                   space.id,
                   space.space_id_base, _0(space.max_spaces),
                   space.clist_id_base, _0(space.max_clists),
                   space.mutex_id_base, _0(space.max_mutexes),
                   space.max_phys_segs,
                   utcb_base, utcb_size,
                   space.is_privileged,
                   # XXX: A space's max priority is currently hardcoded!
                   # XXX: For now, use the kernel's max priority instead.
                   self.MAX_PRIORITY)  # space.max_priority

            # Grant the space access to the platform control
            # system call.
            if space.plat_control:
                add_op(InitScriptAllowPlatformControl, [])

            # Assign any irqs to the space.
            for irq in space.irqs:
                add_op(InitScriptAssignIrq, irq)

            for thread in space.get_static_threads():
                # FIXME: Need to deal with entry and user_start
                thread.offset = add_op(InitScriptCreateThread,
                                       thread.cap_slot,
                                       thread.priority,
                                       thread.entry,
                                       thread.get_sp(),
                                       utcb_base,
                                       cell.get_mr1())

            for mutex in space.get_static_mutexes():
                mutex.offset = add_op(InitScriptCreateMutex, mutex.id)

            for (num, name, attrs) in space.mappings:
                map_pg_sz = machine.min_page_size()
                map_pg_sz_log2 = int(log(map_pg_sz, 2))

                phys_addr = align_down(_0(attrs.phys_addr), map_pg_sz)
                virt_addr = align_down(_0(attrs.virt_addr), map_pg_sz)

                # Calculate the shift caused by aligning the phys_addr.
                alignment_diff = 0
                if attrs.has_phys_addr():
                    alignment_diff = attrs.phys_addr - phys_addr

                size = 0
                num_pages = 0
                if attrs.size is not None:
                    # In certain cases, phys alignments can leave us a
                    # page short.  To account for this we add alignment
                    # differences to the size.
                    size = align_up(attrs.size + alignment_diff,
                                    map_pg_sz)
                    num_pages = size / map_pg_sz

                # Attributes are 0xff => all cache policies are valid!
                if attrs.has_phys_addr():
                    add_op(InitScriptCreateSegment, num, phys_addr,
                           0xff, size, attrs.attach)

                if attrs.need_mapping():
                    add_op(InitScriptMapMemory, num, 0, attrs.attach,
                           map_pg_sz_log2, num_pages,
                           attrs.cache_policy, virt_addr)

    # Dump any caps.
    for cell in cells:
        for space in cell.get_static_spaces():
            for cap in space.ipc_caps:
                add_op(InitScriptCreateIpcCap,
                       _0(cap.clist.clist_offset), _0(cap.cap_slot),
                       cap.obj.offset)

            for cap in space.mutex_caps:
                add_op(InitScriptCreateMutexCap,
                       _0(cap.clist.clist_offset), _0(cap.cap_slot),
                       cap.obj.offset)

    op_list[-1].set_eop()

    f.write(''.join([op.encode() for op in op_list]))

    return f
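# The add_op() helper above accumulates the running script offset in a
# one-element list because Python 2 closures cannot rebind an outer
# local (there is no 'nonlocal').  A stripped-down sketch of the same
# pattern, with hypothetical sizes standing in for op.sizeof():
def make_adder():
    offset = [0]
    ops = []

    def add_op(size):
        ops.append(size)
        my_offset = offset[0]
        offset[0] += size       # mutate the cell, don't rebind it
        return my_offset

    return add_op

add_op = make_adder()
assert add_op(8) == 0
assert add_op(4) == 8
assert add_op(4) == 12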
def alloc(self, size, alignment, virt):
    """
    alloc(size, alignment, virt) -> long

    Allocate a block of memory from the freelist and return the
    start address.

    size      - The minimum size of the block in bytes.
    alignment - The virtual-physical alignment.
    virt      - The virtual address where the data in the block
                will be stored.
    """
    if alignment < self.min_alloc:
        raise AllocatorException, \
              "Alignment (0x%x) less than the min page size (0x%x)." \
              % (alignment, self.min_alloc)
    if alignment % self.min_alloc != 0:
        raise AllocatorException, \
              "Alignment (0x%x) is not a multiple of min page " \
              "size (0x%x)." % (alignment, self.min_alloc)

    offset = virt % alignment
    alloc_start = -1

    # if self is phys_alloc:
    #     print "Alloc: size: 0x%x, alignment: 0x%x" % (size, alignment)

    for (free_start, free_end) in self.freelist:
        alloc_start = align_down(free_start, alignment) + offset
        alloc_end = alloc_start + size - 1

        # print "alloc_start: 0x%x, free_start: 0x%x, " \
        #       "alloc_end: 0x%x, free_end: 0x%x" % \
        #       (alloc_start, free_start, alloc_end, free_end)

        # If alignment pushes the block below the start of the free
        # region, bump it up.
        if alloc_start < free_start:
            alloc_start += alignment
            alloc_end += alignment

        # print "alloc_start2: 0x%x, free_start2: 0x%x, " \
        #       "alloc_end2: 0x%x, free_end2: 0x%x" % \
        #       (alloc_start, free_start, alloc_end, free_end)

        # If the range is within free memory...
        if alloc_start >= free_start and alloc_end <= free_end:
            index = self.freelist.index((free_start, free_end))
            self.freelist.remove((free_start, free_end))

            # Divide the free range into zero, one or two new ranges.
            if free_start < alloc_start:
                self.freelist.insert(index,
                                     (free_start,
                                      align_down(alloc_start,
                                                 alignment) - 1))
                index += 1
            if alloc_end <= free_end:
                self.freelist.insert(index, (alloc_end, free_end))
            else:
                alloc_start += alignment
            break
    else:
        alloc_start = -1

    if alloc_start == -1:
        raise AllocatorException, "Out of Memory"

    # if self is phys_alloc:
    #     print " -free:", [(hex(x), hex(y)) for (x, y) in self.freelist]

    return alloc_start
class Allocator(object):
    """
    A memory allocator.

    This allocator is based on the first fit algorithm with an
    interface tuned to the rather strange requirements of elfweaver.

    The allocator keeps a sorted list of free address ranges.  Each
    range is a multiple of <min_alloc_size> bytes (aka a page), and
    is aligned on a <min_alloc_size> boundary.

    Separate calls to the allocator will result in items being
    allocated in different pages.  However, it is possible to
    allocate items on adjacent bytes, provided that they are part of
    the same allocation group.

    Allocation groups are a method of allocating multiple items at
    one time and ensuring that they are placed sufficiently close to
    one another.  Some items in a group may already have their
    addresses set (for instance, allocating a stack near a program's
    text segment).  These items must have addresses that are not in
    the allocator's free list.  An individual item will be placed at
    the lowest address that satisfies its size, offset and alignment
    requirements.  Items in a group are allocated in increasing
    order of address.

    Memory at a particular address can be removed from the free list
    with the mark() method.  It is not an error to mark memory that
    is wholly or in part missing from the free list.

    A window of memory can also be marked with the mark_window()
    method.  A window is similar to regular marking, except that it
    is an error for the region of memory to contain memory outside
    of the freelist, unless those regions are present in a list
    supplied to the method.  The entire memory region must have once
    been present in the allocator's freelist.  Windows are used to
    implement zones.
    """

    def __init__(self, min_alloc_size, tracker=None):
        """
        __init__(min_alloc_size)

        min_alloc_size is the smallest size, in bytes, that can be
        allocated.  Alignment must be a multiple of this size.
        """
        # Alloc size must be a power of 2.
        assert (min_alloc_size & (min_alloc_size - 1)) == 0

        self.freelist = []
        self.fulllist = []
        self.min_alloc = min_alloc_size
        self.tracker = tracker

    def __merge(self, the_list):
        """
        Merge adjacent regions into one.  Merging regions simplifies
        the logic of the rest of the allocation code.
        """
        last = None
        new_list = []

        for region in the_list:
            if last is None:
                last = region
            else:
                if last[1] == region[0] - 1:
                    last = (last[0], region[1], last[2])
                else:
                    new_list.append(last)
                    last = region

        if last is not None:
            new_list.append(last)

        return new_list

    def sort(self):
        """
        Sort the free and full memory lists and merge any adjacent
        memory regions.
        """
        self.freelist.sort(key=lambda x: x[0])
        self.freelist = self.__merge(self.freelist)

        self.fulllist.sort(key=lambda x: x[0])
        self.fulllist = self.__merge(self.fulllist)

    def get_freelist(self):
        """Return the current free list."""
        return self.freelist

    def add_memory(self, base, size, mem_type):
        """
        Add a region of free memory to the pool.  The memory must be
        a multiple of <min_alloc_size> bytes in size, and aligned on
        a <min_alloc_size> boundary.  Memory addresses may only be
        added to the allocator once.
        """
        # Check that the memory is page aligned.
        assert align_down(base, self.min_alloc) == base
        assert size % self.min_alloc == 0

        end = base + size - 1

        # Check that the memory does not overlap any of the regions
        # already in use.
        for list_base, list_end, list_mem_type in self.fulllist:
            if (base >= list_base and base <= list_end) or \
                   (end >= list_base and end <= list_end):
                raise AllocatorException, \
                      "Cannot add overlapping memory regions to the " \
                      "Allocator.  Address (0x%x--0x%x) already in " \
                      "(0x%x--0x%x)" % \
                      (base, end, list_base, list_end)

        self.fulllist.append((base, end, mem_type))
        self.freelist.append((base, end, mem_type))

        self.sort()

    def mark(self, base, size):
        """
        mark(base, size) -> marked

        Remove the given range from the free list.  This is used to
        record where fixed address objects are located.  Returns
        whether or not the range was removed from the free list.

        It is *not* an error if the range is wholly outside the
        freelist.
        """
        end = base + size - 1

        if end < base:
            raise AllocatorException, \
                  "Mark end address (0x%x) less than mark base " \
                  "address (0x%x)" % (end, base)

        if base == end:
            end = end + 1

        # Remove all of the pages containing the region.
        base = align_down(base, self.min_alloc)
        end = align_up(end, self.min_alloc) - 1

        new_freelist = []
        marked = False

        for free_start, free_end, mem_type in self.freelist:
            assert free_start % self.min_alloc == 0
            assert (free_end + 1) % self.min_alloc == 0

            if (free_start <= base and base <= free_end) or \
                   (base <= free_start and free_start <= end):
                marked = True

                # Report the allocation to the tracker, if there is
                # one.
                if self.tracker is not None:
                    self.tracker.track_mark(base, end)

                # Insert into the freelist any remaining parts of
                # the free memory region.
                if free_start < base:
                    new_freelist.append((free_start, base - 1,
                                         mem_type))
                if end < free_end:
                    new_freelist.append((end + 1, free_end, mem_type))
            else:
                new_freelist.append((free_start, free_end, mem_type))

        self.freelist = new_freelist

        return marked

    def mark_window(self, window_base, size, holes):
        """
        Mark a window of memory.  A window is a region of memory
        whose addresses must be either in the free list or listed in
        the holes list.  Every address in the region must have been
        present in the freelist at some stage.

        The holes variable is a list of (base, size) tuples.

        Returns the parts of the freelist that were removed.
        """
        if holes is None:
            holes = []

        window_end = window_base + size - 1

        if window_end < window_base:
            raise AllocatorException, \
                  "alloc_window: Window end address (0x%x) less " \
                  "than mark base address (0x%x)" % \
                  (window_end, window_base)

        if window_base == window_end:
            window_end = window_end + 1

        window_base = align_down(window_base, self.min_alloc)
        window_end = align_up(window_end, self.min_alloc) - 1

        # First check that the proposed window is in the memory that
        # was originally passed to add_memory().
        contained = False

        for full_start, full_end, mem_type in self.fulllist:
            # If the window is fully contained within one record,
            # then we're sweet.
            if full_start <= window_base <= full_end and \
                   full_start <= window_end <= full_end:
                contained = True
                break

        # OK, what's the answer?
        if not contained:
            raise AllocatorException, \
                  "alloc_window: Window not in allocator " \
                  "controlled memory."

        # Transform the hole list from (base, size) to (base, end),
        # rounded to page boundaries, and sort in increasing order
        # of address.
        holes = [(align_down(hole_base, self.min_alloc),
                  align_up(hole_base + hole_size - 1,
                           self.min_alloc) - 1)
                 for (hole_base, hole_size) in holes]
        holes.sort(key=lambda x: x[0])

        # Holes must be in the range of the window and can't overlap.
        for hole_base, hole_end in holes:
            assert window_base <= hole_base <= window_end and \
                   window_base <= hole_end <= window_end

        free_iter = iter(self.freelist)
        hole_iter = iter(holes)
        curr_addr = window_base
        free_done = False
        holes_done = False
        curr_free = None
        curr_hole = None
        new_freelist = []
        removed_freelist = []

        while curr_addr <= window_end:
            assert curr_addr % self.min_alloc == 0

            if not free_done and curr_free is None:
                try:
                    curr_free = free_iter.next()

                    # If the freelist range is outside where we are
                    # working, then loop and get another one.
                    if curr_free[1] < curr_addr or \
                           curr_free[0] > window_end:
                        new_freelist.append(curr_free)
                        curr_free = None
                        continue
                except StopIteration:
                    free_done = True
                    curr_free = None

            if not holes_done and curr_hole is None:
                try:
                    curr_hole = hole_iter.next()
                except StopIteration:
                    holes_done = True
                    curr_hole = None

            if curr_free is not None and \
                   curr_free[0] <= curr_addr and \
                   curr_free[1] >= curr_addr:
                if curr_hole is not None and \
                       curr_hole[0] <= curr_free[1]:
                    raise AllocatorException, \
                          "alloc_window: Hole (0x%x-0x%x) overlaps " \
                          "with free block (0x%x-0x%x)." % \
                          (curr_hole[0], curr_hole[1],
                           curr_free[0], curr_free[1])
                else:
                    # Remove the part we're interested in from the
                    # freelist.  Add the excess.
                    if curr_free[0] < curr_addr:
                        new_freelist.append((curr_free[0],
                                             curr_addr - 1,
                                             curr_free[2]))

                    if curr_free[1] > window_end:
                        new_freelist.append((window_end + 1,
                                             curr_free[1],
                                             curr_free[2]))
                        removed_freelist.append((curr_addr,
                                                 window_end,
                                                 curr_free[2]))
                    else:
                        removed_freelist.append((curr_addr,
                                                 curr_free[1],
                                                 curr_free[2]))

                    curr_addr = curr_free[1] + 1
                    curr_free = None
            elif curr_hole is not None and \
                     curr_hole[0] == curr_addr:
                if curr_free is not None and \
                       curr_free[0] <= curr_hole[1]:
                    raise AllocatorException, \
                          "alloc_window: Hole (0x%x-0x%x) overlaps " \
                          "with free block (0x%x-0x%x)." % \
                          (curr_hole[0], curr_hole[1],
                           curr_free[0], curr_free[1])
                else:
                    curr_addr = curr_hole[1] + 1
                    curr_hole = None
            else:
                raise AllocatorException, \
                      "Address %#x should be in a zone but is " \
                      "neither free nor in an already allocated " \
                      "block.  Is it part of a direct addressing " \
                      "pool?" % curr_addr

        # Copy any remaining free list records into the new freelist.
        for curr_free in free_iter:
            new_freelist.append(curr_free)

        self.freelist = new_freelist

        return removed_freelist

    def __simple_alloc(self, freelist, size, alignment, offset):
        """
        Allocate a single block of memory or raise an exception.

        Search for a place in the freelist to allocate the requested
        block of memory.  Returns a tuple with the following values:

        - before_free - The freelist up to the place where the
                        memory was allocated.
        - after_free  - The freelist after the place where the
                        memory was allocated.
        - addr        - The allocated address.

        The free lists are not necessarily page aligned.  Returning
        the tuple allows the abort and retry semantics of group
        allocation to be implemented.
        """
        before_free = []
        after_free = []
        addr = None

        free_iter = iter(freelist)

        # Search the freelist for a suitable block.
        for (free_start, free_end, mem_type) in free_iter:
            # Calculate the proposed address.
            alloc_start = align_down(free_start, alignment) + offset
            alloc_end = alloc_start + size - 1

            # If alignment adjustments push the block below the
            # start of the free region, bump it up.
            if alloc_start < free_start:
                alloc_start += alignment
                alloc_end += alignment

            # If the range is within free memory, we've found it.
            if alloc_start >= free_start and alloc_end <= free_end:
                # Put the remaining parts of the region back into
                # the correct freelists.
                if free_start < alloc_start - 1:
                    before_free.append((free_start, alloc_start - 1,
                                        mem_type))
                if alloc_end + 1 < free_end:
                    after_free.append((alloc_end + 1, free_end,
                                       mem_type))

                addr = alloc_start
                break
            else:
                # Not useful, so add to the before list.
                before_free.append((free_start, free_end, mem_type))

        # Abort if nothing suitable was found.
        if addr is None:
            raise AllocatorException, "Out of memory"

        # Copy any remaining free list records into the
        # after_freelist.
        for curr_free in free_iter:
            after_free.append(curr_free)

        return (before_free, after_free, alloc_start)

    def __group_alloc(self, group, freelist):
        """
        Allocate memory for a group of items and ensure that they
        have been allocated sufficiently close together.  Items in
        the group are allocated with increasing addresses.

        Returns a tuple with the following values:

        new_freelist - The revised freelist.
        addrs        - A list of the addresses of the items, in item
                       order.
        """
        addrs = []
        new_freelist = []
        last_item = None

        for i in group.get_entries():
            # If the address is not fixed, then allocate.
            if i.get_addr() is None:
                (before, freelist, addr) = \
                         self.__simple_alloc(freelist, i.get_size(),
                                             i.get_alignment(),
                                             i.get_offset())
                new_freelist.extend(before)
            else:
                addr = i.get_addr()

                # Split the freelist around the fixed address so
                # that the next item will be allocated at a higher
                # address.  Problems will arise if fixed items are
                # placed in the list unsorted.
                updated_freelist = []

                for (base, end, mem_type) in freelist:
                    if end < addr:
                        new_freelist.append((base, end, mem_type))
                    else:
                        assert base > addr
                        updated_freelist.append((base, end, mem_type))

                # Use the freelist above the fixed item for further
                # allocations.
                freelist = updated_freelist

            # Check the distance between the items and throw if
            # they're too far apart.
            if last_item is not None and \
                   group.get_distance() is not None:
                if addr - (addrs[-1] + last_item.get_size()) > \
                       group.get_distance():
                    err_txt = {
                        'last_item': last_item.get_name(),
                        'this_item': i.get_name(),
                        'distance': group.get_distance(),
                        }
                    raise AllocatorGroupException, \
                          group.get_error_message() % err_txt

            addrs.append(addr)
            last_item = i

        # Add remaining parts of the freelist to the final freelist.
        new_freelist.extend(freelist)

        return (new_freelist, addrs)

    def alloc(self, group):
        """
        Allocate memory for a group of items.  Items within the
        group will be allocated no more than group.get_distance()
        bytes apart, otherwise an exception will be raised.
        """
        new_freelist = []
        curr_freelist = self.freelist
        addrs = []
        completed = False

        # How to allocate a group of items:
        #
        # 1) Try to allocate the group from the bottom of the free
        #    list.  If that works, good!
        # 2) If an AllocatorException is thrown, then an individual
        #    item could not find the memory it needs, so give up now
        #    and throw again.
        # 3) If an AllocatorGroupException is thrown, then the
        #    group's distance requirements could not be fulfilled.
        #    This could be as a result of freelist fragmentation, so
        #    remove the first item from the freelist and try again.
        while not completed:
            try:
                (ret_freelist, addrs) = \
                               self.__group_alloc(group, curr_freelist)
            except AllocatorException:
                raise AllocatorException, group.get_error_message()
            except AllocatorGroupException, agex:
                # Remove the first freelist record and try again.
                # If there is fragmentation in the freelist, this
                # may work around it.  This exception may be raised
                # again if the group failed for another reason (for
                # instance, the request could not be satisfied at
                # all).
                if len(curr_freelist) <= 1:
                    raise AllocatorException, str(agex)
                else:
                    new_freelist.append(curr_freelist.pop(0))
            else:
                # Success!
                completed = True

        # Rebuild the freelist.
        new_freelist.extend(ret_freelist)

        # Assign to each item its address.
        for (item, addr) in zip(group.get_entries(), addrs):
            # Report the allocation to the tracker, if there is one.
            # For completely accurate coverage the pages that were
            # allocated should really be tracked, but that is:
            # a) really hard.
            # b) not what the original version did.
            if self.tracker is not None and addr != item.get_addr():
                self.tracker.track_alloc(item.get_name(), addr,
                                         addr + item.get_size() - 1)

            item.set_addr(addr)

        # Restore the invariant by removing all partial pages from
        # the freelist.
        self.freelist = []

        for (base, end, mem_type) in new_freelist:
            if base % self.min_alloc != 0:
                base = align_up(base, self.min_alloc)
            if ((end + 1) % self.min_alloc) != 0:
                end = align_down(end, self.min_alloc) - 1

            # Don't include pages that have been rounded out of
            # existence.
            if base < end:
                self.freelist.append((base, end, mem_type))
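# Minimal usage sketch of the Allocator class above, assuming the
# class and the align_down()/align_up() sketch earlier in this
# section are in scope, with a hypothetical 0x1000-byte page and a
# 'ram' memory type.  mark() splits the free block around the
# reserved page range.
pool = Allocator(0x1000)
pool.add_memory(0x100000, 0x10000, 'ram')

assert pool.mark(0x104000, 0x1000)
assert pool.get_freelist() == [(0x100000, 0x103fff, 'ram'),
                               (0x105000, 0x10ffff, 'ram')]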