def parse(self): if Command.verbose: print "Section object" sect = Command.elf.find_section_named(self.base_sect) if sect == None: if Command.verbose: print "\tNot creating section, unable to find base section %s" \ % self.base_sect return elif Command.verbose: print "\tCreating section %s based on base section %s" % \ (self.name, self.base_sect) Command.section = Command.elf.clone_section(sect, Command.addr) Command.sections.append(Command.section) # Now set the address of the section given the current address Command.section.address = align_up(Command.addr, Command.section.addralign) Command.addr = Command.section.address # XXX: This checks works for the current rvct but if we are merging # sections that aren't going into a segment they should not affect # the address value at all. Fix later. (Related to merging all the # 8000+ .debug_foo$$$morefoo sections in RVCT) if Command.section.flags & SHF_ALLOC: if len(Command.segment[2]) == 0: Command.segment[1] = Command.addr Command.segment[2].append(Command.section) if self.name != self.base_sect: Command.section.name = self.name if Command.verbose: print "\tSection address is %x" % Command.section.address print "\tParsing commands..." for command in self.commands: command.parse() if self.eos: Command.addr = align_up(Command.addr, SEGMENT_ALIGN) Command.segment = [None, Command.addr, []] Command.segments.append(Command.segment) if Command.verbose: print "Section %s marks end of segment" % self.name print "\tSetting address to %x" % Command.addr print "\tAdding new segment at index %d" % \ Command.segments.index(Command.segment) Command.section = None if Command.verbose: print "Finished section %s" % self.name
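# align_up() and align_down() are imported helpers used throughout this
# code but not defined in this section.  A minimal sketch of the assumed
# semantics (a hypothetical reimplementation; the real helpers may
# differ in detail):
#
#     def align_down(addr, align):
#         return addr - (addr % align)        # round down to a multiple
#
#     def align_up(addr, align):
#         return align_down(addr + align - 1, align)   # round up
#
# e.g. align_up(0x1001, 0x1000) == 0x2000,
#      align_up(0x1000, 0x1000) == 0x1000 (a no-op when aligned), and
#      align_down(0x1fff, 0x1000) == 0x1000.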
def prime_windows(self, pool, ranges):
    # Sort ranges to find those that are in the same window.
    ranges.sort(key=lambda x: x[0])

    # Hack: Fails if there is more than one range in a single window.
    holes = []
    hole_base = None
    window_size = None

    for (base, size) in ranges:
        range_base = align_down(base, self.WINDOW_SIZE)

        if hole_base is None:
            # First range: start a new window.
            assert len(holes) == 0
            hole_base = range_base
            window_size = max(align_up(size, self.WINDOW_SIZE),
                              self.WINDOW_SIZE)
            holes.append((base, size))
        elif range_base == hole_base:
            # Range falls in the current window; extend it.
            window_size = max(align_up(size, self.WINDOW_SIZE),
                              self.WINDOW_SIZE)
            holes.append((base, size))
        else:
            # Range starts a new window; flush the current one first,
            # using the window size accumulated so far.
            assert len(holes) != 0
            free_mem = pool.mark_window(hole_base, window_size, holes)

            # Add the free parts of the window to our freelist.
            for (free_base, free_end, mem_type) in free_mem:
                self.add_memory(free_base, free_end - free_base + 1,
                                mem_type)

            # Add the window to the list of claimed windows.
            self.windows.append((hole_base,
                                 hole_base + self.WINDOW_SIZE - 1))

            hole_base = range_base
            window_size = max(align_up(size, self.WINDOW_SIZE),
                              self.WINDOW_SIZE)
            holes = [(base, size)]

    # Clean up: flush the final window, if any.
    if hole_base is not None:
        assert len(holes) != 0
        free_mem = pool.mark_window(hole_base, window_size, holes)

        # Add the free parts of the window to our freelist.
        for (free_base, free_end, mem_type) in free_mem:
            self.add_memory(free_base, free_end - free_base + 1, mem_type)

        # Add the window to the list of claimed windows.
        self.windows.append((hole_base,
                             hole_base + self.WINDOW_SIZE - 1))
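# A hedged illustration of the window grouping above (hypothetical
# numbers): with WINDOW_SIZE == 0x100000, the sorted ranges
# (0x100100, 0x200) and (0x100800, 0x100) align down to the same window
# base and so become holes in a single mark_window() call, while
# (0x300000, 0x1000) flushes that window and starts a new one:
#
#     align_down(0x100100, 0x100000) == align_down(0x100800, 0x100000)
#                                    == 0x100000
#     align_down(0x300000, 0x100000) == 0x300000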
def mark(self, base, end):
    """
    mark(base, end) -> base

    Remove the given range from the free list.  This is used to
    record where fixed address objects are located.  An exception is
    raised if the range crosses a free/used border.  It is *not* an
    error if the range is wholly outside the freelist.
    """
    base = align_down(base, self.min_alloc)
    end = align_up(end, self.min_alloc) - 1

    # if self is phys_alloc:
    #     print "Mark: 0x%x 0x%x" % (base, end)

    for (free_start, free_end) in self.freelist:
        # If the used range starts in this free range...
        if base >= free_start and base <= free_end:
            # ...check the used block does not run past the free range.
            if end > free_end:
                raise AllocatorException, \
                      "Used block (0x%x, 0x%x) outside avail range " \
                      "(0x%x, 0x%x)." % \
                      (base, end, free_start, free_end)

            index = self.freelist.index((free_start, free_end))
            self.freelist.remove((free_start, free_end))

            # Align ranges to page size.  Note that align_up() turns
            # the inclusive end addresses into exclusive ones, which
            # the inserts below compensate for.
            free_start = align_down(free_start, self.min_alloc)
            free_end = align_up(free_end, self.min_alloc)
            base = align_down(base, self.min_alloc)
            end = align_up(end, self.min_alloc)

            # Divide the free range into zero, one or two new ranges.
            if free_start < base:
                self.freelist.insert(index, (free_start, base - 1))
                index += 1
            if end < free_end:
                self.freelist.insert(index, (end, free_end - 1))

    # if self is phys_alloc:
    #     print "  =free:", [(hex(x), hex(y)) for (x, y) in self.freelist]

    return base
def write_struct(self, section):
    """Write the binary form of the kernel info struct."""
    section.write_word(self.base)
    section.write_word(self.size)
    section.write_word(0) # pos_guess

    bits_per_word = self.machine.sizeof_word * 8

    # Write out the bitmap
    rounded_up_size = align_up(self.size, bits_per_word)
    current_word = 0

    for pos in range(0, rounded_up_size):
        # The current bit should be '0' if the resource is free.  This
        # is true if (i) we are past the 'used' range and (ii) we are
        # still in the valid range.
        current_bit = 0
        if pos < self.preallocated or pos >= self.size:
            current_bit = 1

        # Write out the bit
        current_word |= (current_bit << (pos % bits_per_word))

        # If we have finished a word, write it out
        if pos % bits_per_word == bits_per_word - 1:
            section.write_word(current_word)
            current_word = 0

    # Finally, we must make sure that we always write out at least one
    # word.
    if self.size == 0:
        section.write_word(0)
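# A hedged worked example of the bitmap layout above (hypothetical
# numbers): on a 32-bit machine (bits_per_word == 32), size == 4 with
# preallocated == 2 sets bits 0-1 (already used) and bits 4-31 (past
# the valid range), leaving bits 2-3 clear:
#
#     word = 0
#     for pos in range(32):
#         if pos < 2 or pos >= 4:
#             word |= 1 << pos
#     assert word == 0xfffffff3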
def _update_data(self):
    """Update the note data based on our changes."""
    if not (self.note_name is None or self.note_type is None or \
            self._desc_data is None):
        # Round the data up to a multiple of word size.
        desc_data = self._desc_data
        if (len(desc_data) % self.bytes_per_word) != 0:
            pad = self.bytes_per_word - \
                  (len(desc_data) % self.bytes_per_word)
            desc_data += ByteArray('\0' * pad)

        data = ByteArray(
            struct.pack(self.endianess + self._format_chr * 3,
                        len(self.note_name) + 1,
                        len(desc_data),
                        self.note_type))
        data += ByteArray(
            struct.pack(
                self.endianess +
                str(align_up(len(self.note_name) + 1,
                             self.bytes_per_word)) + 's',
                self.note_name + '\0'))
        data += desc_data

        self._data = data
def write_struct(self, section):
    """Write the binary form of the segment mapping struct."""
    section.write_word(len(self.mappings))

    for (num, _, attrs) in self.mappings:
        #
        # Align the segments to nearest page boundaries. If we have to
        # move 'virt_addr' backwards, we need to compensate by
        # increasing 'size' by the same amount.
        #
        # This is also done in kernel.py when writing out kernel mapping
        # operations.
        #
        if attrs.virt_addr is None:
            virt_addr = -1
            size = _0(attrs.size)
        else:
            virt_addr = align_down(_0(attrs.virt_addr),
                                   self.min_page_size)
            alignment_slack = _0(attrs.virt_addr) - virt_addr
            size = _0(attrs.size) + alignment_slack

        size = align_up(size, self.min_page_size)

        section.write_word(virt_addr)
        section.write_word(num) # seg
        section.write_word(0) # offset
        section.write_word(size)
        section.write_word(attrs.attach)
        section.write_word(attrs.cache_policy)
def __init__(self, segment, segment_index, file_type, attrs, pools):
    ImageObject.__init__(self, attrs)

    self.segment = segment
    self.segment_index = segment_index
    self.file_type = file_type
    self.attrs.size = segment.get_memsz()

    # Set direct addressing, if that's what's wanted.
    if self.attrs.direct:
        self.attrs.phys_addr = segment.vaddr

    # Declare the segment's physical memory range in use.
    marked = pools.mark_physical(self.attrs.abs_name(),
                                 self.attrs.phys_addr,
                                 self.attrs.size)

    if self.attrs.phys_addr is not None and not marked:
        raise MergeError, \
              'Segment "%s": Cannot reserve physical addresses ' \
              '%#x--%#x.' % \
              (self.attrs.abs_name(), self.attrs.phys_addr,
               self.attrs.phys_addr + self.attrs.size - 1)

    # If it's a protected segment, reserve everything in the same
    # SECTION.  Moving 'base' backwards to the 1MB boundary means the
    # size must grow by the same amount (segment.vaddr - base) so the
    # reservation still covers the end of the segment.
    if self.attrs.protected:
        base = align_down(segment.vaddr, 1024 * 1024)
        size = align_up(self.attrs.size + segment.vaddr - base,
                        1024 * 1024)
    else:
        base = segment.vaddr
        size = self.attrs.size

    pools.mark_virtual(self.attrs.abs_name(), base, size)
def calc_thread_array_sizes(self):
    self.thread_array_count = self.total_threads
    self.thread_array_size = self.thread_array_count * \
                             self.machine.sizeof_pointer

    if self.dynamic_heap_size:
        self.heap_attrs.size += align_up(self.thread_array_size,
                                         self.machine.min_page_size())
def __init__(self, segment, segment_index, file_type, attrs, pools):
    ImageObject.__init__(self, attrs)

    self.segment = segment
    self.segment_index = segment_index
    self.file_type = file_type
    self.attrs.size = segment.get_memsz()

    # Set direct addressing, if that's what's wanted.
    if self.attrs.direct:
        self.attrs.phys_addr = segment.vaddr
        self.attrs.virtpool = 'direct'

    # Declare the segment's physical memory range in use.
    marked = pools.mark_physical(self.attrs.abs_name(),
                                 self.attrs.phys_addr,
                                 self.attrs.size,
                                 self.attrs.cache_policy)

    if self.attrs.phys_addr is not None and not marked:
        raise MergeError, \
              'Segment "%s": Cannot reserve physical addresses ' \
              '%#x--%#x.' % \
              (self.attrs.abs_name(), self.attrs.phys_addr,
               self.attrs.phys_addr + self.attrs.size - 1)

    # If it's a protected segment, reserve everything in the same
    # SECTION.  Moving 'base' backwards to the 1MB boundary means the
    # size must grow by the same amount (segment.vaddr - base) so the
    # reservation still covers the end of the segment.
    if self.attrs.protected:
        base = align_down(segment.vaddr, 1024 * 1024)
        size = align_up(self.attrs.size + segment.vaddr - base,
                        1024 * 1024)
    else:
        base = segment.vaddr
        size = self.attrs.size

    pools.mark_virtual(self.attrs.abs_name(), base, size,
                       self.attrs.cache_policy)
def _prepare_sections(self, ofs, sh_string_table, wordsize, endianess,
                      sections):
    """Prepare sections in the file.  Passed the current ofs value,
    and returns any updated value."""
    # We now add all the sections that are not part of the segment.
    if sh_string_table:
        self._sh_strndx = self.sections.index(sh_string_table)
        for section in sections:
            if isinstance(section, UnpreparedElfSection):
                sh_string_table.add_string(section.name)

    new_sections = []
    for idx, section in enumerate(sections):
        if isinstance(section, UnpreparedElfSection) and section.name:
            name_offset = sh_string_table.offset_of(section.name)
            offset = align_up(ofs, section.addralign)
            # Note: must prepare before getting size
            section = section.prepare(offset, idx, name_offset,
                                      wordsize, endianess)
            ofs = offset + section.get_size()
        elif isinstance(section, UnpreparedElfSection): # NULL sections
            section = section.prepare(0, 0, 0, wordsize, endianess)
        new_sections.append(section)

    # Check that all sections have a valid section index
    for section in new_sections[1:]:
        assert section.index < len(sections)

    return ofs, new_sections
def round_pages(self):
    """
    round_pages()

    Remove any partially used pages from the freelist.
    """
    alignment = self.min_alloc
    scan_list = True

    while scan_list:
        scan_list = False

        for (free_start, free_end) in self.freelist:
            alloc_start = free_start
            align_end = align_up(alloc_start + 1, alignment)

            if align_end < free_end:
                alloc_end = align_end
            else:
                alloc_end = free_end

            #print "  round:", hex(free_start), "..", hex(free_end)
            if (alloc_start < alloc_end) and \
               (alloc_end - alloc_start < alignment):
                #print "  - rm:", hex(alloc_start), "..", hex(alloc_end)
                scan_list = True
                index = self.freelist.index((free_start, free_end))
                self.freelist.remove((free_start, free_end))

                if alloc_end < free_end:
                    self.freelist.insert(index, (alloc_end, free_end))
                    index += 1
    return
def get_last_addr(self, addr_type="virtual"): """Return the last memory address used in the file. By default it uses the virtual memory address. This can be changed to use the physical memory address if physical is set to True. Getting last physical can only be used if the file has segments. """ virtual = self._addr_type_is_virt(addr_type) if self.has_segments(): segments = self._get_ptload_segments() if virtual: addrs = [(segment.vaddr + segment.get_memsz(), segment.align) for segment in segments] else: addrs = [(segment.paddr + segment.get_memsz(), segment.align) for segment in segments] else: # Base it on sections instead if not virtual: raise InvalidArgument, "Can't find last physical \ address in an ElfFile without segments." addrs = [(section.address + section.get_size(), section.addralign) for section in self.sections] if addrs: last_addr, last_align = max(addrs) else: last_addr, last_align = 0, 0 return align_up(last_addr, last_align)
def get_last_addr(self, addr_type = "virtual"): """ Return the last memory address used in the file. By default it uses the virtual memory address. This can be changed to use the physical memory address if physical is set to True. Getting last physical can only be used if the file has segments. """ virtual = self._addr_type_is_virt(addr_type) if self.has_segments(): segments = self._get_ptload_segments() if virtual: addrs = [(segment.vaddr + segment.get_memsz(), segment.align) for segment in segments] else: addrs = [(segment.paddr + segment.get_memsz(), segment.align) for segment in segments] else: # Base it on sections instead if not virtual: raise InvalidArgument, "Can't find last physical \ address in an ElfFile without segments." addrs = [(section.address + section.get_size(), section.addralign) for section in self.sections] if addrs: last_addr, last_align = max(addrs) else: last_addr, last_align = 0, 0 return align_up(last_addr, last_align)
def encode(self):
    """Encode kernel init script create heap."""
    return ''.join((
        self.encode_hdr(KI_OP_CREATE_HEAP, 1, self.eop),
        self.encode_base_and_size(_0(self.base),
                                  align_up(_0(self.size),
                                           0x1000) >> SHIFT_4K,
                                  SHIFT_4K)
    ))
def encode(self):
    """
    Encode kernel init script create thread handles.
    Size is a 4-word multiple.  Phys is 1K aligned.
    """
    return ''.join((
        self.encode_hdr(KI_OP_CREATE_THREAD_HANDLES, 1, self.eop),
        self.encode_base_and_size(_0(self.phys),
                                  align_up(_0(self.size), 4) / 4,
                                  SHIFT_1K)
    ))
def parse(self):
    if Command.verbose:
        print "Memory object"

    Command.addr = align_up(self.base, SEGMENT_ALIGN)
    Command.segment = [None, Command.addr, []]
    Command.segments.append(Command.segment)

    if Command.verbose:
        print "\tSetting address to %x" % Command.addr
        print "\tAdding new segment at index %d" % \
              Command.segments.index(Command.segment)
def _update_data(self):
    """Update the note data based on our changes."""
    if not (self.note_name is None or self.note_type is None or \
            self._desc_data is None):
        data = ByteArray(
            struct.pack(self._endianess + self._format_chr * 3,
                        len(self.note_name) + 1,
                        len(self._desc_data),
                        self.note_type))
        # struct's 's' format null-pads the name out to the rounded-up
        # length, which supplies the terminating '\0'.
        data += ByteArray(
            struct.pack(
                self._endianess +
                str(align_up(len(self.note_name) + 1,
                             self.bytes_per_word)) + 's',
                self.note_name))
        data += self._desc_data

        self._data = data
def parse(self):
    if Command.verbose:
        print "Align object"

    old_addr = Command.addr
    Command.addr = align_up(Command.addr, self.value)

    if Command.verbose:
        print "\tSetting address to %x" % Command.addr

    if Command.section:
        for _ in range(0, Command.addr - old_addr):
            Command.section.append_data(ByteArray('\0'))

        if Command.verbose:
            print "\tPadding current section by %x bytes" % \
                  (Command.addr - old_addr)
def create_dynamic_segments(self, kernel, namespace, image, machine,
                            pools, base_segment):
    """Create a segment to store the init script."""
    kernel.calc_thread_array_sizes()

    f = self.create_ops(kernel, image, machine)

    attrs = image.new_attrs(namespace.add_namespace("initscript"))
    attrs.attach = PF_R
    attrs.pager = None
    attrs.size = align_up(len(f.getvalue()), machine.min_page_size())
    attrs.data = ByteArray()

    self.init_ms = image.add_memsection(attrs, machine, pools)
    image.add_group(machine.kernel_heap_proximity,
                    (base_segment, self.init_ms))

    f.close()
def prepare(self, wordsize, endianess):
    """Prepare file for writing"""
    # First we prepare the symbols, as this might in fact add
    # new sections.
    self._prepare_symbols()

    sh_string_table = None
    if len(self.sections) > 1:
        # Create section header string table
        sh_string_table = self.create_and_replace(UnpreparedElfStringTable,
                                                  ".shstrtab")

    # We need to make sure the symbol table is the last thing in the
    # file.
    symtab = self.find_section_named(".symtab")
    if symtab:
        i = self.sections.index(symtab)
        self.sections[-1], self.sections[i] = symtab, self.sections[-1]

    # First find where our segments should go.
    if self._ph_offset is None:
        self.set_ph_offset(ELF_HEADER_CLASSES[wordsize].size())
    ofs = self._ph_offset

    segments, sections = [], []
    if self.segments:
        ofs, segments, sections = self._prepare_segments(
            ofs, sh_string_table, wordsize, endianess)

    ofs, sections = self._prepare_sections(ofs, sh_string_table,
                                           wordsize, endianess, sections)

    new_file = PreparedElfFile(wordsize, endianess, sections, segments)
    new_file._sh_offset = align_up(ofs, new_file.wordsize / 8)
    new_file._sh_strndx = self._sh_strndx
    new_file._ph_offset = self._ph_offset
    new_file.machine = self.machine
    new_file.elf_type = self.elf_type
    new_file.entry_point = self.entry_point
    new_file.osabi = self.osabi
    new_file.abiversion = self.abiversion
    new_file.flags = self.flags
    new_file.has_section_headers = self.has_section_headers

    return new_file
def _allocate_sym_from_section(self, elf, sect_name):
    """
    Allocate space in the requested section for this symbol.
    """
    sect = elf.allocate_section(sect_name)

    # We must byte align to the symbol's value field.
    sect_end_addr = sect.address + sect.get_size()
    sect_aligned_addr = align_up(sect_end_addr, self.value)
    padding = sect_aligned_addr - sect_end_addr

    sect.increment_size(padding + self.size)

    # Now update the symbol's info to point to the section.
    self.section = sect
    self.value = sect_aligned_addr - sect.address
    self.shndx = 0
def merge_sections(base_sect, merge_sect, merged_sects, remove_sects,
                   verbose):
    if verbose:
        print "\tMerging in section %s to section %s" % \
              (merge_sect.name, base_sect.name)

    size = base_sect.get_size()
    offset = align_up(size, merge_sect.addralign)

    for _ in range(0, offset - size):
        base_sect.append_data(ByteArray('\0'))

    base_sect.append_data(merge_sect.get_data())
    merged_sects.append((merge_sect, base_sect, offset))

    if remove_sects != None:
        remove_sects.append(merge_sect)

    if verbose:
        print "\tPadded by %x bytes" % (offset - size)
        print "\tMerged data starts at %x" % offset
def create_mapping(self, attrs, remaining_size, page_size, is_minimum):
    """
    Map as many pages as possible for the specified page size.  Return
    the mapping n-tuple and the size of the mapping.  If page size is
    the minimum page size, we need to worry about some extra checks.
    """
    # If minimum page size, we need to consider some extra conditions
    if is_minimum:
        phys_addr = align_down(_0(attrs.phys_addr), page_size)
        virt_addr = align_down(_0(attrs.virt_addr), page_size)

        # Calculate the shift caused by aligning the phys_addr
        alignment_diff = 0
        if attrs.phys_addr is not None:
            alignment_diff = attrs.phys_addr - phys_addr

        size = 0
        num_pages = 0
        if attrs.size != None:
            # In certain cases, phys alignments can leave us a
            # page short.  To account for this we add alignment
            # differences to the size.
            size = align_up(remaining_size + alignment_diff, page_size)
            num_pages = size / page_size
    # For all other page sizes, we map as many pages as we can.
    else:
        phys_addr = _0(attrs.phys_addr)
        virt_addr = _0(attrs.virt_addr)

        size = 0
        num_pages = 0
        if attrs.size != None:
            num_pages = remaining_size / page_size
            size = num_pages * page_size

    #print "creating mapping: size %x, pages %x" % (size, num_pages)
    mapping = (virt_addr, phys_addr, page_size, num_pages,
               attrs.attach, attrs.cache_policy)

    return mapping, size
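# A hedged worked example of the "page short" case (hypothetical
# numbers, 0x1000 pages): phys_addr 0x10800 aligns down to 0x10000, so
# alignment_diff == 0x800.  With remaining_size == 0x1000, the data
# actually spans 0x10800--0x117ff, i.e. two pages, which is what
# rounding (remaining_size + alignment_diff) gives; rounding
# remaining_size alone would map only one:
#
#     align_up(0x1000, 0x1000) / 0x1000           == 1   # a page short
#     align_up(0x1000 + 0x800, 0x1000) / 0x1000   == 2   # correct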
def _unpack(self):
    """
    Convert our internal raw data into more useful data structures.
    """
    if len(self._data) == 0:
        return

    name_start = self.bytes_per_word * 3
    namesz, descsz, _type = \
            struct.unpack(self.endianess + self._format_chr * 3,
                          self._data[:name_start])
    name = struct.unpack(self.endianess + str(namesz) + 's',
                         self._data[name_start:name_start + namesz])

    # Remove the terminating 0.
    self.note_name = name[0].split('\x00')[:-1][0]
    self.note_type = _type

    desc_start = name_start + align_up(namesz, self.bytes_per_word)
    self._desc_data = self._data[desc_start:desc_start + descsz]
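# The layout assumed by _unpack()/_update_data() is the standard ELF
# note format: three words (namesz, descsz, type), the NUL-terminated
# name padded out to a word boundary, then descsz bytes of descriptor.
# A sketch for a 4-byte-word, little-endian note (hypothetical values):
#
#     blob = struct.pack('<III', 4, 2, 3)    # namesz, descsz, type
#     blob += struct.pack('<4s', 'GNU\0')    # name, already word-aligned
#     blob += '\x12\x34'                     # descriptor bytes
#     # _unpack() on this data would yield note_name 'GNU',
#     # note_type 3 and desc_data '\x12\x34'.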
def generate_dynamic_segments(self, namespace, machine, pools, kernel,
                              image):
    """
    Create bootinfo segment and environment buffers.
    """
    utcb_mss = []

    for space in self.cell.spaces:
        space.utcb.size = align_up(space.max_threads * image.utcb_size,
                                   machine.min_page_size())

        # A space with no threads will get a 0 size as align_up doesn't
        # change a 0, so we explicitly set it to at least one page.
        if space.utcb.size == 0:
            space.utcb.size = machine.min_page_size()

        utcb_ms = image.add_utcb_area(space.utcb)
        utcb_mss.append(utcb_ms)

        kspace = None
        for (x, _) in self.env.space_list:
            if x.space.id == space.id:
                kspace = x

        if image.utcb_size:
            self.env.add_utcb_area_entry("UTCB_AREA_%d" % space.id,
                                         space, kspace, image)

    self.env.add_bitmap_allocator_entry("MAIN_SPACE_ID_POOL",
                                        *self.cell.space_list)
    self.env.add_bitmap_allocator_entry("MAIN_CLIST_ID_POOL",
                                        *self.cell.cap_list)
    self.env.add_bitmap_allocator_entry("MAIN_MUTEX_ID_POOL",
                                        *self.cell.mutex_list)
    self.env.add_int_entry("MAIN_SPACE_ID", self.space.id)
    self.env.add_int_entry("MAIN_CLIST_ID", self.cell.clist_id)

    self.env.generate_dynamic_segments(self, image, machine, pools)

    self.elf_segments.extend(utcb_mss + [self.env.memsect])
    self.group_elf_segments(image)
def unpack(self, endianess):
    self._endianess = endianess
    self._format_chr = "I"

    if len(self._data) == 0:
        return

    name_start = self.bytes_per_word * 3
    namesz, descsz, _type = \
            struct.unpack(self._endianess + self._format_chr * 3,
                          self._data[:name_start])
    name = struct.unpack(self._endianess + str(namesz) + 's',
                         self._data[name_start:name_start + namesz])

    # Remove the terminating 0.
    self.note_name = name[0].split('\x00')[:-1][0]
    self.note_type = _type

    desc_start = name_start + align_up(namesz, self.bytes_per_word)
    self._desc_data = self._data[desc_start:desc_start + descsz]
def mark(self, base, size, cache_policy):
    """
    mark(base, size, cache_policy) -> marked

    Remove the given range from the free list.  This is used to
    record where fixed address objects are located.  Returns whether
    or not the range was removed from the free list.  It is *not* an
    error if the range is wholly outside the freelist.
    """
    end = base + size - 1

    if end < base:
        raise AllocatorException, \
              "Mark end address (0x%x) less than mark base " \
              "address (0x%x)" % (end, base)
    if base == end:
        end = end + 1

    # Remove all of the pages containing the region.
    base = align_down(base, self.min_alloc)
    end = align_up(end, self.min_alloc) - 1

    new_freelist = []
    marked = False

    for free_start, free_end, mem_type in self.freelist:
        assert free_start % self.min_alloc == 0
        assert (free_end + 1) % self.min_alloc == 0

        if (free_start <= base <= free_end) or \
           (base <= free_start <= end):
            marked = True

            if (self.tracker is None or
                not self.tracker.marking_direct) and \
               not self.machine.check_cache_permissions(cache_policy,
                                                        mem_type):
                raise AllocatorException(
                    'Mark: Cache policy "%s" is not allowed by the '
                    'cache rights "%s"' %
                    (self.machine.cache_policy_to_str(cache_policy),
                     self.machine.cache_perms_to_str(mem_type)))

            # Report the allocation to the tracker, if there is
            # one.
            if self.tracker is not None:
                self.tracker.track_mark(base, end)

            # Insert into the freelist any remaining parts of the
            # free memory region.
            if free_start < base:
                new_freelist.append((free_start, base - 1, mem_type))
            if end < free_end:
                new_freelist.append((end + 1, free_end, mem_type))
        else:
            new_freelist.append((free_start, free_end, mem_type))

    self.freelist = new_freelist

    return marked
def create_dynamic_segments(self, kernel, namespace, image, machine,
                            pools, base_segment):
    data = self.create_ops(kernel, None, machine)
    kernel.heap_attrs.size = align_up(len(data),
                                      machine.min_page_size())
def prepare(self, wordsize, endianess):
    """Prepare file for writing"""
    self.wordsize = wordsize
    self.endianess = endianess

    # First we prepare the symbols, as this might in fact add
    # new sections.
    sh_string_table = None

    # Sort the sections so they appear as you'd expect from ld:
    # allocated sections in address order first, then the remaining
    # sections in name order.
    old_sections = self.sections[1:]
    if len(self.sections) > 1:
        self.sections = [self.sections[0]]
    else:
        self.sections = []

    for sect in old_sections:
        if sect.address > 0:
            self.sections.append(sect)

    old_sections = [sect for sect in old_sections if sect.address == 0]
    old_sections.sort(key=lambda x: x.name)
    self.sections.sort(key=lambda x: x.address)
    self.sections.extend(old_sections)

    if len(self.sections) > 1:
        # Create section header string table
        sh_string_table = self.create_and_replace(UnpreparedElfStringTable,
                                                  ".shstrtab")

    # We need to make sure the symbol table is the last thing in the
    # file.
    symtab = self.find_section_named(".symtab")
    if symtab:
        i = self.sections.index(symtab)
        self.sections[-1], self.sections[i] = symtab, self.sections[-1]

    # First find where our segments should go.
    if self._ph_offset is None or self._ph_offset == 0:
        self.set_ph_offset(ELF_HEADER_CLASSES[wordsize].size())
    ofs = self._ph_offset

    sections = self.sections[:]
    if self.segments:
        ofs = self._prepare_segments(ofs, sh_string_table, sections)

    ofs, sections = self._prepare_sections(ofs, sh_string_table, sections)

    new_file = PreparedElfFile(wordsize, endianess, sections,
                               self.segments)
    new_file._sh_offset = align_up(ofs, new_file.wordsize / 8)
    new_file._sh_strndx = self._sh_strndx
    new_file._ph_offset = self._ph_offset
    new_file.machine = self.machine
    new_file.elf_type = self.elf_type
    new_file.entry_point = self.entry_point
    new_file.osabi = self.osabi
    new_file.abiversion = self.abiversion
    new_file.flags = self.flags

    for section in new_file.sections:
        section.elffile = new_file

    return new_file
def create_ops(self, kernel, image, machine):
    """
    Create an init script for Micro kernel initialisation.
    """
    op_list = []
    offset = [0]

    def add_op(op_func, *args):
        op = op_func(None, None, None, (args), image, machine)
        op_list.append(op)
        my_offset = offset[0]
        offset[0] += op.sizeof()
        return my_offset

    f = StringIO()

    # We just use the cells in order, hopefully the first cell has a
    # large enough heap for soc/kernel.  No longer do sorting.
    cells = kernel.cells.values()

    ## PHASE ONE ##
    add_op(InitScriptHeader, [])
    add_op(InitScriptCreateHeap, _0(cells[0].heap_phys_base),
           cells[0].heap_size)

    # Declare total sizes.  They must be a minimum of 1.
    add_op(InitScriptInitIds,
           max(kernel.total_spaces, 1),
           max(kernel.total_clists, 1),
           max(kernel.total_mutexes, 1))

    needs_heap = False

    add_op(InitScriptCreateThreadHandles, _0(kernel.thread_array_base),
           kernel.thread_array_count)
    op_list[-1].set_eop()

    ## PHASE TWO ##
    for cell in cells:
        # No need to encode the heap of the first cell.
        if needs_heap:
            add_op(InitScriptCreateHeap, _0(cell.heap_phys_base),
                   cell.heap_size)
        else:
            needs_heap = True

        cell.clist_offset = \
                add_op(InitScriptCreateClist, cell.clist_id,
                       cell.max_caps)

        for space in cell.get_static_spaces():
            utcb_base = 0xdeadbeef # something obvious if we ever use it!
            utcb_size = 0x11

            if space.utcb is not None:
                utcb_base = space.utcb.virt_addr
                if utcb_base is None:
                    utcb_base = 0
                    utcb_size = 0
                else:
                    utcb_size = int(log(space.utcb.size, 2))

            add_op(InitScriptCreateSpace, space.id,
                   space.space_id_base, _0(space.max_spaces),
                   space.clist_id_base, _0(space.max_clists),
                   space.mutex_id_base, _0(space.max_mutexes),
                   space.max_phys_segs, utcb_base, utcb_size,
                   space.is_privileged,
                   #XXX: A space's max priority is currently hardcoded!
                   #XXX: For now, use the kernel's max priority instead.
                   self.MAX_PRIORITY) #space.max_priority)

            # Grant the space access to the platform control
            # system call.
            if space.plat_control:
                add_op(InitScriptAllowPlatformControl, [])

            # Assign any irqs to the space.
            for irq in space.irqs:
                add_op(InitScriptAssignIrq, irq)

            for thread in space.get_static_threads():
                # FIXME: Need to deal with entry and user_start
                thread.offset = \
                        add_op(InitScriptCreateThread, thread.cap_slot,
                               thread.priority, thread.entry,
                               thread.get_sp(), utcb_base,
                               cell.get_mr1())

            for mutex in space.get_static_mutexes():
                mutex.offset = \
                        add_op(InitScriptCreateMutex, mutex.id)

            for (num, name, attrs) in space.mappings:
                map_pg_sz = machine.min_page_size()
                map_pg_sz_log2 = int(log(map_pg_sz, 2))

                phys_addr = align_down(_0(attrs.phys_addr), map_pg_sz)
                virt_addr = align_down(_0(attrs.virt_addr), map_pg_sz)

                # Calculate the shift caused by aligning the phys_addr.
                alignment_diff = 0
                if attrs.has_phys_addr():
                    alignment_diff = attrs.phys_addr - phys_addr

                size = 0
                num_pages = 0
                if attrs.size != None:
                    # In certain cases, phys alignments can leave us a
                    # page short.  To account for this we add alignment
                    # differences to the size.
                    size = align_up(attrs.size + alignment_diff,
                                    map_pg_sz)
                    num_pages = size / map_pg_sz

                # Attributes are 0xff => All cache policies are valid!
                if attrs.has_phys_addr():
                    add_op(InitScriptCreateSegment, num, phys_addr,
                           0xff, size, attrs.attach)

                if attrs.need_mapping():
                    add_op(InitScriptMapMemory, num, 0, attrs.attach,
                           map_pg_sz_log2, num_pages,
                           attrs.cache_policy, virt_addr)

    # Dump any caps
    for cell in cells:
        for space in cell.get_static_spaces():
            for cap in space.ipc_caps:
                add_op(InitScriptCreateIpcCap,
                       _0(cap.clist.clist_offset), _0(cap.cap_slot),
                       cap.obj.offset)
            for cap in space.mutex_caps:
                add_op(InitScriptCreateMutexCap,
                       _0(cap.clist.clist_offset), _0(cap.cap_slot),
                       cap.obj.offset)

    op_list[-1].set_eop()

    f.write(''.join([op.encode() for op in op_list]))
    return f
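# add_op() above threads the running script offset through the
# one-element list 'offset' because a Python 2 closure cannot rebind an
# outer local variable (there is no 'nonlocal').  A minimal sketch of
# the idiom (hypothetical names, fixed-size ops for illustration):
#
#     offset = [0]
#     def add(size):
#         my_offset = offset[0]   # where this op starts
#         offset[0] += size       # advance past it for the next op
#         return my_offset
#     add(4), add(8), add(4)      # returns 0, 4, 12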
class Allocator(object):
    """
    A memory allocator.

    This allocator is based on the first fit algorithm with an
    interface tuned to the rather strange requirements of elfweaver.

    The allocator keeps a sorted list of free address ranges.  Each
    range is a multiple of <min_alloc_size> bytes (aka a page), and is
    aligned on a <min_alloc_size> boundary.

    Separate calls to the allocator will result in items being
    allocated in different pages.  However it is possible to allocate
    items on adjacent bytes, provided that they are part of the same
    allocation group.

    Allocation groups are a method of allocating multiple items at one
    time and ensuring that they are placed sufficiently close to one
    another.  Some items in a group may already have their addresses
    set (for instance, allocating a stack near a program's text
    segment).  These items must have addresses that are not in the
    allocator's free list.  An individual item will be placed at the
    lowest address that satisfies its size, offset and alignment
    requirements.  Items in a group are allocated in increasing order
    of address.

    Memory at a particular address can be removed from the free list
    with the mark() method.  It is not an error to mark memory that is
    wholly or in part missing from the free list.

    A window of memory can also be marked with the mark_window()
    method.  A window is similar to regular marking, except that it is
    an error for the region of memory to contain memory outside of the
    freelist unless those regions are present in a list supplied to
    the method.  The entire memory region must have once been present
    in the allocator's freelist.  Windows are used to implement zones.
    """

    def __init__(self, min_alloc_size, tracker=None):
        """
        __init__(min_alloc_size)

        min_alloc_size is the smallest size, in bytes, that can be
        allocated.  Alignment must be a multiple of this size.
        """
        # Alloc size must be a power of 2.
        assert (min_alloc_size & (min_alloc_size - 1)) == 0

        self.freelist = []
        self.fulllist = []
        self.min_alloc = min_alloc_size
        self.tracker = tracker

    def __merge(self, the_list):
        """
        Merge adjacent regions into one.  Merging regions simplifies
        the logic of the rest of the allocation code.
        """
        last = None
        new_list = []

        for region in the_list:
            if last is None:
                last = region
            else:
                if last[1] == region[0] - 1:
                    last = (last[0], region[1], last[2])
                else:
                    new_list.append(last)
                    last = region

        if last is not None:
            new_list.append(last)

        return new_list

    def sort(self):
        """
        Sort the free and full memory lists and merge any adjacent
        memory regions.
        """
        self.freelist.sort(key=lambda x: x[0])
        self.freelist = self.__merge(self.freelist)

        self.fulllist.sort(key=lambda x: x[0])
        self.fulllist = self.__merge(self.fulllist)

    def get_freelist(self):
        """Return the current free list."""
        return self.freelist

    def add_memory(self, base, size, mem_type):
        """
        Add a region of free memory to the pool.  The memory must be a
        multiple of <min_alloc_size> bytes in size, and aligned on a
        <min_alloc_size> boundary.  Memory addresses may only be added
        to the allocator once.
        """
        # Check that the memory is page aligned.
        assert align_down(base, self.min_alloc) == base
        assert size % self.min_alloc == 0

        end = base + size - 1

        # Check that the memory does not overlap any of the regions
        # already in use.
        for list_base, list_end, list_mem_type in self.fulllist:
            if (base >= list_base and base <= list_end) or \
               (end >= list_base and end <= list_end):
                raise AllocatorException, \
                      "Cannot add overlapping memory regions to the " \
                      "Allocator.  Address (0x%x--0x%x) already in " \
                      "(0x%x--0x%x)" % \
                      (base, end, list_base, list_end)

        self.fulllist.append((base, end, mem_type))
        self.freelist.append((base, end, mem_type))
        self.sort()

    def mark(self, base, size):
        """
        mark(base, size) -> marked

        Remove the given range from the free list.  This is used to
        record where fixed address objects are located.  Returns
        whether or not the range was removed from the free list.  It
        is *not* an error if the range is wholly outside the
        freelist.
        """
        end = base + size - 1

        if end < base:
            raise AllocatorException, \
                  "Mark end address (0x%x) less than mark base " \
                  "address (0x%x)" % (end, base)
        if base == end:
            end = end + 1

        # Remove all of the pages containing the region.
        base = align_down(base, self.min_alloc)
        end = align_up(end, self.min_alloc) - 1

        new_freelist = []
        marked = False

        for free_start, free_end, mem_type in self.freelist:
            assert free_start % self.min_alloc == 0
            assert (free_end + 1) % self.min_alloc == 0

            if (free_start <= base and base <= free_end) or \
               (base <= free_start and free_start <= end):
                marked = True

                # Report the allocation to the tracker, if there is
                # one.
                if self.tracker is not None:
                    self.tracker.track_mark(base, end)

                # Insert into the freelist any remaining parts of the
                # free memory region.
                if free_start < base:
                    new_freelist.append((free_start, base - 1, mem_type))
                if end < free_end:
                    new_freelist.append((end + 1, free_end, mem_type))
            else:
                new_freelist.append((free_start, free_end, mem_type))

        self.freelist = new_freelist

        return marked

    def mark_window(self, window_base, size, holes):
        """
        Mark a window of memory.

        A window is a region of memory whose addresses must be either
        in the free list or listed in the holes list.  Every address
        in the region must have been present in the freelist at some
        stage.

        The holes variable is a list of (base, size) tuples.

        Returns the parts of the freelist that were removed.
        """
        if holes is None:
            holes = []

        window_end = window_base + size - 1

        if window_end < window_base:
            raise AllocatorException, \
                  "alloc_window: Window end address (0x%x) less " \
                  "than mark base address (0x%x)" % \
                  (window_end, window_base)
        if window_base == window_end:
            window_end = window_end + 1

        window_base = align_down(window_base, self.min_alloc)
        window_end = align_up(window_end, self.min_alloc) - 1

        # First check that the proposed window is in the memory that
        # was originally passed to add_memory().
        contained = False

        for full_start, full_end, mem_type in self.fulllist:
            # If the window is fully contained within one record, then
            # we're sweet.
            if full_start <= window_base <= full_end and \
               full_start <= window_end <= full_end:
                contained = True
                break

        # OK, what's the answer?
        if not contained:
            raise AllocatorException, \
                  "alloc_window: Window not in allocator controlled " \
                  "memory."

        # Transform the hole list from (base, size) to (base, end),
        # rounded to page boundaries, and sort in increasing order of
        # address.
        holes = [(align_down(hole_base, self.min_alloc),
                  align_up(hole_base + hole_size - 1,
                           self.min_alloc) - 1)
                 for (hole_base, hole_size) in holes]
        holes.sort(key=lambda x: x[0])

        # Holes must be in the range of the window and can't overlap.
        for hole_base, hole_end in holes:
            assert window_base <= hole_base <= window_end and \
                   window_base <= hole_end <= window_end

        free_iter = iter(self.freelist)
        hole_iter = iter(holes)

        curr_addr = window_base
        free_done = False
        holes_done = False
        curr_free = None
        curr_hole = None
        new_freelist = []
        removed_freelist = []

        while curr_addr <= window_end:
            assert curr_addr % self.min_alloc == 0

            if not free_done and curr_free is None:
                try:
                    curr_free = free_iter.next()

                    # If the freelist range is outside where we are
                    # working, then loop and get another one.
                    if curr_free[1] < curr_addr or \
                       curr_free[0] > window_end:
                        new_freelist.append(curr_free)
                        curr_free = None
                        continue
                except StopIteration:
                    free_done = True
                    curr_free = None

            if not holes_done and curr_hole is None:
                try:
                    curr_hole = hole_iter.next()
                except StopIteration:
                    holes_done = True
                    curr_hole = None

            if curr_free is not None and \
               curr_free[0] <= curr_addr and \
               curr_free[1] >= curr_addr:
                if curr_hole is not None and \
                   curr_hole[0] <= curr_free[1]:
                    raise AllocatorException, \
                          "alloc_window: Hole (0x%x-0x%x) overlaps " \
                          "with free block (0x%x-0x%x)." % \
                          (curr_hole[0], curr_hole[1],
                           curr_free[0], curr_free[1])
                else:
                    # Remove the part we're interested in from the
                    # freelist.  Add the excess.
                    if curr_free[0] < curr_addr:
                        new_freelist.append((curr_free[0],
                                             curr_addr - 1,
                                             curr_free[2]))

                    if curr_free[1] > window_end:
                        new_freelist.append((window_end + 1,
                                             curr_free[1],
                                             curr_free[2]))
                        removed_freelist.append((curr_addr, window_end,
                                                 curr_free[2]))
                    else:
                        removed_freelist.append((curr_addr,
                                                 curr_free[1],
                                                 curr_free[2]))

                    curr_addr = curr_free[1] + 1
                    curr_free = None
            elif curr_hole is not None and \
                 curr_hole[0] == curr_addr:
                if curr_free is not None and \
                   curr_free[0] <= curr_hole[1]:
                    raise AllocatorException, \
                          "alloc_window: Hole (0x%x-0x%x) overlaps " \
                          "with free block (0x%x-0x%x)." % \
                          (curr_hole[0], curr_hole[1],
                           curr_free[0], curr_free[1])
                else:
                    curr_addr = curr_hole[1] + 1
                    curr_hole = None
            else:
                raise AllocatorException, \
                      "Address %#x should be in a zone but is neither " \
                      "free nor in an already allocated block.  Is it " \
                      "part of a direct addressing pool?" % \
                      curr_addr

        # Copy any remaining free list records into the new freelist.
        for curr_free in free_iter:
            new_freelist.append(curr_free)

        self.freelist = new_freelist

        return removed_freelist

    def __simple_alloc(self, freelist, size, alignment, offset):
        """
        Allocate a single block of memory or raise an exception.

        Search for a place in the freelist to allocate the requested
        block of memory.  Returns a tuple with the following values:

        - before_free - The freelist up to the place where the memory
          was allocated.
        - after_free  - The freelist after the place where the memory
          was allocated.
        - addr        - The allocated address.

        The free lists are not necessarily page aligned.  Returning
        the tuple allows the abort and retry semantics of group
        allocation to be implemented.
        """
        before_free = []
        after_free = []
        addr = None

        free_iter = iter(freelist)

        # Search the freelist for a suitable block.
        for (free_start, free_end, mem_type) in free_iter:
            # Calculate the proposed address.
            alloc_start = align_down(free_start, alignment) + offset
            alloc_end = alloc_start + size - 1

            # If alignment adjustments push the block below the
            # start of the free region, bump it up.
            if alloc_start < free_start:
                alloc_start += alignment
                alloc_end += alignment

            # If the range is within free memory, we've found it.
            if alloc_start >= free_start and alloc_end <= free_end:
                # Put the remaining parts of the region back into the
                # correct freelists.
                if free_start < alloc_start - 1:
                    before_free.append((free_start, alloc_start - 1,
                                        mem_type))
                if alloc_end + 1 < free_end:
                    after_free.append((alloc_end + 1, free_end,
                                       mem_type))

                addr = alloc_start
                break
            else:
                # Not useful, so add to the before list.
                before_free.append((free_start, free_end, mem_type))

        # Abort if nothing suitable was found.
        if addr is None:
            raise AllocatorException, "Out of memory"

        # Copy any remaining free list records into the after_freelist.
        for curr_free in free_iter:
            after_free.append(curr_free)

        return (before_free, after_free, alloc_start)

    def __group_alloc(self, group, freelist):
        """
        Allocate memory for a group of items and ensure that they have
        been allocated sufficiently close together.  Items in the
        group are allocated with increasing addresses.

        Returns a tuple with the following values:

        new_freelist - The revised freelist
        addrs        - A list of the addresses of the items, in item
                       order.
        """
        addrs = []
        new_freelist = []
        last_item = None

        for i in group.get_entries():
            # If the address is not fixed, then allocate.
            if i.get_addr() is None:
                (before, freelist, addr) = \
                         self.__simple_alloc(freelist, i.get_size(),
                                             i.get_alignment(),
                                             i.get_offset())
                new_freelist.extend(before)
            else:
                addr = i.get_addr()

                # Split the freelist around the fixed address so that
                # the next item will be allocated at a higher address.
                # Problems will arise if fixed items are placed in the
                # list unsorted.
                updated_freelist = []

                for (base, end, mem_type) in freelist:
                    if end < addr:
                        new_freelist.append((base, end, mem_type))
                    else:
                        assert base > addr
                        updated_freelist.append((base, end, mem_type))

                # Use the freelist above the fixed item for further
                # allocations.
                freelist = updated_freelist

            # Check the distance between the items and throw if
            # they're too far apart.
            if last_item is not None and \
               group.get_distance() is not None:
                if addr - (addrs[-1] + last_item.get_size()) > \
                   group.get_distance():
                    err_txt = {
                        'last_item' : last_item.get_name(),
                        'this_item' : i.get_name(),
                        'distance'  : group.get_distance(),
                        }
                    raise AllocatorGroupException, \
                          group.get_error_message() % err_txt

            addrs.append(addr)
            last_item = i

        # Add remaining parts of the freelist to the final freelist.
        new_freelist.extend(freelist)

        return (new_freelist, addrs)

    def alloc(self, group):
        """
        Allocate memory for a group of items.  Items within the group
        will be allocated no more than group.get_distance() bytes
        apart, otherwise an exception will be raised.
        """
        new_freelist = []
        curr_freelist = self.freelist
        addrs = []
        completed = False

        # How to allocate a group of items:
        #
        # 1) Try to allocate the group from the bottom of the free
        #    list.  If that works, good!
        # 2) If an AllocatorException is thrown, then an individual
        #    item could not find the memory it needs, so give up now
        #    and throw again.
        # 3) If an AllocatorGroupException is thrown, then the group's
        #    distance requirements could not be fulfilled.  This could
        #    be as a result of freelist fragmentation, so remove the
        #    first item from the freelist and try again.
        while not completed:
            try:
                (ret_freelist, addrs) = self.__group_alloc(group,
                                                           curr_freelist)
            except AllocatorException:
                raise AllocatorException, group.get_error_message()
            except AllocatorGroupException, agex:
                # Remove the first freelist record and try again.  If
                # there is fragmentation in the freelist, this may
                # work around it.  This exception may be raised again
                # if the group failed for another reason (for
                # instance, the request could not be satisfied at
                # all).
                if len(curr_freelist) <= 1:
                    raise AllocatorException, str(agex)
                else:
                    new_freelist.append(curr_freelist.pop(0))
            else:
                # Success!
                completed = True

        # Rebuild the freelist.
        new_freelist.extend(ret_freelist)

        # Assign to each item its address.
        for (item, addr) in zip(group.get_entries(), addrs):
            # Report the allocation to the tracker, if there is one.
            # For completely accurate coverage the pages that were
            # allocated should really be tracked, but that is:
            # a) really hard.
            # b) not what the original version did.
            if self.tracker is not None and \
               addr != item.get_addr():
                self.tracker.track_alloc(item.get_name(), addr,
                                         addr + item.get_size() - 1)

            item.set_addr(addr)

        # Restore the invariant by removing all partial pages from the
        # freelist.
        self.freelist = []

        for (base, end, mem_type) in new_freelist:
            if base % self.min_alloc != 0:
                base = align_up(base, self.min_alloc)
            if ((end + 1) % self.min_alloc) != 0:
                end = align_down(end, self.min_alloc) - 1

            # Don't include pages that have been rounded out of
            # existence.
            if base < end:
                self.freelist.append((base, end, mem_type))
    def next_avail(self):
        """
        Return the base address of the biggest block of memory in
        the free list.
        """
        # Body reconstructed from the docstring: pick the largest
        # free region and return its base address.
        assert len(self.freelist) != 0
        return max(self.freelist, key=lambda r: r[1] - r[0])[0]
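# A minimal usage sketch of the Allocator (hypothetical addresses and
# memory type; trackers and group allocation are omitted):
#
#     pool = Allocator(0x1000)                   # 4K pages
#     pool.add_memory(0x100000, 0x10000, "normal")
#     pool.mark(0x102000, 0x1000)                # reserve a fixed page
#     # freelist now holds (0x100000-0x101fff) and (0x103000-0x10ffff)
#     assert pool.next_avail() == 0x103000       # biggest free block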