def test_set_ph_offset(self):
    """Fixed program-header offsets stick, and vanish after prepare()."""
    elf = UnpreparedElfFile()
    elf.set_ph_offset(100, True)
    self.assertEqual(elf._ph_offset, 100)
    # Once the offset is fixed it may not be fixed again.
    self.assertRaises(InvalidArgument, elf.set_ph_offset, 1, True)
    elf = elf.prepare(32, "<")
    # A prepared file no longer exposes the mutator at all.
    self.assertEqual(hasattr(elf, "set_ph_offset"), False)
def _init_elf_file(pd_el, image, pd_namespace, machine, pools, bootinfo, pd, kernel): elf = UnpreparedElfFile(filename=os.path.join(pd_el._path, pd_el.file)) if elf.elf_type != ET_EXEC: raise MergeError, "All the merged ELF files must be of EXEC type." segment_els = pd_el.find_all_children("segment") segs = collect_elf_segments(elf, image.PROGRAM, segment_els, pd_el.name, [], pd_namespace, image, machine, pools) segs_ms = [bootinfo.record_segment_info(pd_el.name, seg, image, machine, pools) for seg in segs] for seg_ms in segs_ms: pd.attach_memsection(seg_ms) # Record any patches being made to the program. patch_els = pd_el.find_children("patch") collect_patches(elf, patch_els, os.path.join(pd_el._path, pd_el.file), image) # Collect threads in the PD. elf = pd.elf = elf.prepare(image.wordsize, image.endianess) return elf, segs
def link_kernel(kernel_el, linker_script, linker_wrapper, kernel, soc,
                libraries):
    """Link the kernel against its SoC objects and libraries.

    Returns (elf, kernel_out_name, kernel_tmp) where kernel_tmp is the
    temporary file used for RVCT links, or None otherwise.
    """
    link_type = get_linker_type(linker_script)
    kernel_tmp = None
    if link_type == 'rvct':
        # Current elfweaver linker does not support RVCT so shell out to an
        # external linker.
        kernel_tmp = tempfile.NamedTemporaryFile()
        # Remove the file at exit.
        atexit.register(os.remove, kernel_tmp.name)
        command = [os.path.abspath(linker_wrapper), kernel,
                   kernel_tmp.name, linker_script, soc] + libraries
        ret = subprocess.Popen(command,
                               cwd=os.path.dirname(linker_script)).wait()
        if ret != 0:
            raise MergeError("Failed to link kernel, return code %d" % ret)
        elf = UnpreparedElfFile(kernel_tmp.name)
        kernel_out_name = kernel_tmp.name
    else:
        elf = link([kernel, soc])
        # Just getting a random temp file name, there must be a nicer way
        # to do this?
        tmp = tempfile.NamedTemporaryFile()
        # Remove the file at exit.
        atexit.register(os.remove, tmp.name)
        kernel_out_name = tmp.name
        tmp.close()
        kernel_out = elf.prepare(elf.wordsize, elf.endianess)
        kernel_out.to_filename(kernel_out_name)
        elf = UnpreparedElfFile(kernel_out_name)
    # Kernel is linked, now add the segment names (as per old elfadorn).
    # The kernel element's seg_names attribute overrides the defaults.
    if hasattr(kernel_el, "seg_names"):
        seglist = [seg.strip() for seg in kernel_el.seg_names.split(",")]
    else:
        seglist = None
    link_type = get_linker_type(linker_script)
    if link_type == "rvct":
        scripts = [linker_script]
    else:
        scripts = []
    add_segment_names(elf, get_segment_names(elf, seglist, scripts,
                                             link_type))
    return elf, kernel_out_name, kernel_tmp
def test_copy_into(self):
    """copy_into() should clone sections from one ELF file to another."""
    elf_from = UnpreparedElfFile()
    elf_to = UnpreparedElfFile()
    sect = BaseElfSection(elf_from, "test")
    # FIXME
    #self.assertRaises(NotImplementedError, sect.copy_into, elf_to)
    sect = UnpreparedElfSection(elf_from, "test")
    copied = sect.copy_into(elf_to)
    self.assertEquals(sect.name, copied.name)
    prep_sect = sect.prepare(0, 0, 0)
    # FIXME
    #self.assertRaises(NotImplementedError, prep_sect.copy_into, elf_to)
    # NOBITS sections copy just like regular ones.
    sect = UnpreparedElfSection(elf_from, "test", SHT_NOBITS)
    copied = sect.copy_into(elf_to)
    self.assertEquals(sect.name, copied.name)
    # String tables must keep their contents index-for-index.
    strtab = UnpreparedElfStringTable(elf_from, "string")
    strings = ["foo", "bar", "baz"]
    for string in strings:
        strtab.add_string(string)
    copied = strtab.copy_into(elf_to)
    for idx in range(len(strings)):
        self.assertEquals(strtab.get_string_idx(idx),
                          copied.get_string_idx(idx))
def list_contents(self): print "# Addr Value" elf = UnpreparedElfFile( filename=os.path.join(base_dir, 'build', self.bin)) text = elf.find_section_named(".text") for offset in self.addrs: print "%8.8x %8.8x" % (text.address + offset, text.get_word_at(offset))
def collect_extension_element(extension_el, pd, namespace, rp_elf, image, machine, bootinfo, pools): # New namespace for objects living in the extension. extn_namespace = namespace.add_namespace(extension_el.name) elf = None start = None name = None physpool = getattr(extension_el, 'physpool', None) pager = getattr(extension_el, 'pager', None) direct = getattr(extension_el, 'direct', None) # Push the overriding pools for the extension. image.push_attrs(physical=physpool, pager=pager, direct=direct) if hasattr(extension_el, "file"): elf = UnpreparedElfFile(filename=extension_el.file) if elf.elf_type != ET_EXEC: raise MergeError, "All the merged ELF files must be of EXEC type." segment_els = extension_el.find_children("segment") segs = collect_elf_segments(elf, image.EXTENSION, segment_els, extension_el.name, extn_namespace, image, machine, pools) segs_ms = [ bootinfo.record_segment_info(extension_el.name, seg, image, machine, pools) for seg in segs ] for seg_ms in segs_ms: pd.attach_memsection(seg_ms) # Record any patches being made to the program. patch_els = extension_el.find_children("patch") collect_patches(elf, patch_els, extension_el.file, image) start = elf.entry_point name = extension_el.file if hasattr(extension_el, "start"): start = extension_el.start name = extension_el.name # If no file is supplied, look for symbols in the root # program. if elf is None: elf = rp_elf elf = elf.prepare(elf.wordsize, elf.endianess) start = start_to_value(start, elf) bootinfo.add_elf_info(name=name, elf_type=image.EXTENSION, entry_point=start) image.pop_attrs()
def link_kernel(kernel_el, linker_script, linker_wrapper, kernel, soc,
                libraries):
    """Link the kernel with the SoC code and libraries.

    Returns a tuple (elf, kernel_out_name, kernel_tmp); kernel_tmp is
    only non-None for RVCT links, where an external linker is used.
    """
    kernel_tmp = None
    if get_linker_type(linker_script) == 'rvct':
        # Current elfweaver linker does not support RVCT so shell out to
        # an external linker.
        kernel_tmp = tempfile.NamedTemporaryFile()
        # Remove the file at exit.
        atexit.register(os.remove, kernel_tmp.name)
        command = [os.path.abspath(linker_wrapper), kernel,
                   kernel_tmp.name, linker_script, soc] + libraries
        proc = subprocess.Popen(command,
                                cwd=os.path.dirname(linker_script))
        ret = proc.wait()
        if ret != 0:
            raise MergeError("Failed to link kernel, return code %d" % ret)
        elf = UnpreparedElfFile(kernel_tmp.name)
        kernel_out_name = kernel_tmp.name
    else:
        elf = link([kernel, soc])
        # Just getting a random temp file name, there must be a nicer way
        # to do this?
        tmp = tempfile.NamedTemporaryFile()
        # Remove the file at exit.
        atexit.register(os.remove, tmp.name)
        kernel_out_name = tmp.name
        tmp.close()
        kernel_out = elf.prepare(elf.wordsize, elf.endianess)
        kernel_out.to_filename(kernel_out_name)
        elf = UnpreparedElfFile(kernel_out_name)
    # Kernel is linked, now add the segment names (as per old elfadorn).
    # A seg_names attribute on the kernel element overwrites the defaults.
    seglist = None
    if hasattr(kernel_el, "seg_names"):
        seglist = [seg.strip() for seg in kernel_el.seg_names.split(",")]
    link_type = get_linker_type(linker_script)
    if link_type == "rvct":
        scripts = [linker_script]
    else:
        scripts = []
    seg_names = get_segment_names(elf, seglist, scripts, link_type)
    add_segment_names(elf, seg_names)
    return elf, kernel_out_name, kernel_tmp
def test_prepare(self):
    """A populated symbol table should survive prepare()."""
    elf = UnpreparedElfFile()
    section = UnpreparedElfSection(elf, "test_sect")
    symtab = UnpreparedElfSymbolTable(elf, ".symtab")
    for sym_name in ["foo", "bar"]:
        symtab.add_symbol(ElfSymbol(sym_name, section))
    elf.wordsize = 32
    elf.endianess = "<"
    symtab = symtab.prepare(0x1000, 1, 0)
def test_add_reloc(self):
    """Relocations added to a reloc section should all be retained."""
    elf = UnpreparedElfFile()
    elf.wordsize = 32
    reloc_sect = UnpreparedElfReloc(elf)
    for offset in range(0, 0x10000, 0x1000):
        reloc_sect.add_reloc(ElfReloc(offset))
    # 0x10000 / 0x1000 entries were added; all must be present.
    self.assertEqual(len(reloc_sect.relocs), 16)
def test_add_section(self):
    """Files accept new sections; header and data segments do not."""
    elf = UnpreparedElfFile()
    section = UnpreparedElfSection()
    elf.add_section(section)
    self.assertEqual(elf.sections[-1], section)
    # Neither segment flavour below supports add_section().
    seg = HeaderElfSegment()
    self.assertRaises(InvalidArgument, seg.add_section, section)
    seg = DataElfSegment()
    self.assertRaises(InvalidArgument, seg.add_section, section)
def test_replace_section(self):
    """replace_section() swaps sections and rejects unknown originals."""
    elf_from = UnpreparedElfFile()
    elf_to = UnpreparedElfFile()
    seg = SectionedElfSegment(
        elf_from, sections=[UnpreparedElfSection(elf_from, "test")])
    old_section = seg.get_sections()[0]
    new_section = UnpreparedElfSection(elf_to, "new")
    seg.replace_section(old_section, new_section)
    self.assertEqual(seg.get_sections(), [new_section])
    new_seg = seg.copy_into(elf_to)
    # Replacing a section the segment does not contain is an error.
    self.assertRaises(InvalidArgument, seg.replace_section, None,
                      new_section)
def test_prepare(self):
    """A file holding sectioned and header segments prepares cleanly."""
    elf = UnpreparedElfFile()
    segment = SectionedElfSegment(None, align=0x1)
    section = UnpreparedElfSection(None, "pants")
    elf.add_section(section)
    segment.add_section(section)
    elf.add_segment(segment)
    elf.add_segment(HeaderElfSegment(None))
    elf = elf.prepare(32, '<')
def test_add_symbols(self):
    """Symbols can be added with or without a linked string table."""
    names = ["foo", "bar"]
    # Symbol table with no explicit string table.
    elf = UnpreparedElfFile()
    section = UnpreparedElfSection(elf, "test_sect")
    symtab = UnpreparedElfSymbolTable(elf, ".symtab")
    for name in names:
        symtab.add_symbol(ElfSymbol(name, section))
    # Symbol table linked to an explicit string table.
    elf = UnpreparedElfFile()
    section = UnpreparedElfSection(elf, "test_ect")
    strtab = UnpreparedElfStringTable(elf, ".strtab")
    symtab = UnpreparedElfSymbolTable(elf, ".symtab", link=strtab)
    for name in names:
        symtab.add_symbol(ElfSymbol(name, section))
def set_kernel(self, kernel):
    """Record the kernel, mirroring its identity fields into a fresh
    executable ELF file."""
    self.elf = elf = UnpreparedElfFile()
    self.endianess = kernel.endianess
    self.wordsize = kernel.wordsize
    elf.elf_type = ET_EXEC
    # Copy the kernel's machine/ABI identity and entry point verbatim.
    elf.machine = kernel.machine
    elf.osabi = kernel.osabi
    elf.abiversion = kernel.abiversion
    elf.flags = kernel.flags
    elf.entry_point = kernel.entry_point
    # Honour a previously requested fixed program-header offset.
    if self.ph_offset is not None:
        elf.set_ph_offset(self.ph_offset, fixed=True)
def _update_elf_file(target, seglist, scripts, linker_type,
                     orphaned_sections):
    """Add segment names to the executable ELF at `target`, rewriting it
    in place. Non-executable files are left untouched."""
    elf = UnpreparedElfFile(filename=target)
    if elf.elf_type != ET_EXEC:
        return
    names = get_segment_names(elf, seglist, scripts, linker_type,
                              orphaned_sections)
    add_segment_names(elf, names)
    prepared = elf.prepare(elf.wordsize, elf.endianess)
    # Write the file back over the original.
    prepared.to_filename(target)
def test_null_elf(self):
    """An empty ELF file should serialise to exactly its header.

    Expected sizes are the ELF header sizes: 52 bytes for 32-bit
    files, 64 bytes for 64-bit files, regardless of endianess.
    """
    for endianess, wordsize, expectedsize in [("<", 32, 52),
                                              (">", 32, 52),
                                              ("<", 64, 64),
                                              (">", 64, 64)]:
        ef = UnpreparedElfFile()
        # A fresh file contains only the null section and no segments.
        self.assertEqual(len(ef.sections), 1)
        self.assertEqual(ef.has_segments(), False)
        ef = ef.prepare(wordsize, endianess)
        f = File("test.elf", "wb")
        try:
            data = ef.todata()
            for offset, _dat in data:
                f.seek(offset)
                _dat.tofile(f)
            f.flush()
            self.assertEqual(f.size(), expectedsize)
        finally:
            # Always close and delete the scratch file, even when an
            # assertion fails; the original leaked it on failure.
            f.close()
            os.remove("test.elf")
def check_relocs(self):
    # For each assembly source, open the corresponding object file and
    # verify that every relocation in its REL/RELA sections matches the
    # expected (offset, type, symdx, word) tuple in self.relocs.
    for src in self.srcs:
        file = src.replace(".s", ".o")
        print "[REL ] %s" % file,
        elf = UnpreparedElfFile(os.path.join(base_dir, 'build', file))
        try:
            for sec in filter(lambda s: s.type in (SHT_REL, SHT_RELA),
                              elf.sections):
                for reloc in sec.relocs:
                    # Remember the offset for later content checks.
                    self.addrs.append(reloc.offset)
                    if self.relocs[file][reloc.offset] != \
                       (reloc.offset, reloc.type, reloc.symdx,
                        sec.info.get_word_at(reloc.offset)):
                        print " -> FAILED!"
                        self.list_relocs(file)
                        raise Exception, \
                              ("Input relocation differs from expected! " + \
                               "Got: %s\nExpected: %s") % \
                              ((reloc.offset, reloc.type, reloc.symdx,
                                sec.info.get_word_at(reloc.offset)),
                               (reloc, self.relocs[file][reloc.offset]))
            print " -> OK"
        except KeyError:
            # A reloc at an offset we have no expectation for: dump all
            # relocations to aid debugging.
            print " -> FAILED!"
            self.list_relocs()
def get_kernel_file(kernel_el, machine):
    """Return (elf, out_name, tmp_file) for the kernel in kernel_el.

    A prelinked kernel is loaded directly; otherwise the kernel is
    linked from SDK artifacts or from explicit linking information.
    """
    def relative(file):
        """Return a path taking relativity to XML include directories into account"""
        return os.path.join(kernel_el._path, file)
    if has_kernel_attr(kernel_el, KERNEL_PRELINK_TAGS):
        # Already linked: just load the file.
        return (UnpreparedElfFile(filename=relative(kernel_el.file)),
                kernel_el.file, None)
    elif has_kernel_attr(kernel_el, KERNEL_SDKLINK_TAGS):
        # Locate the linker inputs inside the SDK's per-CPU kernel tree.
        kernel_dir = os.path.join(relative(kernel_el.sdk), "kernel",
                                  machine.get_cpu())
        object_dir = os.path.join(kernel_dir, kernel_el.configuration,
                                  "object")
        lib_dir = os.path.join(kernel_dir, kernel_el.configuration, "libs")
        linker_script = os.path.abspath(
            os.path.join(object_dir, "linker.lds"))
        linker_wrapper = os.path.abspath(
            os.path.join(object_dir, "linker.sh"))
        kernel = os.path.abspath(
            os.path.join(object_dir, machine.get_cpu() + ".o"))
        soc = os.path.abspath(
            os.path.join(object_dir, kernel_el.platform + ".o"))
        libs = [os.path.abspath(lib)
                for lib in glob.glob(os.path.join(lib_dir, "*.a"))]
        return link_kernel(kernel_el, linker_script, linker_wrapper,
                           kernel, soc, libs)
    elif has_kernel_attr(kernel_el, KERNEL_LINK_TAGS):
        libs = [relative(lib.strip())
                for lib in kernel_el.libs.split(",")]
        return link_kernel(kernel_el,
                           relative(kernel_el.linker_script),
                           relative(kernel_el.linker_wrapper),
                           relative(kernel_el.kernel),
                           relative(kernel_el.soc),
                           libs)
    raise MergeError("Invalid kernel tags; a prelinked kernel, SDK information or linking information must be provided")
def test_remove_nobits(self):
    """remove_nobits() keeps memory sizes while backing NOBITS data."""
    sample_files = ["data/null_elf",
                    "data/arm_exec",
                    "data/arm_stripped_exec",
                    "data/arm_exec_nosect",
                    "data/arm_object",
                    "data/mips64_exec",
                    "data/mips32_exec",
                    "data/ia32_exec",
                    "data/amd64_exec",
                    "data/ia64_exec",
                    ]
    for file_name in sample_files:
        ef = PreparedElfFile(filename=file_name)
        if ef.segments:
            seg_sizes = [(segment.get_memsz(), segment.get_filesz())
                         for segment in ef.segments]
            ef.remove_nobits()
            # File size must now equal memory size, and the memory size
            # itself must be unchanged.
            for segment, seg_size in zip(ef.segments, seg_sizes):
                self.assertEqual(segment.get_memsz(),
                                 segment.get_filesz())
                self.assertEqual(segment.get_memsz(), seg_size[0])
    # A NOBITS section becomes PROGBITS backed by zero bytes.
    ef = UnpreparedElfFile()
    sec = UnpreparedElfSection(None, section_type=SHT_NOBITS, data=10)
    self.assertEqual(sec.get_size(), 10)
    ef.add_section(sec)
    ef.remove_nobits()
    sec = ef.sections[1]
    self.assertEqual(sec.type, SHT_PROGBITS)
    self.assertEqual(sec.get_size(), 10)
    self.assertEqual(sec.get_file_data(), ByteArray('\0' * 10))
def test_prepare(self):
    """Preparing a file with sectioned and header segments succeeds."""
    elf = UnpreparedElfFile()
    segment = SectionedElfSegment(align=0x1)
    section = UnpreparedElfSection("pants")
    elf.add_section(section)
    segment.add_section(section)
    elf.add_segment(segment)
    elf.add_segment(HeaderElfSegment())
    elf = elf.prepare(32, "<")
def test_null_elf(self):
    """Serialising an empty ELF file yields only the header.

    Header sizes: 52 bytes (32-bit), 64 bytes (64-bit) — endianess
    does not affect the size.
    """
    for endianess, wordsize, expectedsize in \
            [('<', 32, 52), ('>', 32, 52), ('<', 64, 64), ('>', 64, 64)]:
        ef = UnpreparedElfFile()
        # Only the null section exists; no segments yet.
        self.assertEqual(len(ef.sections), 1)
        self.assertEqual(ef.has_segments(), False)
        ef = ef.prepare(wordsize, endianess)
        f = File("test.elf", "wb")
        try:
            data = ef.todata()
            for offset, _dat in data:
                f.seek(offset)
                _dat.tofile(f)
            f.flush()
            self.assertEqual(f.size(), expectedsize)
        finally:
            # Close and remove the scratch file even on assertion
            # failure; the original left "test.elf" behind on failure.
            f.close()
            os.remove("test.elf")
def check_contents(self):
    # Verify that each relocated word in the linked binary's .text
    # section holds the expected post-link value from self.relocs.
    print "[CONT] %s" % self.bin,
    elf = UnpreparedElfFile(filename = os.path.join(base_dir, 'build',
                                                    self.bin))
    text = elf.find_section_named(".text")
    try:
        # for offset in self.addrs:
        for offset, _ in self.relocs[self.bin].items():
            # Offsets are absolute addresses; subtract the section base
            # to index into .text.
            if text.get_word_at(offset - text.address) != \
               self.relocs[self.bin][offset][1]:
                print " -> FAILED!"
                self.list_contents()
                raise Exception, \
                      ("ELF contents differs from expected! " + \
                       # "Got: %8.8x\nExpected: %8.8x") % (text.get_word_at(offset),
                       "Got: %s\nExpected: %s") % \
                      (text.get_word_at(offset - text.address),
                       self.relocs[self.bin][offset][1])
        print " -> OK"
    except KeyError:
        # No expectations recorded for this binary/offset: dump the
        # actual contents to aid debugging.
        print " -> FAILED!"
        self.list_contents()
def test_copy_into(self):
    """Every segment flavour should support copy_into()."""
    elf_from = UnpreparedElfFile()
    elf_to = UnpreparedElfFile()
    # Plain data segment.
    seg = DataElfSegment(elf_from, ByteArray("pants"))
    new_seg = seg.copy_into(elf_to)
    # Sectioned segment holding one section.
    seg = SectionedElfSegment(elf_from)
    seg.sections = [UnpreparedElfSection(elf_from, "test")]
    new_seg = seg.copy_into(elf_to)
    # Data segment whose payload has been emptied.
    seg = DataElfSegment(elf_from, ByteArray("pants"))
    seg._data = ByteArray()
    new_seg = seg.copy_into(elf_to)
    # Data segment that has already been prepared.
    seg = DataElfSegment(elf_from, ByteArray("pants"))
    seg.prepare(34)
    new_seg = seg.copy_into(elf_to)
    # Program-header segment.
    seg = HeaderElfSegment(elf_from)
    new_seg = seg.copy_into(elf_to)
def test_long_round_trip(self):
    """Reading, preparing and re-writing sample ELF files must not raise."""
    for file_name in [
            "data/null_elf",
            "data/arm_exec",
            "data/arm_exec_nosect",
            "data/arm_object",
            "data/arm_scatter_load",
            "data/mips64_exec",
            "data/mips32_exec",
            "data/ia32_exec",
            "data/amd64_exec",
            "data/ia64_exec",
            ]:
        ef = UnpreparedElfFile(filename=file_name)
        ef = ef.prepare(ef.wordsize, ef.endianess)
        try:
            ef.to_filename("elf.tmp")
            # FIXME: We can't be sure that the files produced will be
            # byte for byte equal at this point. We need to come up with
            # a test for equivalance independant of things such as
            # section ordering.
            # self.assertEqual(open("elf.tmp", "rb").read(), open(file_name, "rb").read(), "%s: failed to read back correctly" % file_name)
        finally:
            # Clean up the temp file even if to_filename() raised part
            # way through; the original leaked it on failure.
            if os.path.exists("elf.tmp"):
                os.remove("elf.tmp")
def test_long_round_trip(self):
    """Round-tripping the sample ELF files through prepare()/write must
    complete without error."""
    for file_name in ["data/null_elf",
                      "data/arm_exec",
                      "data/arm_stripped_exec",
                      "data/arm_exec_nosect",
                      "data/arm_object",
                      "data/arm_scatter_load",
                      "data/mips64_exec",
                      "data/mips32_exec",
                      "data/ia32_exec",
                      "data/amd64_exec",
                      "data/ia64_exec",
                      ]:
        ef = UnpreparedElfFile(filename=file_name)
        ef = ef.prepare(ef.wordsize, ef.endianess)
        try:
            ef.to_filename("elf.tmp")
            # FIXME: We can't be sure that the files produced will be
            # byte for byte equal at this point. We need to come up with
            # a test for equivalance independant of things such as
            # section ordering.
            # self.assertEqual(open("elf.tmp", "rb").read(), open(file_name, "rb").read(), "%s: failed to read back correctly" % file_name)
        finally:
            # Remove the scratch file even when the write fails half way;
            # the original only removed it on success.
            if os.path.exists("elf.tmp"):
                os.remove("elf.tmp")
def list_relocs(self, src=None): if src is None: srcs = self.srcs else: srcs = [src] for src in srcs: print "# Offset Type Symdx Value" elf = UnpreparedElfFile( os.path.join(base_dir, 'build', src.replace(".s", ".o"))) for sec in filter(lambda s: s.type in (SHT_REL, SHT_RELA), elf.sections): for reloc in sec.relocs: print "%8.8x %8.8x %8.8x %8.8x" % \ (reloc.offset, reloc.type, reloc.symdx, sec.info.get_word_at(reloc.offset))
def check_contents(self):
    # Check every relocated word in the binary's .text section against
    # the expected post-link value recorded in self.relocs.
    print "[CONT] %s" % self.bin,
    elf = UnpreparedElfFile(
        filename=os.path.join(base_dir, 'build', self.bin))
    text = elf.find_section_named(".text")
    try:
        # for offset in self.addrs:
        for offset, _ in self.relocs[self.bin].items():
            # The recorded offsets are absolute; rebase them onto the
            # start of .text before reading.
            if text.get_word_at(offset - text.address) != self.relocs[
               self.bin][offset][1]:
                print " -> FAILED!"
                self.list_contents()
                raise Exception, \
                      ("ELF contents differs from expected! " + \
                       # "Got: %8.8x\nExpected: %8.8x") % (text.get_word_at(offset),
                       "Got: %s\nExpected: %s") % \
                      (text.get_word_at(offset - text.address),
                       self.relocs[self.bin][offset][1])
        print " -> OK"
    except KeyError:
        # Missing expectation data — dump actual contents for debugging.
        print " -> FAILED!"
        self.list_contents()
def _get_segments(self):
    """Construct one segment of each kind used by the segment tests."""
    elf_file = UnpreparedElfFile()
    sections = [
        UnpreparedElfSection(elf_file, "test1",
                             data=ByteArray("test1 data")),
        UnpreparedElfSection(elf_file, "test2",
                             data=ByteArray("test2 data")),
    ]
    empty_sec_seg = SectionedElfSegment(None)
    full_sec_seg = SectionedElfSegment(elf_file, sections=sections)
    head_seg = HeaderElfSegment(None)
    # A header segment that has already been through prepare().
    prep_head_seg = HeaderElfSegment(None)
    prep_head_seg.prepare(37, PROG_HEADER_SIZE)
    data = ByteArray("pants")
    full_data_seg = DataElfSegment(None, vaddr=DATA_BASE,
                                   paddr=PHYS_BASE, data=data)
    # memsz exceeding the data length leaves a trailing NOBITS region.
    nobits_data_seg = DataElfSegment(None, vaddr=DATA_BASE,
                                     paddr=PHYS_BASE, data=data,
                                     memsz=10)
    return (empty_sec_seg, full_sec_seg, head_seg, prep_head_seg,
            full_data_seg, nobits_data_seg)
def test_binary_scatter_load(self):
    """
    Scatter-load files should be the same as the segment size.

    This is different to objcopy, which leaves out the final BSS
    section, but binutils can't handle scatter load files anyway.
    """
    for test_file in ("data/arm_scatter_load", ):
        # Run the file through elfweaver modify.
        exit_value, ret_stdout, ret_stderr = self._run_command(
            [test_file, "--binary", "-o", "test_output"])
        self.assertEquals(exit_value, 0)
        # Output size must equal the first segment's memory size.
        elf = UnpreparedElfFile(filename=test_file)
        self.assertEquals(os.stat("test_output").st_size,
                          elf.segments[0].get_memsz())
def list_contents(self): print "# Addr Value" elf = UnpreparedElfFile(filename = os.path.join(base_dir, 'build', self.bin)) text = elf.find_section_named(".text") for offset in self.addrs: print "%8.8x %8.8x" % (text.address + offset, text.get_word_at(offset))
def main(args): """Main program entry point.""" # We should be able ot use 'elfadorn ld' as a drop-in replacement for 'ld' # Here we detect if this is the case, and patch the command line args appropriately. # This way we avoid maintainid two different methods of dealing with args if "--" not in args: args = [args[0] , "--"] + args[1:] parser = optparse.OptionParser("%prog [options] -- <linker> [linker_options]", add_help_option=0) parser.add_option("-H", "--help", action="help") parser.add_option("-o", "--output", dest="target", metavar="FILE", help="Linker will create FILE.") parser.add_option("-f", "--file-segment-list", dest="file_segment_list", metavar="FILE", help="File containing segment names to be added to .segment_names, \ one per line") parser.add_option("-c", "--cmd-segment-list", dest="cmd_segment_list", help="quoted list of comma separated segment names to be added to .segment_names,") parser.add_option("-s", "--create-segments", dest="create_segments", action="store_true", help="Set to enable gathering orphaned sections and placing each in a new segment") (options, args) = parser.parse_args(args) if not options.target: i = 0 for a in args: if a == "-o": options.target = args[i+1] break i = i + 1 if not options.target: print "Error: -o flag must be supplied." sys.exit(1) # we need to parse the options # we are interested in any -T, --scatter or --script= options # plus the ordinary files specified on the command line scripts = get_script_names(args) objects = remove_arguments(args) linker_name = objects[1] objects = objects[2:] linker_type = get_linker_name(args, linker_name) if linker_type == "rvct": if options.create_segments: print "Warning: creating segments from sections not applicable to RVCT. Disabling option." 
options.create_segments = False # next get section names (sections, additional_scripts) = get_section_names(objects) scripts = scripts + additional_scripts # then get the text of the linker script script_text = get_linker_script_text(linker_name, scripts, additional_scripts, []) if options.create_segments: # get rid of any sections named in the script_text mentioned_sections = linker_script_sections(script_text) orphaned_sections = sections for section in mentioned_sections: # Our grammar is not perfect, sometimes it gets confused and gives back * # as a section but it is actually a filename if section != "*": remove_sections_wildcard(orphaned_sections, section) # mips-ld treats .reginfo sections somewhat magically, we do not want to treat this # as an orphan and create a segment for him, else ld will drop the text data and bss # sections completely. Magic. if '.reginfo' in orphaned_sections: orphaned_sections.remove('.reginfo') # work out the new linker command line if scripts == []: default = get_linker_script_text(args[1], [], [], args[2:]) open("default.lds", "w").write(default) if len(orphaned_sections) != 0: args += ["--script=default.lds"] # write out an additional linker script file to pass to the linker if len(orphaned_sections) != 0: write_linker_script(orphaned_sections, "additional.lds") additional_scripts.append("additional.lds") args += ["--script=additional.lds"] else: # if we dont care about these, just say there are none. 
orphaned_sections = [] # execute the linker if os.spawnvp(os.P_WAIT, args[1], args[1:]) != 0: sys.exit(1) # load the elf file elf = UnpreparedElfFile(filename=options.target) wordsize = elf.wordsize endianess = elf.endianess seglist = get_segment_names(options, elf, scripts, linker_type, orphaned_sections) # create the string table segname_tab = UnpreparedElfStringTable(".segment_names") # add the segment names for segname in seglist: segname = segname.strip() segname_tab.add_string("%s" % segname) # add the table to the file elf.add_section(segname_tab) elf = elf.prepare(wordsize, endianess) # write the file elf.to_filename(options.target)
def modify_cmd(args):
    """Implement the 'modify' command: parse the command line, apply the
    requested transformations (adjust/change fields, merge sections,
    rewrite addresses as physical, strip NOBITS or section headers) to
    the given ELF file, and write the result out.

    NOTE(review): the original docstring said this "calls merge" — the
    body performs the modifications directly.
    """
    parser = optparse.OptionParser("%prog modify file [options]",
                                   add_help_option=0)
    parser.add_option("-H", "--help", action="help")
    parser.add_option("--adjust", nargs=2, dest="adjust", action="append",
                      default=[])
    parser.add_option("--physical", dest="physical", action="store_true")
    parser.add_option("--physical_entry", dest="physical_entry",
                      action="store_true")
    parser.add_option("--change", dest="change", action="append", nargs=2,
                      default=[])
    parser.add_option("--merge_sections", dest="merge_sections",
                      action="append", default=[])
    parser.add_option("--remove_nobits", dest="remove_nobits",
                      action="store_true")
    parser.add_option("-o", "--output", dest="output", metavar="FILE",
                      help="Display the ELF file header")
    parser.add_option("--no-section-headers", dest="remove_section_headers",
                      action="store_true")
    (options, args) = parser.parse_args(args)
    if len(args) != 1:
        parser.error("Must supply a file to operate on.")
    filename = args[0]
    elf = UnpreparedElfFile(filename=filename)
    # Apply field adjustments; a leading +/- means a relative offset.
    for (field_desc, offset) in options.adjust:
        absolute = True
        if offset.startswith("+") or offset.startswith("-"):
            absolute = False
        offset = long(offset, 0)
        field_desc = field_desc.split(".")
        adjust(elf, field_desc, offset, absolute)
    # Apply old=new value substitutions on the named fields.
    for (field_desc, mod) in options.change:
        field_desc = field_desc.split(".")
        (old, new) = [long(x, 0) for x in mod.split("=")]
        change(elf, field_desc, old, new)
    for section_name in options.merge_sections:
        merge_sections(elf, section_name)
    # Translate the entry point from virtual to physical using the
    # segment that contains it.
    if options.physical_entry:
        entry = elf.entry_point
        for segment in elf.segments:
            if segment.contains_vaddr(entry):
                offset = segment.vaddr - segment.paddr
                elf.entry_point = entry - offset
    # Rebase every segment (and its sections) to physical addresses.
    if options.physical:
        for segment in elf.segments:
            offset = segment.vaddr - segment.paddr
            segment.vaddr = segment.paddr
            if segment.has_sections():
                for section in segment.get_sections():
                    section.address -= offset
    if options.remove_nobits:
        elf.remove_nobits()
    if options.remove_section_headers:
        elf.remove_section_headers()
    elf = elf.prepare(elf.wordsize, elf.endianess)
    # Default to rewriting the input file unless -o was given.
    outfile = filename
    if options.output:
        outfile = options.output
    elf.to_filename(outfile)
def test_et_ph_offset(self):
    """A fixed program-header offset must survive prepare()."""
    elf = UnpreparedElfFile()
    elf.set_ph_offset(100, True)
    self.assertEqual(elf.get_ph_offset(), 100)
    elf = elf.prepare(32, "<")
    # The prepared file reports the same fixed offset.
    self.assertEqual(elf.get_ph_offset(), 100)
class Image:
    """Representation of the contents of the final image."""

    # Different types of segments.
    # NOTE: PROGRAM and EXTENSION must have the values 1 and 2 to
    # maintain binary compatibility with the iguana library function
    # get_elf_info().
    PROGRAM = 1
    EXTENSION = 2
    KERNEL = 3
    ROOT_PROGRAM = 4

    class Patch:
        """A Class for describing patches to segments."""

        def __init__(self, addr, size, value):
            # addr: target address; size: number of bytes to change;
            # value: the raw value (or an ImageObject whose physical
            # address is patched in -- see Image.apply_patches).
            self.addr = addr
            self.size = size
            self.value = value

        def get_addr(self):
            """Return the address to patch."""
            return self.addr

        def get_size(self):
            """Return the number of bytes to change."""
            return self.size

        def get_value(self):
            """Return the value to patch in."""
            return self.value

    class AttrStack:
        """
        Class for holding a stack of attribute values. Virtpool,
        physpool and pagers operate in that way.

        These stacks differ from regular stacks in that the top of the
        stack is defined to be the last non-None entry in the list.
        """

        def __init__(self):
            # self.top is an index into self.stack, or None when the
            # stack holds no non-None entry.
            self.stack = []
            self.top = None

        def __getitem__(self, index):
            return self.stack[index]

        def set(self, value):
            """
            Set the stack to only contain the given value.
            """
            assert value is not None
            self.stack = [value]
            self.top = 0
            assert self.top is None or self.top < len(self.stack)

        def push(self, value):
            """
            Push an item onto the stack. If the item is not None, then
            then will become the top of the stack.
            """
            self.stack.append(value)
            if value is not None:
                self.top = len(self.stack) - 1
            # Defensive guard; under Python 2 a None top compares less
            # than 0, and is simply normalised back to None.
            if self.top < 0:
                self.top = None
            assert self.top is None or \
                   (self.top < len(self.stack) and \
                    self.stack[self.top] is not None)

        def pop(self):
            """
            Pop on item off the stack. If the item is non-None, then
            the top of the stack is moved to the last non-None value
            in the list.
            """
            value = self.stack.pop()
            if value is not None:
                # Recalculate the top of the stack.
                self.top = None
                i = 0
                for item in self.stack:
                    if item is not None:
                        self.top = i
                    i += 1
                assert self.top is None or \
                       self.stack[self.top] is not None
            assert self.top is None or self.top < len(self.stack)

        def tos(self):
            """
            Return the item at the top of the stack, or None if there
            is no such item.
            """
            if self.top is None:
                return None
            else:
                return self.stack[self.top]

    def __init__(self, ph_offset):
        # ph_offset: fixed program-header offset for the output ELF,
        # or None to let the ELF library choose (see set_kernel).
        self.ph_offset = ph_offset
        self.kconfig = KernelInit()
        # self.objects is populated by make_single_list() during
        # layout; it is None until then.
        self.objects = None
        self.kernel_segments = []
        self.kernel_heap = None
        self.kernel_arrays = []
        self.segments = []
        self.memsections = []
        self.zones = []
        self.elf = None
        self.endianess = None
        self.wordsize = None
        self.patches = []
        self.virt_pool_stack = Image.AttrStack()
        self.phys_pool_stack = Image.AttrStack()
        self.pager_stack = Image.AttrStack()
        self.direct_stack = Image.AttrStack()
        self.protected_segment = None
        self.groups = []

    def get_elf(self):
        """Return the ELF file that will contain the image."""
        return self.elf

    def remove_section_headers(self):
        """Strip the section header table from the output ELF file."""
        self.elf.remove_section_headers()

    def current_pools(self):
        """Return the current virtual and physical pools."""
        return (self.virt_pool_stack.tos(), self.phys_pool_stack.tos())

    def new_attrs(self, namespace, for_segment = False):
        """
        Create a new attribute object. The attributes are initialised
        with the current values from the attribute stack and the
        supplied namespace.
        """
        # Only segments inherit the current 'direct' setting; all
        # other objects default to False.
        def_direct = False
        if for_segment:
            def_direct = self.direct_stack.tos()
        if namespace is None:
            path = '/'
        else:
            path = namespace.abs_name('.')
        return ImageAttrs(path = path,
                          virtpool = self.virt_pool_stack.tos(),
                          physpool = self.phys_pool_stack.tos(),
                          pager = self.pager_stack.tos(),
                          direct = def_direct)

    def set_attrs_stack(self, def_virt = None, def_phys = None,
                        def_pager = None, def_direct = None):
        """
        Prime the attribute stack with initial values.
        """
        if def_virt is not None:
            self.virt_pool_stack.set(def_virt)
        if def_phys is not None:
            self.phys_pool_stack.set(def_phys)
        if def_pager is not None:
            self.pager_stack.set(def_pager)
        if def_direct is not None:
            self.direct_stack.set(def_direct)

    def push_attrs(self, virtual = None, physical = None, pager = None,
                   direct = None):
        """Push values onto the attribute stack."""
        self.virt_pool_stack.push(virtual)
        self.phys_pool_stack.push(physical)
        self.pager_stack.push(pager)
        self.direct_stack.push(direct)

    def pop_attrs(self):
        """Pop values from the attribute stack."""
        self.virt_pool_stack.pop()
        self.phys_pool_stack.pop()
        self.pager_stack.pop()
        self.direct_stack.pop()

    def prepare(self, machine):
        """Prepare the ELF file for writing to disk."""
        self.elf = self.elf.prepare(self.wordsize, self.endianess)

    def set_rootserver_stack(self, stack):
        """Record the root-servers stack pointer."""
        self.kconfig.set_rootserver_stack(stack)

    def write_out_image(self, output_file, machine):
        """Write out the final ELF file."""
        # Record the physical properties of the root server.
        # Note: Groovy functional programming!
        root_mappings = [o.root_mappings() for o in self.objects
                         if o.root_mappings() is not None]
        assert len(root_mappings) > 0
        self.kconfig.set_rootserver_mappings(root_mappings)
        # Record memory descriptors for those objects that need them.
        for obj in self.objects:
            descs = obj.make_memdesc()
            if descs is not None:
                for desc in descs:
                    self.kconfig.add_mem_descriptor(desc)
        # Now write out the data.
        self.kconfig.update_elf(self.elf, machine)
        #self.elf = self.elf.prepare(self.wordsize, self.endianess)
        self.elf.to_filename(output_file)

    def make_single_list(self):
        """
        Place all of the objects into a single list to generate a good
        layout. Items that will be written to the ELF file are placed
        together to try and reduce the size of the image.
        """
        #
        # Approximate proper support for proximity by placing the
        # kernel heap close to the kernel and memsections with data
        # close to the segments.
        #
        # Proper support should be added to ensure that wombat's
        # vmlinux memsection is close to the wombat text.
        #
        self.objects = []
        self.objects.extend(self.kernel_segments)
        self.objects.extend(self.kernel_arrays)
        self.objects.append(self.kernel_heap)
        self.objects.extend(self.segments)
        self.objects.extend(self.memsections)

    def layout(self, machine, pools):
        """Layout the image in memory."""
        self.make_single_list()
        # Zones are primed with the bottom-of-stack (default) pools.
        for obj in self.zones:
            obj.prime(self.virt_pool_stack[0], self.phys_pool_stack[0],
                      pools)
        for obj in self.groups:
            obj.layout(self.virt_pool_stack[0], self.phys_pool_stack[0],
                       machine, pools)

    def apply_patches(self):
        """Apply registered patches."""
        for segment in self.elf.segments:
            for section in segment.sections:
                # Select the patches whose address falls inside this
                # section.
                patches = [patch for patch in self.patches
                           if patch.addr >= section.address and
                           patch.addr < section.address + section.get_size()]
                for patch in patches:
                    offset = patch.addr - section.address
                    # An ImageObject patch value means "patch in that
                    # object's physical address".
                    if isinstance(patch.value, weaver.image.ImageObject):
                        value = patch.value.attrs.phys_addr
                    else:
                        value = patch.value
                    section.get_data().set_data(offset, value, patch.size,
                                                self.endianess)

    def get_value(self, address, size, endianess=None):
        """get a value from the image."""
        # MIPS64/O64 sign-extends 32-bit kernel addresses.
        if self.elf.machine == ElfMachine(8):
            if self.elf.flags & EF_MIPS_ABI_O64:
                if address & 0x80000000:
                    address |= 0xffffffff00000000L
        for segment in self.elf.segments:
            for section in segment.get_sections():
                # NOTE(review): '>' excludes the section's first byte,
                # whereas apply_patches uses '>=' for the same bounds
                # test -- possible off-by-one, confirm before changing.
                if address > section.address and \
                   address < (section.address + section.get_size()):
                    offset = address - section.address
                    if endianess is None:
                        endianess = self.elf.endianess
                    return section.get_data().get_data(offset, size,
                                                       endianess)
        raise MergeError, "Could not find address %x in Image." % address

    def set_kernel(self, kernel):
        """ Record the kernel."""
        # The output ELF inherits its identity (machine, ABI, flags,
        # entry point, etc.) from the kernel's ELF file.
        self.elf = UnpreparedElfFile()
        self.endianess = kernel.endianess
        self.wordsize = kernel.wordsize
        self.elf.elf_type = ET_EXEC
        self.elf.machine = kernel.machine
        self.elf.osabi = kernel.osabi
        self.elf.abiversion = kernel.abiversion
        self.elf.flags = kernel.flags
        self.elf.entry_point = kernel.entry_point
        if self.ph_offset is not None:
            self.elf.set_ph_offset(self.ph_offset, fixed=True)

    def patch(self, addr, size, value):
        """Record the details of a patch to a segment."""
        # Same MIPS64/O64 sign extension as get_value().
        if self.elf.machine == ElfMachine(8):
            if self.elf.flags & EF_MIPS_ABI_O64:
                if addr & 0x80000000:
                    addr |= 0xffffffff00000000L
        self.patches.append(self.Patch(addr, size, value))

    def set_kernel_heap(self, attrs, pools):
        """ Record the details of the kernel heap. """
        self.kernel_heap = ImageKernelHeap(attrs, pools)
        return self.kernel_heap

    def add_kernel_array(self, attrs, pools):
        """Record the details of the kernel array."""
        array = ImageKernelArray(attrs, pools)
        self.kernel_arrays.append(array)
        return array

    def add_segment(self, segment_index, section_prefix, segment,
                    file_type, attrs, machine, pools):
        """Create a segment for inclusion in the image."""
        if not valid_segment(segment):
            return None
        # Remove any pathname components from the prefix.
        section_prefix = os.path.basename(section_prefix)
        # Prepare the image for inclusion.
        new_segment = segment.copy()
        # Align segments to the page boundary if is safe to do so.
        # RVCT tends to give very conservative alignment (1 word) to
        # segments that could be paged aligned.
        if new_segment.vaddr % machine.min_page_size() == 0 and \
           new_segment.align < machine.min_page_size():
            new_segment.align = machine.min_page_size()
        # Rename the sections in the segment, giving each the supplied
        # prefix
        if new_segment.has_sections():
            for section in new_segment.get_sections():
                assert section.link is None
                sec_name = section.name
                #strip GNU leading dots in section names
                if sec_name[0] == ".":
                    sec_name = sec_name[1:]
                section.name = "%s.%s" % (section_prefix, sec_name)
                if section_prefix != "kernel":
                    for symbol in section.symbols:
                        symbol.name = "%s-%s" % (section_prefix,
                                                 symbol.name)
                self.elf.add_section(section)
        iseg = ImageSegment(new_segment, segment_index, file_type,
                            attrs, pools)
        if attrs.protected:
            # At most one segment may be marked protected.
            if self.protected_segment is not None:
                raise MergeError, \
                      'Only one segment can be declared protected. ' \
                      'Found "%s" and "%s".' % \
                      (self.protected_segment.get_attrs().abs_name(),
                       attrs.abs_name())
            self.protected_segment = iseg
        # Kernel segments need to be at the start of the memory pools
        # to place them in a different list to keep track of them.
        if file_type == Image.KERNEL:
            self.kernel_segments.append(iseg)
        else:
            self.segments.append(iseg)
        self.elf.add_segment(new_segment)
        return iseg

    def add_memsection(self, attrs, machine, pools):
        """
        Create a memsection for inclusion in the image.

        If the data or file attributes of 'attr' are non-None, then a
        ELF segment will be created, otherwise the memsection will
        will be included in the address layout process, but will be
        created at runtime by Iguana server.
        """
        new_segment = None
        in_image = False
        if attrs.file is not None or attrs.data is not None:
            if attrs.file is not None:
                the_file = open(attrs.file, 'r')
                data = ByteArray(the_file.read())
                the_file.close()
            else:
                data = attrs.data
            # Zero-pad the data up to the requested size, then record
            # the actual byte length back into the attributes.
            if attrs.size is not None and len(data) < attrs.size:
                data.extend([0] * (attrs.size - len(data)))
            attrs.size = data.buffer_info()[1] * data.itemsize
            sect = UnpreparedElfSection(attrs.name, SHT_PROGBITS,
                                        attrs.virt_addr, data = data,
                                        flags = SHF_WRITE | SHF_ALLOC)
            self.elf.add_section(sect)
            new_segment = SectionedElfSegment(PT_LOAD, attrs.virt_addr,
                                              attrs.phys_addr, PF_R | PF_W,
                                              machine.min_page_size(),
                                              sections=[sect])
            self.elf.add_segment(new_segment)
            in_image = True
        obj = ImageMemsection(new_segment, attrs, pools)
        # If the memsection has data that goes into the image, then
        # put it at the front of the list so that it will be near the
        # code segments.
        if in_image:
            self.memsections = [obj] + self.memsections
        else:
            self.memsections.append(obj)
        return obj

    def add_zone(self, attrs, zone):
        """Create a zone for inclusion in the image."""
        izone = ImageZone(attrs, zone)
        self.zones.append(izone)
        return izone

    def add_group(self, distance, items, error_message = None):
        """Add an image group."""
        # Generate a static group for virtual addresses.
        virt_group = [i.get_allocator_item(is_virtual = True)
                      for i in items
                      if i.get_allocator_item(is_virtual = True)
                      is not None]
        if len(virt_group) != 0:
            group = ImageGroup(distance, virt_group, error_message,
                               is_virtual = True)
            self.groups.append(group)
        # Generate a static group for physical addresses.
        phys_group = [i.get_allocator_item(is_virtual = False)
                      for i in items
                      if i.get_allocator_item(is_virtual = False)
                      is not None]
        if len(phys_group) != 0:
            group = ImageGroup(distance, phys_group, error_message,
                               is_virtual = False)
            self.groups.append(group)

    def dump(self):
        """
        Print out a virtual and physical memory map of the final
        image.
        """
        virtual_objects = {}
        physical_objects = {}
        # Key each map by the (base, end) address pair of the object.
        for obj in self.objects:
            if obj.attrs.virt_addr is not None:
                vbase = obj.attrs.virt_addr
                vend = vbase + obj.attrs.size - 1
                virtual_objects[vbase, vend] = obj.attrs.abs_name()
            if obj.attrs.phys_addr is not None:
                pbase = obj.attrs.phys_addr
                pend = pbase + obj.attrs.size - 1
                physical_objects[pbase, pend] = obj.attrs.abs_name()
        print "VIRTUAL:"
        for (base, end), name in sorted(virtual_objects.items()):
            print " <%08x:%08x> %s" % (base, end, name)
        print "PHYSICAL:"
        for (base, end), name in sorted(physical_objects.items()):
            print " <%08x:%08x> %s" % (base, end, name)
def collect_xml(self, okl4_el, ignore_name, namespace, machine,
                pools, kernel, image):
    """
    Handle an Iguana Server Compound Object.

    Walk the <okl4> cell element: register the cell and its main
    space with the kernel, collect the cell's ELF file, segments,
    patches, IRQs, threads, devices, memsections, extra spaces and
    heap, and build up the cell environment.
    """
    self.cell = \
            kernel.register_cell(okl4_el.name,
                                 okl4_el.kernel_heap,
                                 max_caps = getattr(okl4_el, "caps", None),
                                 max_priority = getattr(okl4_el,
                                                        "max_priority",
                                                        None))
    self.name = okl4_el.name
    self.namespace = namespace.add_namespace(self.name)
    self.space = \
        self.cell.register_space(self.namespace, "MAIN",
                                 is_privileged = True,
                                 max_clists = getattr(okl4_el, "clists",
                                                      None),
                                 max_spaces = getattr(okl4_el, "spaces",
                                                      None),
                                 max_mutexes = getattr(okl4_el, "mutexes",
                                                       None),
                                 max_threads = getattr(okl4_el, "threads",
                                                       None),
                                 max_priority = getattr(okl4_el,
                                                        "max_priority",
                                                        None),
                                 plat_control = \
                                 getattr(okl4_el, "platform_control",
                                         False))
    # Pool/pager/direct attributes from the element override the
    # current defaults for the duration of this cell.
    image.push_attrs(
        virtual = getattr(okl4_el, "virtpool", None),
        physical = getattr(okl4_el, "physpool", None),
        pager = make_pager_attr(getattr(okl4_el, "pager", None)),
        direct = getattr(okl4_el, "direct", None))
    (self.def_virtpool, self.def_physpool) = image.current_pools()
    self.collect_mutexes(okl4_el, self.namespace, self.space)
    env_el = okl4_el.find_child("environment")
    self.env = CellEnvironment(okl4_el.name, self.namespace,
                               machine, image, kernel,
                               self.space.mappings)
    if env_el != None:
        self._collect_environment(env_el, self.env)
    # Set these up now even though we can't actually assign values
    # till later
    self.phys_attrs = \
        image.new_attrs(self.namespace.add_namespace("default_physpool"))
    self.phys_attrs.attach = PF_R | PF_W | PF_X
    self.phys_attrs.mem_type = self.phys_attrs.unmapped
    mapping = self.space.register_mapping(self.phys_attrs)
    self.env.add_physmem_segpool_entry("MAIN_PHYSMEM_SEGPOOL", mapping)
    self.virt_attrs = \
        image.new_attrs(self.namespace.add_namespace("default_virtpool"))
    self.env.add_virtmem_pool_entry("MAIN_VIRTMEM_POOL", self.virt_attrs)
    self.space.utcb = \
        image.new_attrs(self.namespace.add_namespace("main_utcb_area"))
    self.space.utcb.attach = PF_R
    filename = os.path.join(okl4_el._path,
                            okl4_el.file)
    self.elf = UnpreparedElfFile(filename=filename)
    if self.elf.elf_type != ET_EXEC:
        raise MergeError("All the merged ELF files must be of EXEC type.")
    # Find out which version of libokl4 that the cell was built
    # against
    sym = self.elf.find_symbol("okl4_api_version")
    if sym == None:
        raise MergeError("Unable to locate the symbol 'okl4_api_version' in file \"%s\". Cells must link with libokl4." % filename)
    self.api_version = self.elf.get_value(sym.value, sym.size,
                                          self.elf.endianess)
    if self.api_version == None:
        raise MergeError("Unable to read the symbol 'okl4_api_version' in file \"%s\". Cells must link with libokl4." % filename)
    self.env.add_elf_info_entry(os.path.basename(okl4_el.file),
                                image.PROGRAM, self.elf.entry_point)
    # Collect the program segments described by the XML.
    segment_els = okl4_el.find_children("segment")
    segs = collect_elf_segments(self.elf, image.PROGRAM, segment_els,
                                filename, [], self.namespace, image,
                                machine, pools)
    self.elf_prog_segments = segs
    for seg in segs:
        self.env.add_elf_segment_entry(okl4_el.name + '.' +
                                       seg.attrs.ns_node.name,
                                       seg.segment)
        seg_ns = seg.attrs.ns_node
        mapping = self.space.register_mapping(seg.attrs)
        self.add_standard_mem_caps(seg_ns, mapping, seg.attrs)
    patch_els = okl4_el.find_children("patch")
    collect_patches(self.elf, patch_els, filename, image)
    # Record any IRQs that are assigned to the initial program.
    for irq_el in okl4_el.find_children("irq"):
        self.space.register_irq(irq_el.value)
    self.env.add_device_irq_list("NO_DEVICE",
                                 [irq_el.value for irq_el \
                                  in okl4_el.find_children("irq")])
    # Collect the implicit thread
    if not hasattr(okl4_el, 'priority'):
        okl4_el.priority = kernel.kernel.MAX_PRIORITY
    threads = []
    threads.append(self.collect_thread(self.elf, okl4_el, self.namespace,
                                       image, machine, pools, kernel,
                                       self.space, self.elf.entry_point,
                                       "main", True))
    # FIXME: Need to check up on actual entry point's
    for thread_el in okl4_el.find_children("thread"):
        threads.append(self.collect_thread(self.elf, thread_el,
                                           self.namespace, image,
                                           machine, pools, kernel,
                                           self.space, "thread_start",
                                           cell_create_thread = True))
    device_mem = \
        self.collect_use_devices(okl4_el, self.space, self.namespace,
                                 image, machine, pools)
    memsection_objs = \
        self.collect_memsections(okl4_el, self.space, self.namespace,
                                 image, machine, pools)
    # Collect all data for any extra spaces defined in the XML
    for space_el in okl4_el.find_children("space"):
        space_ns = self.namespace.add_namespace(space_el.name)
        # A space's max_priority falls back to the cell's value.
        space = self.cell.register_space(space_ns, space_el.name,
                                         max_priority = \
                                         getattr(space_el, "max_priority", \
                                                 getattr(okl4_el,
                                                         "max_priority",
                                                         None)))
        image.push_attrs(
            virtual = getattr(space_el, "virtpool", None),
            physical = getattr(space_el, "physpool", None),
            pager = make_pager_attr(getattr(space_el, "pager", None)),
            direct = getattr(space_el, "direct", None))
        for thread_el in space_el.find_children("thread"):
            threads.append(self.collect_thread(self.elf, thread_el,
                                               space_ns, image, machine,
                                               pools, kernel, space,
                                               "thread_start",
                                               cell_create_thread = True))
        self.collect_mutexes(space_el, space_ns, space)
        device_mem.extend(
            self.collect_use_devices(space_el, space, space_ns,
                                     image, machine, pools))
        memsection_objs.extend(
            self.collect_memsections(space_el, space, space_ns,
                                     image, machine, pools))
        self.env.add_kspace_entry(space_el.name + "_KSPACE", space)
        space.utcb = \
            image.new_attrs(space_ns.add_namespace(space_el.name +
                                                   "_utcb_area"))
        space.utcb.attach = PF_R
    # Weave a kclist for the main space.
    main_kclist = self.env.add_kclist_entry("MAIN_KCLIST", self.cell)
    # Weave the root kspace object.
    main_kspace = self.env.add_kspace_entry("MAIN_KSPACE", self.space)
    # Weave the root protection domain object.
    self.env.add_pd_entry("MAIN_PD", self.space, main_kspace, 32,
                          machine.min_page_size(), self.elf)
    # Find heap and add it
    heap_el = okl4_el.find_child('heap')
    if heap_el is None:
        heap_el = ParsedElement('heap')
    heap_attr = collect_memobj_attrs(heap_el, self.namespace,
                                     image, machine)
    if heap_attr.size == None:
        heap_attr.size = DEFAULT_HEAP_SIZE
    self.heap_ms = image.add_memsection(heap_attr, machine, pools)
    self.cell.heap = self.heap_ms.attrs
    mapping = self.space.register_mapping(self.heap_ms.attrs)
    self.add_standard_mem_caps(heap_attr.ns_node, mapping, heap_attr)
    self.elf_segments = \
        [thread.get_stack_ms() for thread in threads] + \
        [self.heap_ms] + memsection_objs + device_mem
    # Placeholder entry; the real free-memory range is filled in by
    # set_free_pools() later.
    self.env.add_kernel_info_entry(0, 0, kernel)
    # Add command line arguments
    commandline_el = okl4_el.find_child("commandline")
    if commandline_el is not None:
        args = [arg_el.value for arg_el in
                commandline_el.find_children("arg")]
    else:
        args = []
    self.env.add_arg_list(args)
    self.cell.env = self.env
def test_add_symbols(self):
    """Exercise add_symbols(), section_symbols() and find_symbol()."""
    # Adding symbols to a file with no symbol table must create a
    # new symbol table.
    elf = UnpreparedElfFile()
    self.assertEqual(elf.get_symbol_table(), None)
    self.assertEqual(elf.get_symbols(), [])
    elf.add_symbols([])
    self.assertNotEqual(elf.get_symbol_table(), None)
    # Apart from the opening null symbol there should be nothing.
    self.assertEqual(elf.get_symbols()[1:], [])

    first_sect = UnpreparedElfSection(elf)
    first_syms = [ElfSymbol("a", first_sect), ElfSymbol("b", first_sect)]
    elf.add_symbols(first_syms)
    self.assertEqual(elf.get_symbols()[1:], first_syms)

    # Symbols belonging to a second section must be reported per
    # section.
    second_sect = UnpreparedElfSection(elf)
    second_syms = [ElfSymbol("c", second_sect), ElfSymbol("d", second_sect)]
    elf.add_symbols(second_syms)
    self.assertEqual(elf.section_symbols(first_sect), first_syms)
    self.assertEqual(elf.section_symbols(second_sect), second_syms)

    symbol_dict = {}
    for name in ["foo", "bar"]:
        symbol_dict[name] = ElfSymbol(name, first_sect)
    elf.add_symbols(symbol_dict.values())
    for name in symbol_dict.keys():
        # Full string match.
        self.assertEqual(elf.find_symbol(name), symbol_dict[name])
        # Partial suffix match.
        self.assertEqual(elf.find_symbol(name[1:]), symbol_dict[name])
    self.assertEqual(elf.find_symbol("missing"), None)
def collect_extension_element(extension_el, pd, namespace, rp_elf, image,
                              machine, bootinfo, pools):
    """
    Collect an <extension> element: optionally load its ELF file,
    attach its segments to the given protection domain, record
    patches, and register the extension's entry point with bootinfo.
    Symbols are looked up in the root program's ELF (rp_elf) when no
    file is supplied.
    """
    # New namespace for objects living in the extension.
    extn_namespace = namespace.add_namespace(extension_el.name)
    elf = None
    start = None
    name = None
    physpool = getattr(extension_el, 'physpool', None)
    pager = getattr(extension_el, 'pager', None)
    direct = getattr(extension_el, 'direct', None)
    # Push the overriding pools for the extension.
    image.push_attrs(physical = physpool,
                     pager = pager,
                     direct = direct)
    if hasattr(extension_el, "file"):
        elf = UnpreparedElfFile(filename=extension_el.file)
        if elf.elf_type != ET_EXEC:
            raise MergeError, "All the merged ELF files must be of EXEC type."
        segment_els = extension_el.find_children("segment")
        # NOTE(review): other callers of collect_elf_segments in this
        # file pass an extra list argument before the namespace --
        # confirm this call matches the current signature.
        segs = collect_elf_segments(elf, image.EXTENSION, segment_els,
                                    extension_el.name, extn_namespace,
                                    image, machine, pools)
        segs_ms = [bootinfo.record_segment_info(extension_el.name, seg,
                                                image, machine, pools)
                   for seg in segs]
        for seg_ms in segs_ms:
            pd.attach_memsection(seg_ms)
        # Record any patches being made to the program.
        patch_els = extension_el.find_children("patch")
        collect_patches(elf, patch_els, extension_el.file, image)
        start = elf.entry_point
        name = extension_el.file
    # An explicit "start" attribute overrides the entry point (and
    # name) taken from the ELF file above.
    if hasattr(extension_el, "start"):
        start = extension_el.start
        name = extension_el.name
    # If no file is supplied, look for symbols in the root
    # program.
    if elf is None:
        elf = rp_elf
    elf = elf.prepare(elf.wordsize, elf.endianess)
    start = start_to_value(start, elf)
    bootinfo.add_elf_info(name = name,
                          elf_type = image.EXTENSION,
                          entry_point = start)
    image.pop_attrs()
def test_init(self):
    """A symbol table can be constructed on a fresh ELF file."""
    elf = UnpreparedElfFile()
    symtab = UnpreparedElfSymbolTable(elf, ".symtab")
def test_empty_reloca(self):
    """A relocation section with no entries reports zero size."""
    elf = UnpreparedElfFile()
    elf.wordsize = 32
    empty_reloc = UnpreparedElfReloc(elf)
    self.assertEqual(empty_reloc.get_size(), 0)
class OKL4Cell(Cell): # disable: Too many arguments # pylint: disable-msg=R0913 """ Cell for iguana programs, pds, drivers and other matters. """ element = OKL4_el def __init__(self): Cell.__init__(self) self.name = "" self.heap_ms = None self.stack_ms = None self.utcb_ms = None self.elf = None self.api_version = None self.elf_segments = [] self.elf_prog_segments = None self.def_virtpool = None self.def_physpool = None self.phys_addr = None self.virt_addr = None self.phys_attrs = None self.virt_attrs = None self.env = None self.cell = None self.namespace = None self.space = None def _collect_environment(self, env_el, env): """ Collect the details of the environmen element. """ # Collect any custom entries in the environment. if env_el is not None: for entry_el in env_el.find_children('entry'): cap_name = None attach = None if hasattr(entry_el, 'value'): env.add_value_entry(entry_el.key, entry_el.value) else: if not hasattr(entry_el, 'cap'): raise MergeError, 'Value or cap attribute required.' 
cap_name = entry_el.cap if hasattr(entry_el, 'attach'): attach = attach_to_elf_flags(entry_el.attach) env.add_cap_entry(entry_el.key, cap_name = cap_name, attach = attach) def collect_xml(self, okl4_el, ignore_name, namespace, machine, pools, kernel, image): """Handle an Iguana Server Compound Object""" self.cell = \ kernel.register_cell(okl4_el.name, okl4_el.kernel_heap, max_caps = getattr(okl4_el, "caps", None), max_priority = getattr(okl4_el, "max_priority", None)) self.name = okl4_el.name self.namespace = namespace.add_namespace(self.name) self.space = \ self.cell.register_space(self.namespace, "MAIN", is_privileged = True, max_clists = getattr(okl4_el, "clists", None), max_spaces = getattr(okl4_el, "spaces", None), max_mutexes = getattr(okl4_el, "mutexes", None), max_threads = getattr(okl4_el, "threads", None), max_priority = getattr(okl4_el, "max_priority", None), plat_control = \ getattr(okl4_el, "platform_control", False)) image.push_attrs( virtual = getattr(okl4_el, "virtpool", None), physical = getattr(okl4_el, "physpool", None), pager = make_pager_attr(getattr(okl4_el, "pager", None)), direct = getattr(okl4_el, "direct", None)) (self.def_virtpool, self.def_physpool) = image.current_pools() self.collect_mutexes(okl4_el, self.namespace, self.space) env_el = okl4_el.find_child("environment") self.env = CellEnvironment(okl4_el.name, self.namespace, machine, image, kernel, self.space.mappings) if env_el != None: self._collect_environment(env_el, self.env) # Set these up now even though we can't actually assign values # till later self.phys_attrs = image.new_attrs(self.namespace.add_namespace("default_physpool")) self.phys_attrs.attach = PF_R | PF_W | PF_X self.phys_attrs.mem_type = self.phys_attrs.unmapped mapping = self.space.register_mapping(self.phys_attrs) self.env.add_physmem_segpool_entry("MAIN_PHYSMEM_SEGPOOL", mapping) self.virt_attrs = image.new_attrs(self.namespace.add_namespace("default_virtpool")) 
self.env.add_virtmem_pool_entry("MAIN_VIRTMEM_POOL", self.virt_attrs) self.space.utcb = image.new_attrs(self.namespace.add_namespace("main_utcb_area")) self.space.utcb.attach = PF_R filename = os.path.join(okl4_el._path, okl4_el.file) self.elf = UnpreparedElfFile(filename=filename) if self.elf.elf_type != ET_EXEC: raise MergeError("All the merged ELF files must be of EXEC type.") # Find out which version of libokl4 that the cell was built # against sym = self.elf.find_symbol("okl4_api_version") if sym == None: raise MergeError("Unable to locate the symbol 'okl4_api_version' in file \"%s\". Cells must link with libokl4." % filename) self.api_version = self.elf.get_value(sym.value, sym.size, self.elf.endianess) if self.api_version == None: raise MergeError("Unable to read the symbol 'okl4_api_version' in file \"%s\". Cells must link with libokl4." % filename) self.env.add_elf_info_entry(os.path.basename(okl4_el.file), image.PROGRAM, self.elf.entry_point) segment_els = okl4_el.find_children("segment") segs = collect_elf_segments(self.elf, image.PROGRAM, segment_els, filename, [], self.namespace, image, machine, pools) self.elf_prog_segments = segs for seg in segs: self.env.add_elf_segment_entry(okl4_el.name + '.' + seg.attrs.ns_node.name, seg.segment) seg_ns = seg.attrs.ns_node mapping = self.space.register_mapping(seg.attrs) self.add_standard_mem_caps(seg_ns, mapping, seg.attrs) patch_els = okl4_el.find_children("patch") collect_patches(self.elf, patch_els, filename, image) # Record any IRQs that are assigned to the initial program. 
for irq_el in okl4_el.find_children("irq"): self.space.register_irq(irq_el.value) self.env.add_device_irq_list("NO_DEVICE", [irq_el.value for irq_el \ in okl4_el.find_children("irq")]) # Collect the implicit thread if not hasattr(okl4_el, 'priority'): okl4_el.priority = kernel.kernel.MAX_PRIORITY threads = [] threads.append(self.collect_thread(self.elf, okl4_el, self.namespace, image, machine, pools, kernel, self.space, self.elf.entry_point, "main", True)) # FIXME: Need to check up on actual entry point's for thread_el in okl4_el.find_children("thread"): threads.append(self.collect_thread(self.elf, thread_el, self.namespace, image, machine, pools, kernel, self.space, "thread_start", cell_create_thread = True)) device_mem = \ self.collect_use_devices(okl4_el, self.space, self.namespace, image, machine, pools) memsection_objs = \ self.collect_memsections(okl4_el, self.space, self.namespace, image, machine, pools) # Collect all data for any extra spaces defined in the XML for space_el in okl4_el.find_children("space"): space_ns = self.namespace.add_namespace(space_el.name) space = self.cell.register_space(space_ns, space_el.name, max_priority = getattr(space_el, "max_priority", \ getattr(okl4_el, "max_priority", None))) image.push_attrs( virtual = getattr(space_el, "virtpool", None), physical = getattr(space_el, "physpool", None), pager = make_pager_attr(getattr(space_el, "pager", None)), direct = getattr(space_el, "direct", None)) for thread_el in space_el.find_children("thread"): threads.append(self.collect_thread(self.elf, thread_el, space_ns, image, machine, pools, kernel, space, "thread_start", cell_create_thread = True)) self.collect_mutexes(space_el, space_ns, space) device_mem.extend( self.collect_use_devices(space_el, space, space_ns, image, machine, pools)) memsection_objs.extend( self.collect_memsections(space_el, space, space_ns, image, machine, pools)) self.env.add_kspace_entry(space_el.name + "_KSPACE", space) space.utcb = 
        # (Tail of collect_xml, continued from an earlier part of the file.)
        # Reserve a UTCB-area namespace/attrs for the space; the area is
        # sized later in generate_dynamic_segments().
        image.new_attrs(space_ns.add_namespace(space_el.name + "_utcb_area"))
        space.utcb.attach = PF_R

        # Weave a kclist for the main space.
        main_kclist = self.env.add_kclist_entry("MAIN_KCLIST", self.cell)
        # Weave the root kspace object.
        main_kspace = self.env.add_kspace_entry("MAIN_KSPACE", self.space)
        # Weave the root protection domain object.
        self.env.add_pd_entry("MAIN_PD", self.space, main_kspace, 32,
                              machine.min_page_size(), self.elf)

        # Find the heap element and add it; fall back to a synthetic
        # element so the collection code always has something to work with.
        heap_el = okl4_el.find_child('heap')
        if heap_el is None:
            heap_el = ParsedElement('heap')
        heap_attr = collect_memobj_attrs(heap_el, self.namespace,
                                         image, machine)
        if heap_attr.size == None:
            heap_attr.size = DEFAULT_HEAP_SIZE
        self.heap_ms = image.add_memsection(heap_attr, machine, pools)
        self.cell.heap = self.heap_ms.attrs
        mapping = self.space.register_mapping(self.heap_ms.attrs)
        self.add_standard_mem_caps(heap_attr.ns_node, mapping, heap_attr)

        # Segments that must be laid out in the image: every thread's
        # stack, the heap, and the memsections / device memory collected
        # earlier in this method.
        self.elf_segments = \
            [thread.get_stack_ms() for thread in threads] + \
            [self.heap_ms] + memsection_objs + device_mem

        self.env.add_kernel_info_entry(0, 0, kernel)

        # Add command line arguments.
        commandline_el = okl4_el.find_child("commandline")
        if commandline_el is not None:
            args = [arg_el.value for arg_el in
                    commandline_el.find_children("arg")]
        else:
            args = []
        self.env.add_arg_list(args)

        self.cell.env = self.env

    def get_cell_api_version(self):
        """
        Return the libOKL4 API version that the cell initial program was
        build against.
        """
        return self.api_version

    def generate_dynamic_segments(self, namespace, machine, pools, kernel,
                                  image):
        """
        Create bootinfo segment and environment buffers.

        Sizes each space's UTCB area, publishes the cell's id pools into
        the environment and groups all of the cell's ELF segments for
        image layout.
        """
        utcb_mss = []
        for space in self.cell.spaces:
            space.utcb.size = align_up(space.max_threads * image.utcb_size,
                                       machine.min_page_size())
            # A space with no threads will get a 0 size as align_up doesn't
            # change a 0, so we explicitly set it to at least one page.
            if space.utcb.size == 0:
                space.utcb.size = machine.min_page_size()
            utcb_ms = image.add_utcb_area(space.utcb)
            utcb_mss.append(utcb_ms)
            # Find the kernel-space object matching this space.  If ids
            # were ever duplicated the last match would win.
            kspace = None
            for (x, _) in self.env.space_list:
                if x.space.id == space.id:
                    kspace = x
            if image.utcb_size:
                self.env.add_utcb_area_entry("UTCB_AREA_%d" % space.id,
                                             space, kspace, image)

        # Publish the cell's allocatable id pools and primary ids.
        self.env.add_bitmap_allocator_entry("MAIN_SPACE_ID_POOL",
                                            *self.cell.space_list)
        self.env.add_bitmap_allocator_entry("MAIN_CLIST_ID_POOL",
                                            *self.cell.cap_list)
        self.env.add_bitmap_allocator_entry("MAIN_MUTEX_ID_POOL",
                                            *self.cell.mutex_list)
        self.env.add_int_entry("MAIN_SPACE_ID", self.space.id)
        self.env.add_int_entry("MAIN_CLIST_ID", self.cell.clist_id)
        self.env.generate_dynamic_segments(self, image, machine, pools)

        self.elf_segments.extend(utcb_mss + [self.env.memsect])
        self.group_elf_segments(image)

    def generate_init(self, machine, pools, kernel, image):
        """
        Generate the bootinfo script for the cell, placing it into a
        segment.
        """
        self.set_free_pools(pools, kernel, image, machine)
        self.env.generate_init(machine, pools, kernel, image)

    def set_free_pools(self, pools, kernel, image, machine):
        """
        Give the cell the largest free region of its default physical and
        virtual pools, and record the physical range in the kernel info
        environment entry.
        """
        phys_free = \
            pools.get_physical_pool_by_name(self.def_physpool).get_freelist()[:]
        virt_free = \
            pools.get_virtual_pool_by_name(self.def_virtpool).get_freelist()[:]

        # Sort biggest to smallest: (base - end) is most negative for the
        # largest region, so ascending order puts the biggest first.
        phys_free.sort(key=lambda x: x[0] - x[1])
        virt_free.sort(key=lambda x: x[0] - x[1])

        # Extract the biggest regions and remove them from the lists.
        (phys_base, phys_end, _) = phys_free[0]
        del phys_free[0]
        (virt_base, virt_end, _) = virt_free[0]
        del virt_free[0]

        # Ranges are inclusive, hence the +1 when computing sizes.
        self.phys_attrs.size = phys_end - phys_base + 1
        self.phys_attrs.phys_addr = phys_base
        def_phys_ms = image.add_memsection(self.phys_attrs, machine, pools)
        image.add_group(0, [def_phys_ms])

        self.virt_attrs.size = virt_end - virt_base + 1
        self.virt_attrs.virt_addr = virt_base

        self.env.add_kernel_info_entry(phys_base, phys_end, kernel)

    def collect_thread(self, elf, el, namespace, image, machine, pools,
                       kernel, space, entry, thread_name=None,
                       cell_create_thread=False):
        """Collect the attributes of a thread element."""
        if entry is None:
            raise MergeError, "No entry point specified for thread %s" % el.name

        # 'start' (user entry) defaults to the kernel entry point.
        user_main = getattr(el, 'start', entry)
        entry = start_to_value(entry, elf)
        user_main = start_to_value(user_main, elf)
        priority = getattr(el, 'priority', kernel.kernel.DEFAULT_PRIORITY)
        physpool = getattr(el, 'physpool', None)
        virtpool = getattr(el, 'virtpool', None)

        # New namespace for objects living in the thread.
        if thread_name == None:
            thread_name = el.name
        thread_namespace = namespace.add_namespace(thread_name)

        # Push the overriding pools for the thread.
        image.push_attrs(virtual=virtpool, physical=physpool)

        utcb = image.new_attrs(thread_namespace.add_namespace("utcb"))

        # Create the cell thread and assign the entry point.
        thread = space.register_thread(entry, user_main, utcb, priority,
                                       create=cell_create_thread)
        thread_namespace.add('master', ThreadCellCap('master', thread))

        # Collect the stack.  If there is no element, create a fake one for
        # the collection code to use.
        stack_el = el.find_child('stack')
        if stack_el is None:
            stack_el = ParsedElement('stack')
        stack_attr = collect_memobj_attrs(stack_el, thread_namespace,
                                          image, machine)
        if stack_attr.size == None:
            stack_attr.size = DEFAULT_STACK_SIZE
        stack_ms = image.add_memsection(stack_attr, machine, pools)
        mapping = space.register_mapping(stack_ms.attrs)
        self.add_standard_mem_caps(stack_attr.ns_node, mapping, stack_attr)

        # Setup the stack for the new cell thread.
        thread.stack = stack_ms.attrs
        thread.stack_ms = stack_ms

        # If this is the very first collect_thread call, we assume it is
        # the cell's main thread and we set the stack_ms accordingly.
        if self.stack_ms is None:
            self.stack_ms = stack_ms

        image.pop_attrs()

        return thread

    def collect_use_devices(self, el, space, namespace, image, machine,
                            pools):
        """
        Collect <use_device> elements: map each device's physical memory
        ranges into the space and register its interrupts.

        Returns the list of device memsections created.
        """
        device_mem = []
        for device_el in el.find_children("use_device"):
            dev = machine.get_phys_device(device_el.name)
            # Potentially we can have multiple named physical mem sections
            # each with multiple ranges.
            for key in dev.physical_mem.keys():
                ranges = dev.physical_mem[key]
                index = 0
                for (base, size, rights, cache_policy) in ranges:
                    # If theres only one range just use the key otherwise
                    # append an index to distinguish entries.
                    if len(ranges) == 1:
                        name = key
                    else:
                        name = key + '_' + str(index)
                        index += 1
                    device_ns = namespace.add_namespace(name)
                    attrs = image.new_attrs(device_ns)
                    attrs.attach = PF_R | PF_W
                    if cache_policy is not None:
                        attrs.cache_policy = \
                            machine.get_cache_policy(cache_policy)
                    attrs.phys_addr = base
                    attrs.size = size
                    device_ms = image.add_memsection(attrs, machine, pools)
                    device_mem.append(device_ms)
                    mapping = space.register_mapping(device_ms.attrs)
                    self.add_standard_mem_caps(device_ns, mapping,
                                               device_ms.attrs)
            for irq in dev.interrupt.values():
                space.register_irq(irq)
            self.env.add_device_irq_list(dev.name, dev.interrupt.values())
        return device_mem

    def collect_mutexes(self, el, namespace, space):
        """Collect <mutex> elements, registering each with the space."""
        for mutex_el in el.find_children("mutex"):
            m_ns = namespace.add_namespace(mutex_el.name)
            mutex = space.register_mutex(mutex_el.name)
            m_ns.add('master', MutexCellCap('master', mutex))

    def add_standard_mem_caps(self, namespace, mapping, attrs):
        """Add the standard 'master' and 'physical' caps for a mapping."""
        namespace.add('master', PhysSegCellCap('master', mapping))
        namespace.add('physical', PhysAddrCellCap('physical', attrs))

    def collect_memsections(self, el, space, namespace, image, machine,
                            pools):
        """
        Collect <memsection> elements, mapping each into the space.

        Returns the list of memsections created.
        """
        memsection_objs = []
        for memsection_el in el.find_children('memsection'):
            memsection_attr = \
                collect_memobj_attrs(memsection_el, namespace, image,
                                     machine)
            memsection_ns = memsection_attr.ns_node
            memsection_ms = image.add_memsection(memsection_attr, machine,
                                                 pools)
            memsection_objs.append(memsection_ms)
            mapping = space.register_mapping(memsection_ms.attrs)
            self.add_standard_mem_caps(memsection_ns, mapping,
                                       memsection_ms.attrs)
        return memsection_objs

    def group_elf_segments(self, image):
        """
        Group ELF segments together in a way that avoids domain faults
        and reduces final image size.

        - Any memsection that is under 1M goes into Group 1.
        - Any memsection above 1M but has contents, Group 2.
        - Any memsection above 1M which is empty, Group 3.
        """
        groups = [[], [], []]
        for seg in self.elf_segments:
            if seg.get_attrs().size <= 0x100000:
                idx = 0
            elif seg.get_attrs().data or seg.get_attrs().file:
                idx = 1
            else:
                idx = 2
            groups[idx].append(seg)

        for grp in groups:
            # Python 2 cmp-style sort: smallest segment first.
            grp.sort(lambda x, y: int(x.get_attrs().size -
                                      y.get_attrs().size))
            # The program's own ELF segments lead the first group.
            if grp is groups[0]:
                grp = self.elf_prog_segments + grp
            image.add_group(None, grp)
def collect_kernel_element(parsed, kernel_heap_size, namespace, image,
                           machine, pools):
    """
    Collect the attributes of the kernel element.

    parsed           -- the parsed XML tree; the <kernel> child is used.
    kernel_heap_size -- command-line override for the kernel heap size
                        (0 means "no override").
    namespace        -- parent object namespace.
    image/machine/pools -- image layout, machine description and memory
                        pools being woven.

    Returns None; all results are recorded on 'image'.
    """
    kernel_el = parsed.find_child("kernel")
    if kernel_el is None:
        return

    # New namespace for objects living in the kernel.
    kernel_namespace = namespace.add_namespace('kernel')

    # Will the kernel be run execute in place?
    do_xip = hasattr(kernel_el, 'xip') and kernel_el.xip

    physpool = None
    if hasattr(kernel_el, 'physpool'):
        physpool = kernel_el.physpool
    # Push the overriding physical pool for the kernel.
    image.push_attrs(physical=physpool)

    segment_els = kernel_el.find_children("segment")
    patch_els = kernel_el.find_children("patch")

    elf = UnpreparedElfFile(filename=kernel_el.file)
    #elf = PreparedElfFile(filename=kernel_el.file)
    if elf.elf_type != ET_EXEC:
        raise MergeError, \
              "All the merged ELF files must be of EXEC type."

    image.set_kernel(elf)
    segs = collect_elf_segments(elf, image.KERNEL, segment_els, 'kernel',
                                kernel_namespace, image, machine, pools)

    elf = elf.prepare(elf.wordsize, elf.endianess)

    # base_segment anchors kernel heap / TCB-array proximity grouping.
    base_segment = None
    if do_xip:
        # XIP: RAM is the second segment, ROM the first.
        (addr, size) = get_symbol(elf, '__phys_addr_ram')
        image.patch(addr, size, segs[1])
        base_segment = segs[1]
        (addr, size) = get_symbol(elf, '__phys_addr_rom')
        image.patch(addr, size, segs[0])
    else:
        sdata = get_symbol(elf, '__phys_addr_ram', may_not_exist=True)
        if sdata is not None:
            (addr, size) = sdata
            image.patch(addr, size, segs[0])
            base_segment = segs[0]

    # The extra_patches attr may be added by a plugin.
    # NOTE(review): 'Kernel_el' (capitalised) looks like the parser element
    # class rather than the local 'kernel_el' instance — presumably plugins
    # attach extra_patches as a class attribute; confirm before "fixing".
    for patch in getattr(Kernel_el, "extra_patches", []):
        addr = get_symbol(elf, patch[0], True)
        if addr != None:
            addr = int(addr[0]) + int(patch[1])
            new_patch = Patch_el(address=hex(addr), bytes=patch[2],
                                 value=patch[3])
            patch_els.append(new_patch)

    collect_patches(elf, patch_els, kernel_el.file, image)

    # Size the dynamic TCB array from the configured maximum thread count.
    dynamic_attrs = image.new_attrs(None)
    dynamic_attrs.align = machine.kernel_heap_align
    max_threads = DEFAULT_KERNEL_MAX_THREADS
    dynamic_el = kernel_el.find_child("dynamic")
    if dynamic_el is not None:
        max_threads = getattr(dynamic_el, 'max_threads', max_threads)
        dynamic_attrs.align = getattr(dynamic_el, 'align',
                                      dynamic_attrs.align)
    dynamic_attrs.size = max_threads * get_tcb_size(elf, image)
    array = image.add_kernel_array(dynamic_attrs, pools)
    image.add_group(machine.kernel_heap_proximity, (base_segment, array))
    # Patch the kernel's view of the TCB array (word-sized values).
    image.patch(get_symbol(elf, "tcb_array")[0], machine.word_size / 8,
                array)
    image.patch(get_symbol(elf, "num_tcbs")[0], machine.word_size / 8,
                max_threads)

    # Kernel heap: defaults, then <heap> element, then command line.
    heap_attrs = image.new_attrs(None)
    heap_attrs.size = DEFAULT_KERNEL_HEAP_SIZE
    heap_attrs.align = machine.kernel_heap_align
    heap_el = kernel_el.find_child("heap")
    if heap_el is not None:
        heap_attrs.phys_addr = getattr(heap_el, 'phys_addr',
                                       heap_attrs.phys_addr)
        heap_attrs.size = getattr(heap_el, 'size', heap_attrs.size)
        heap_attrs.align = getattr(heap_el, 'align', heap_attrs.align)
    # Override the size with the command line value, if present.
    if kernel_heap_size != 0:
        heap_attrs.size = kernel_heap_size
    heap = image.set_kernel_heap(heap_attrs, pools)
    image.add_group(machine.kernel_heap_proximity, (base_segment, heap))

    # Copy <config> options into the image's kconfig.
    config_el = kernel_el.find_child("config")
    if config_el is not None:
        for option in config_el.find_children("option"):
            image.kconfig.add_config(option.key, option.value)

    image.pop_attrs()
def link(files, section_vaddr=None, kernel_soc=True, rvct=False,
         verbose=False, verbose_merge=False, verbose_script=False,
         verbose_relocs=False):
    """
    Perform the actual link, split so that elfweaver merge can call this
    easily.

    files         -- input ELF file names; the first is used as the base.
    section_vaddr -- optional base virtual address for section layout.
    kernel_soc    -- True for a kernel + SoC link via the machine-specific
                     linker script; False for a standard link.
    rvct          -- select the RVCT variant of the kernel linker script.
    verbose*      -- progress / debug output flags.

    Returns the linked UnpreparedElfFile.
    """
    # Handle merging of multiple files.
    # Use the first provided elf file as the base file. For each additonal
    # file merge the sections (.text -> .text) but do no other merging
    # i.e. do not merge any .text.foo into .text. Update the symbol table
    # and any relocs to take into account the merging then merge in the
    # symbol table and relocation sections.
    base_elf = UnpreparedElfFile(files[0])
    # Sections are rebuilt from scratch as each input is merged in.
    base_elf.sections = []
    if verbose:
        print "Using %s as base file" % files[0]

    base_sym_tab = None
    for merge_file in files:
        merge_elf = UnpreparedElfFile(merge_file)
        if verbose:
            print "Merging in file %s" % merge_file
        sym_tab = [sym_tab for sym_tab in merge_elf.sections
                   if sym_tab.type == SHT_SYMTAB]
        # Things get really, really ugly if there is more than one symbol
        # table, fortunately sane compilers / linkers appear to only have
        # one anyway.
        assert len(sym_tab) == 1
        sym_tab = sym_tab[0]

        merged_sects = []
        reloc_sects = []
        # Skip each input's null section, except when the base is still
        # empty (the first file's null section becomes the base's).
        ind = 1
        if base_elf.sections == []:
            ind = 0
        for sect in merge_elf.sections[ind:]:
            # Symbol table and relocations require more specific magic and
            # get handled later on.
            if sect.type == SHT_SYMTAB:
                continue
            elif sect.type in (SHT_REL, SHT_RELA):
                reloc_sects.append(sect)
                continue
            found_sect = base_elf.find_section_named(sect.name)
            if found_sect == None:
                # Don't need to merge this section as there is no
                # corrosponding entry in the base file, so just go ahead
                # and add it.
                base_elf.add_section(sect)
                if verbose_merge:
                    print "\tAdding section %s" % sect.name
                continue
            merge_sections(found_sect, sect, merged_sects, None,
                           verbose_merge)

        # Update any symbols or relocations that relied on a merged
        # section to correctly point at the new section at the correct
        # offset.
        if verbose:
            print "\tUpdating relocation sections with merged data"
        sym_tab.update_merged_sections(merged_sects)
        for sect in reloc_sects:
            sect.update_merged_sections(merged_sects)

        # Merge the symbol tables, this is more just tricky than any deep
        # magic
        # * For each undefined symbol in the base file try to find a match
        #   in the input file. If we find one then replace the base file's
        #   symbol with the defined one. Keep a list of the mappings from
        #   the input files symbols to the new base file symbol index.
        # * Merge the two symbol tables. For each symbol in the input
        #   file's symbol table;
        #   * If it is undefined, try to find a match in the base file's
        #     symbol table. If found record the mapping from old symbol to
        #     new index.
        #   * If it is defined or there is no match copy it over, again
        #     keeping a mapping from old symbol to new index.
        # * Update all the relocations in the input file to correctly
        #   point at the new symbol table and the correct symbol index.
        #   And merge in the relocations sections if a section already
        #   exists or add them.
        if base_sym_tab:
            if verbose:
                print "\tMerging symbol tables"
            merged_syms = base_sym_tab.resolve(sym_tab)
            merged_syms += base_sym_tab.merge(sym_tab)
            for sect in reloc_sects:
                sect.update_merged_symbols(base_sym_tab, merged_syms)
        else:
            if verbose:
                print "\tAdding symbol table"
            base_elf.add_section(sym_tab)
            base_sym_tab = sym_tab

        for sect in reloc_sects:
            found_sect = base_elf.find_section_named(sect.name)
            if found_sect == None:
                base_elf.add_section(sect)
                if verbose_merge:
                    print "\tAdding relocation section %s" % sect.name
            else:
                found_sect.append_relocs(sect.get_relocs())
                if verbose_merge:
                    print "\tMerging in relocation section %s" % sect.name

    # Now before we lay everything out we need to adjust the size of any
    # sections (such as the .bss or .got) that may increase in size due
    # to allocation of symbols, etc.
    if verbose:
        print "Allocating symbol and relocation data"
    base_sym_tab.allocate_symbols()
    reloc_sects = []
    for sect in base_elf.sections:
        if sect.type in (SHT_REL, SHT_RELA):
            #pylint: disable-msg=E1103
            sect.set_verbose(verbose_relocs)
            sect.allocate_relocs()
            reloc_sects.append(sect)

    # Do any linker scripty things we need to do. For the moment we either
    # do a standard link or a kernel+soc link, the actions performed are
    # in python functions currently but may be moved into external scripts
    # later.
    if kernel_soc:
        if verbose:
            print "Performing a kernel + soc link, rvct", rvct
        kernel_link_func_types = KERNEL_SOC_LINKER_SCRIPT[base_elf.machine]
        if not rvct:
            kernel_link_func = kernel_link_func_types['gnu']
        else:
            kernel_link_func = kernel_link_func_types['rvct']
        segments, merged_sects, discarded_sects = \
            perform_link(base_elf, base_sym_tab, kernel_link_func,
                         section_vaddr, verbose_script)
    else:
        if verbose:
            print "Performing standard link"
        segments, merged_sects, discarded_sects = \
            perform_link(base_elf, base_sym_tab, standard_link,
                         section_vaddr, verbose_script)

    # Remove any symbols relating to discarded sections and update for
    # any of the merged sections.
    if verbose:
        print "Updating symbols for discarded and merged sections"
    discarded_syms = base_sym_tab.update_discarded_sections(discarded_sects)
    base_sym_tab.update_merged_sections(merged_sects)
    if verbose:
        print "Updating relocation sections with new symbols"
    for sect in reloc_sects:
        sect.update_discarded_symbols(discarded_syms)
        sect.update_discarded_sections(discarded_sects)
        sect.update_merged_sections(merged_sects)

    # Segments don't have the correct flags yet, go through and update
    # them based on the types of the included sections.
    for seginfo in segments:
        flags = PF_R
        for sect in seginfo[2]:
            if sect.flags & SHF_WRITE:
                flags |= PF_W
            if sect.flags & SHF_EXECINSTR:
                flags |= PF_X
        seginfo[0] = flags

    for flags, base_addr, sects in segments:
        # Skip empty segments entirely.
        if len(sects) == 0:
            continue
        new_seg = SectionedElfSegment(base_elf, PT_LOAD, base_addr,
                                      base_addr, flags, SEGMENT_ALIGN,
                                      sections=sects)
        base_elf.add_segment(new_seg)
        if verbose:
            print "Adding segment to file"
            print "\tFlags %d (R %d, W %d, X %d)" % (
                flags, flags & PF_R > 0, flags & PF_W > 0, flags & PF_X > 0)
            print "\tBase address %x" % base_addr
            print "\tSections", [sect.name for sect in sects]

    # All sections are laid out and at their final size, go through and
    # update symbol values to their final position.
    if verbose:
        print "Updating symbols"
    base_sym_tab.update_symbols()

    relocated_all = True
    # Apply all the relocs we know how to handle.
    relocs_remove = []
    if verbose:
        print "Applying relocations"
    for sect in reloc_sects:
        if sect.apply():
            relocs_remove.append(sect)
        else:
            relocated_all = False
    if verbose:
        print "Applied all relocations", relocated_all
    for sect in relocs_remove:
        base_elf.remove_section(sect)

    # Only a fully-relocated file can be promoted to an executable.
    if relocated_all:
        # Set the ELF entry point to _start.
        base_elf.entry_point = base_elf.find_symbol("_start").value
        # Set ELF type to an executable.
        base_elf.elf_type = ET_EXEC
        if verbose:
            print "Setting entry point to %x" % base_elf.entry_point
    return base_elf
def test_get_file_data(self): ef = UnpreparedElfFile() symtab = UnpreparedElfSymbolTable(ef, ".symtab") self.assertRaises(NotImplementedError, symtab.get_file_data)
    def collect_xml(self, iguana_el, ignore_name, namespace, machine,
                    pools, kernel, image):
        """Handle an Iguana Server Compound Object"""
        # Register the cell and its privileged main space with the kernel.
        cell = \
            kernel.register_cell(iguana_el.name, iguana_el.kernel_heap,
                                 max_caps=getattr(iguana_el, "caps", None))

        # New namespace for objects living in the root program's PD.
        ig_namespace = namespace.add_namespace(iguana_el.name)
        self.namespace = ig_namespace
        self.s_namespace = self.namespace
        self.name = iguana_el.name

        self.space = \
            cell.register_space(self.namespace, "MAIN",
                                is_privileged=True,
                                max_clists=getattr(iguana_el, "clists",
                                                   None),
                                max_spaces=getattr(iguana_el, "spaces",
                                                   None),
                                max_mutexes=getattr(iguana_el, "mutexes",
                                                    None),
                                max_threads=getattr(iguana_el, "threads",
                                                    None),
                                plat_control=\
                                getattr(iguana_el, "platform_control",
                                        False))
        self.setup_bootinfo_pools(namespace, pools)
        self.setup_device_namespace(namespace, machine)
        self.env = CellEnvironment(iguana_el.name, self.namespace,
                                   machine, image, kernel,
                                   self.space.mappings)
        cell.env = self.env

        pd = weaver.cells.iguana.bootinfo.RootServerPD(iguana_el.name,
                                                       ig_namespace)

        # Declare the default memory pools.
        def_virtpool = getattr(iguana_el, "virtpool",
                               pools.get_default_virtual_pool())
        def_physpool = getattr(iguana_el, "physpool",
                               pools.get_default_physical_pool())
        def_pager = getattr(iguana_el, "pager", None)
        def_direct = getattr(iguana_el, "direct", None)

        # Record any IRQs that are assigned to Iguana.
        for irq_el in iguana_el.find_children("irq"):
            self.space.register_irq(irq_el.value)

        self.bootinfo.set_system_default_attrs(def_virtpool, def_physpool,
                                               image, def_pager,
                                               def_direct)

        # Iguana is not aware of segment ids.
        # The old mapping API uses segment 0 for all mapping ops.
        # Segment 0 needs to cover all of physical memory.
        # Subsequent segments will be created but never used,
        # but must still be mapped!
        # XXX: VVVVV THIS IS PURE EVIL VVVVV
        physpool_attrs = \
            image.new_attrs(self.s_namespace.add_namespace("physpool_hack"))
        physpool_attrs.phys_addr = 0 #first_phys_base
        physpool_attrs.size = 0xfffff000 #first_phys_end
        physpool_attrs.attach = PF_R | PF_W | PF_X
        physpool_attrs.cache_policy = 0xff # XXX: Define me properly
        physpool_attrs.mem_type = physpool_attrs.unmapped
        self.space.register_mapping(physpool_attrs)
        # XXX: ^^^^^ THIS IS PURE EVIL ^^^^^

        filename = os.path.join(iguana_el._path, iguana_el.file)
        elf = UnpreparedElfFile(filename=filename)

        pd.set_default_pools(image, self.bootinfo)

        # Collect the object environment.
        pd.add_env_ms(image, ig_namespace, machine, pools)
        env, extra_ms = \
            collect_environment_element(iguana_el.find_child('environment'),
                                        ig_namespace, machine, pools,
                                        image, self.bootinfo)

        segment_els = iguana_el.find_children("segment")
        segs = collect_elf_segments(elf, image.ROOT_PROGRAM, segment_els,
                                    filename, [], ig_namespace, image,
                                    machine, pools)
        self.elf_prog_segments = segs
        for seg in segs:
            self.space.register_mapping(seg.attrs)

        if elf.elf_type != ET_EXEC:
            raise MergeError, "All the merged ELF files must be of EXEC type."

        # Find out which version of libokl4 that iguana was built against.
        sym = elf.find_symbol("okl4_api_version")
        if sym == None:
            raise MergeError("Unable to locate the symbol 'okl4_api_version' "
                             "in file \"%s\". Cells must link with libokl4." %
                             filename)
        self.api_version = elf.get_value(sym.value, sym.size, elf.endianess)
        if self.api_version == None:
            raise MergeError("Unable to read the symbol 'okl4_api_version' in "
                             "file \"%s\". Cells must link with libokl4." %
                             filename)

        # Record any patches being made to the program.
        patch_els = iguana_el.find_children("patch")
        # NOTE(review): 'Iguana_el' (capitalised) looks like the parser
        # element class, not the local 'iguana_el' — presumably plugins
        # attach extra_patches as a class attribute; confirm before
        # "fixing".
        for patch in getattr(Iguana_el, "extra_patches", []):
            addr = get_symbol(elf, patch[0], True)
            if addr == None:
                continue
            addr = int(addr[0]) + int(patch[1])
            new_patch = Patch_el(address=hex(addr), bytes=patch[2],
                                 value=patch[3])
            patch_els.append(new_patch)
        collect_patches(elf, patch_els, filename, image)

        # Collect extensions that are not excluded by 'ignore_name'.
        for extension_el in iguana_el.find_children("extension"):
            if not ignore_name.match(extension_el.name):
                collect_extension_element(extension_el, pd, ig_namespace,
                                          elf, image, machine,
                                          self.bootinfo, pools)

        # Collect the main thread. The root program can only have one
        # thread, so this call chiefly is used to collect information
        # about the stack.
        #
        # The stack is not set up as a memsection, so it is not put in the
        # object environment.
        if not hasattr(iguana_el, 'priority'):
            iguana_el.priority = 255
        thread = collect_thread(elf, iguana_el, ignore_name, ig_namespace,
                                image, machine, pools, self.space,
                                entry=elf.entry_point,
                                name='iguana',
                                namespace_thread_name="main",
                                cell_create_thread=True)
        pd.add_thread(thread)

        # Collect the heap. If there is no element, create a fake one for
        # the collection code to use.
        #
        # The heap is not set up as a memsection, so it is not put in the
        # object environment.
        heap_el = iguana_el.find_child('heap')
        if heap_el is None:
            heap_el = ParsedElement('heap')
        heap_ms = collect_memsection_element(heap_el, ignore_name,
                                             ig_namespace, image, machine,
                                             pools)
        pd.attach_heap(heap_ms)
        self.space.register_mapping(heap_ms.ms.attrs)
        self.space.register_mapping(thread.get_stack().get_ms().attrs)

        self.elf_segments.extend([pd.env_ms.get_ms(),
                                  thread.get_stack().get_ms(),
                                  heap_ms.get_ms()] +
                                 [ms.get_ms() for ms in extra_ms])

        pd.add_environment(env)
        pd.utcb_size = image.utcb_size
        self.bootinfo.add_rootserver_pd(pd)

        # And now parse the programs and pd's.
        collect_program_pd_elements(iguana_el, ignore_name, ig_namespace,
                                    image, machine, self.bootinfo, pools,
                                    kernel, cell)
def test_add_section(self): ef = UnpreparedElfFile() sect = UnpreparedElfSection(None) ef.add_section(sect) self.assertEqual(ef.sections[-1], sect)
def collect_program_element(program_el, ignore_name, namespace, image,
                            machine, bootinfo, pools):
    """
    Collect the attributes of a program element.

    program_el  -- the parsed <program> element.
    ignore_name -- regex; children whose names match are skipped.
    namespace   -- parent object namespace.
    image/machine/bootinfo/pools -- weave state the program is recorded in.
    """
    # New namespace for objects living in the program's PD.
    prog_namespace = namespace.add_namespace(program_el.name)

    pd = weaver.bootinfo.PD(program_el.name, prog_namespace, image,
                            machine, pools)

    elf = UnpreparedElfFile(filename=program_el.file)
    if elf.elf_type != ET_EXEC:
        raise MergeError, "All the merged ELF files must be of EXEC type."

    bootinfo.add_elf_info(name=program_el.file,
                          elf_type=image.PROGRAM,
                          entry_point=elf.entry_point)

    pd.elf = elf

    virtpool = getattr(program_el, 'virtpool', None)
    physpool = getattr(program_el, 'physpool', None)
    direct = getattr(program_el, 'direct', None)
    pd.set_platform_control(getattr(program_el, "platform_control",
                                    False))

    if hasattr(program_el, 'pager'):
        pager = make_pager_attr(program_el.pager)
    else:
        pager = None

    # Push the overriding attributes for the program.
    image.push_attrs(virtual=virtpool,
                     physical=physpool,
                     pager=pager,
                     direct=direct)

    pd.set_default_pools(image, bootinfo)

    # Collect the object environment.
    env = collect_environment_element(program_el.find_child('environment'),
                                      prog_namespace, machine, pools,
                                      image, bootinfo)

    segment_els = program_el.find_all_children("segment")
    segs = collect_elf_segments(elf, image.PROGRAM, segment_els,
                                program_el.name, prog_namespace, image,
                                machine, pools)
    segs_ms = [bootinfo.record_segment_info(program_el.name, seg,
                                            image, machine, pools)
               for seg in segs]
    for seg_ms in segs_ms:
        pd.attach_memsection(seg_ms)

    # Record any patches being made to the program.
    patch_els = program_el.find_children("patch")
    collect_patches(elf, patch_els, program_el.file, image)

    # Collect the main thread.
    thread = collect_thread(elf, program_el, ignore_name, prog_namespace,
                            image, machine, pools,
                            entry=elf.entry_point,
                            name=program_el.name,
                            namespace_thread_name="main")
    pd.add_thread(thread)
    pd.set_server_thread(thread)

    # Add the virtual device elements.
    # Virtual devices always get added to the global namespace because
    # they should be globally unique.
    server_spawn_nvdevs = 0
    dev_ns = namespace.root.get_namespace("dev")
    if dev_ns is None:
        raise MergeError, "Device namespace does not exist!"
    for v_el in program_el.find_children('virt_device'):
        virt_dev = pd.add_virt_dev(v_el.name, program_el.name, pd, thread,
                                   server_spawn_nvdevs)
        create_alias_cap(virt_dev, dev_ns)
        server_spawn_nvdevs += 1

    # Record the main thread and its stack in the environment.
    env.add_entry(key="MAIN", cap_name='main/master')
    env.add_entry(key="MAIN/STACK",
                  cap_name='main/stack/master',
                  attach=thread.get_stack().get_attrs().attach)

    # If marked, sure that the program is exported to every
    # environment so that it can be found by other programs.
    if hasattr(program_el, 'server'):
        bootinfo.add_server(key=program_el.server,
                            cap_name=prog_namespace.abs_name('main') +
                            '/stack/master')

    # Collect remaining threads.
    elf = pd.elf = pd.elf.prepare(image.wordsize, image.endianess)
    for thread_el in program_el.find_children('thread'):
        if not ignore_name.match(thread_el.name):
            thread = collect_thread(elf, thread_el, ignore_name,
                                    prog_namespace, image, machine, pools,
                                    entry="_thread_start")
            pd.add_thread(thread)
            # Record the thread and its stack in the environment.
            env.add_entry(key=thread.get_name(),
                          cap_name=thread.get_name() + '/master')
            env.add_entry(key=thread.get_name() + "/STACK",
                          cap_name=thread.get_name() + '/stack/master',
                          attach=thread.get_stack().get_attrs().attach)

    # Collect any other memsections in the program.
    for ms_el in program_el.find_children('memsection'):
        if not ignore_name.match(ms_el.name):
            ms = collect_memsection_element(ms_el, ignore_name,
                                            prog_namespace, image,
                                            machine, pools)
            pd.attach_memsection(ms)
            image.add_group(0, [ms.get_ms()])
            env.add_entry(key=ms.get_name(),
                          cap_name=ms.get_name() + '/master',
                          attach=ms.get_attrs().attach)

    # Collect any zones in the program.
    for zone_el in program_el.find_children('zone'):
        (zone, non_zone_ms) = \
            collect_zone_element(zone_el, ignore_name, prog_namespace,
                                 pools, image, bootinfo, machine)
        pd.attach_zone(zone)
        # Attach memsections that aren't part of the zone to the program.
        for ms in non_zone_ms:
            pd.attach_memsection(ms)
            image.add_group(0, [ms.get_ms()])
            env.add_entry(key=ms.get_name(),
                          cap_name=ms.get_name() + '/master',
                          attach=ms.get_attrs().attach)

    # Collect the heap. If there is no element, create a fake one for
    # the collection code to use.
    heap_el = program_el.find_child('heap')
    if heap_el is None:
        heap_el = ParsedElement('heap')
    heap_ms = collect_memsection_element(heap_el, ignore_name,
                                         prog_namespace, image, machine,
                                         pools)
    pd.attach_heap(heap_ms)
    image.add_group(0, [heap_ms.get_ms()])

    # Fill env with default values.
    env.add_entry(key="HEAP", cap_name='heap/master',
                  attach=heap_ms.get_attrs().attach)
    env.add_entry(key="HEAP_BASE", base=heap_ms)
    env.add_entry(key="HEAP_SIZE", value=heap_ms.get_attrs().size)

    pd.add_environment(env)
    bootinfo.add_pd(pd)
    image.pop_attrs()
def test_add_segment(self): ef = UnpreparedElfFile() seg = DataElfSegment(data=ByteArray("pants")) ef.add_segment(seg) self.assertEqual(ef.segments[-1], seg)
def link(files, section_vaddr=None, kernel_soc=True, rvct=False,
         verbose=False, verbose_merge=False, verbose_script=False,
         verbose_relocs=False):
    """
    Perform the actual link, split so that elfweaver merge can call this
    easily.

    files         -- input ELF file names; the first is used as the base.
    section_vaddr -- optional base virtual address for section layout.
    kernel_soc    -- True for a kernel + SoC link via the machine-specific
                     linker script; False for a standard link.
    rvct          -- select the RVCT variant of the kernel linker script.
    verbose*      -- progress / debug output flags.

    Returns the linked UnpreparedElfFile.
    """
    # Handle merging of multiple files.
    # Use the first provided elf file as the base file. For each additonal
    # file merge the sections (.text -> .text) but do no other merging
    # i.e. do not merge any .text.foo into .text. Update the symbol table
    # and any relocs to take into account the merging then merge in the
    # symbol table and relocation sections.
    base_elf = UnpreparedElfFile(files[0])
    # Sections are rebuilt from scratch as each input is merged in.
    base_elf.sections = []
    if verbose:
        print "Using %s as base file" % files[0]

    base_sym_tab = None
    for merge_file in files:
        merge_elf = UnpreparedElfFile(merge_file)
        if verbose:
            print "Merging in file %s" % merge_file
        sym_tab = [sym_tab for sym_tab in merge_elf.sections
                   if sym_tab.type == SHT_SYMTAB]
        # Things get really, really ugly if there is more than one symbol
        # table, fortunately sane compilers / linkers appear to only have
        # one anyway.
        assert len(sym_tab) == 1
        sym_tab = sym_tab[0]

        merged_sects = []
        reloc_sects = []
        # Skip each input's null section, except when the base is still
        # empty (the first file's null section becomes the base's).
        ind = 1
        if base_elf.sections == []:
            ind = 0
        for sect in merge_elf.sections[ind:]:
            # Symbol table and relocations require more specific magic and
            # get handled later on.
            if sect.type == SHT_SYMTAB:
                continue
            elif sect.type in (SHT_REL, SHT_RELA):
                reloc_sects.append(sect)
                continue
            found_sect = base_elf.find_section_named(sect.name)
            if found_sect == None:
                # Don't need to merge this section as there is no
                # corrosponding entry in the base file, so just go ahead
                # and add it.
                base_elf.add_section(sect)
                if verbose_merge:
                    print "\tAdding section %s" % sect.name
                continue
            merge_sections(found_sect, sect, merged_sects, None,
                           verbose_merge)

        # Update any symbols or relocations that relied on a merged
        # section to correctly point at the new section at the correct
        # offset.
        if verbose:
            print "\tUpdating relocation sections with merged data"
        sym_tab.update_merged_sections(merged_sects)
        for sect in reloc_sects:
            sect.update_merged_sections(merged_sects)

        # Merge the symbol tables, this is more just tricky than any deep
        # magic
        # * For each undefined symbol in the base file try to find a match
        #   in the input file. If we find one then replace the base file's
        #   symbol with the defined one. Keep a list of the mappings from
        #   the input files symbols to the new base file symbol index.
        # * Merge the two symbol tables. For each symbol in the input
        #   file's symbol table;
        #   * If it is undefined, try to find a match in the base file's
        #     symbol table. If found record the mapping from old symbol to
        #     new index.
        #   * If it is defined or there is no match copy it over, again
        #     keeping a mapping from old symbol to new index.
        # * Update all the relocations in the input file to correctly
        #   point at the new symbol table and the correct symbol index.
        #   And merge in the relocations sections if a section already
        #   exists or add them.
        if base_sym_tab:
            if verbose:
                print "\tMerging symbol tables"
            merged_syms = base_sym_tab.resolve(sym_tab)
            merged_syms += base_sym_tab.merge(sym_tab)
            for sect in reloc_sects:
                sect.update_merged_symbols(base_sym_tab, merged_syms)
        else:
            if verbose:
                print "\tAdding symbol table"
            base_elf.add_section(sym_tab)
            base_sym_tab = sym_tab

        for sect in reloc_sects:
            found_sect = base_elf.find_section_named(sect.name)
            if found_sect == None:
                base_elf.add_section(sect)
                if verbose_merge:
                    print "\tAdding relocation section %s" % sect.name
            else:
                found_sect.append_relocs(sect.get_relocs())
                if verbose_merge:
                    print "\tMerging in relocation section %s" % sect.name

    # Now before we lay everything out we need to adjust the size of any
    # sections (such as the .bss or .got) that may increase in size due
    # to allocation of symbols, etc.
    if verbose:
        print "Allocating symbol and relocation data"
    base_sym_tab.allocate_symbols()
    reloc_sects = []
    for sect in base_elf.sections:
        if sect.type in (SHT_REL, SHT_RELA):
            #pylint: disable-msg=E1103
            sect.set_verbose(verbose_relocs)
            sect.allocate_relocs()
            reloc_sects.append(sect)

    # Do any linker scripty things we need to do. For the moment we either
    # do a standard link or a kernel+soc link, the actions performed are
    # in python functions currently but may be moved into external scripts
    # later.
    if kernel_soc:
        if verbose:
            print "Performing a kernel + soc link, rvct", rvct
        kernel_link_func_types = KERNEL_SOC_LINKER_SCRIPT[base_elf.machine]
        if not rvct:
            kernel_link_func = kernel_link_func_types['gnu']
        else:
            kernel_link_func = kernel_link_func_types['rvct']
        segments, merged_sects, discarded_sects = \
            perform_link(base_elf, base_sym_tab, kernel_link_func,
                         section_vaddr, verbose_script)
    else:
        if verbose:
            print "Performing standard link"
        segments, merged_sects, discarded_sects = \
            perform_link(base_elf, base_sym_tab, standard_link,
                         section_vaddr, verbose_script)

    # Remove any symbols relating to discarded sections and update for
    # any of the merged sections.
    if verbose:
        print "Updating symbols for discarded and merged sections"
    discarded_syms = base_sym_tab.update_discarded_sections(discarded_sects)
    base_sym_tab.update_merged_sections(merged_sects)
    if verbose:
        print "Updating relocation sections with new symbols"
    for sect in reloc_sects:
        sect.update_discarded_symbols(discarded_syms)
        sect.update_discarded_sections(discarded_sects)
        sect.update_merged_sections(merged_sects)

    # Segments don't have the correct flags yet, go through and update
    # them based on the types of the included sections.
    for seginfo in segments:
        flags = PF_R
        for sect in seginfo[2]:
            if sect.flags & SHF_WRITE:
                flags |= PF_W
            if sect.flags & SHF_EXECINSTR:
                flags |= PF_X
        seginfo[0] = flags

    for flags, base_addr, sects in segments:
        # Skip empty segments entirely.
        if len(sects) == 0:
            continue
        new_seg = SectionedElfSegment(base_elf, PT_LOAD, base_addr,
                                      base_addr, flags, SEGMENT_ALIGN,
                                      sections=sects)
        base_elf.add_segment(new_seg)
        if verbose:
            print "Adding segment to file"
            print "\tFlags %d (R %d, W %d, X %d)" % (
                flags, flags & PF_R > 0, flags & PF_W > 0, flags & PF_X > 0)
            print "\tBase address %x" % base_addr
            print "\tSections", [sect.name for sect in sects]

    # All sections are laid out and at their final size, go through and
    # update symbol values to their final position.
    if verbose:
        print "Updating symbols"
    base_sym_tab.update_symbols()

    relocated_all = True
    # Apply all the relocs we know how to handle.
    relocs_remove = []
    if verbose:
        print "Applying relocations"
    for sect in reloc_sects:
        if sect.apply():
            relocs_remove.append(sect)
        else:
            relocated_all = False
    if verbose:
        print "Applied all relocations", relocated_all
    for sect in relocs_remove:
        base_elf.remove_section(sect)

    # Only a fully-relocated file can be promoted to an executable.
    if relocated_all:
        # Set the ELF entry point to _start.
        base_elf.entry_point = base_elf.find_symbol("_start").value
        # Set ELF type to an executable.
        base_elf.elf_type = ET_EXEC
        if verbose:
            print "Setting entry point to %x" % base_elf.entry_point
    return base_elf
def test_remove_section(self): ef = UnpreparedElfFile() sect = UnpreparedElfSection() self.assertRaises(InvalidArgument, ef.remove_section, sect) ef.add_section(sect) self.assertEqual(sect in ef.sections, True) ef.remove_section(sect) self.assertEqual(sect in ef.sections, False) seg = SectionedElfSegment() ef.add_segment(seg) ef.add_section(sect) seg.add_section(sect) self.assertEqual(sect in ef.sections, True) self.assertEqual(sect in seg.sections, True) ef.remove_section(sect) self.assertEqual(sect in ef.sections, False) self.assertEqual(sect in seg.sections, False)