def link_kernel(kernel_el, linker_script, linker_wrapper, kernel, soc, libraries):
    """
    Link the kernel object with its SoC object and support libraries.

    For RVCT toolchains the link is delegated to an external wrapper
    script (elfweaver's linker cannot handle RVCT); otherwise the
    in-process link() is used.

    Returns (elf, kernel_out_name, kernel_tmp) where elf is the
    re-read UnpreparedElfFile, kernel_out_name is the path of the
    prepared output image and kernel_tmp is the NamedTemporaryFile
    kept alive for the RVCT path (None otherwise).

    Raises MergeError if the external linker exits non-zero.
    """
    # Fix: compute the linker type once; the original recomputed it a
    # second time further down with the same argument.
    link_type = get_linker_type(linker_script)
    kernel_tmp = None
    if link_type == 'rvct':
        # Current elfweaver linker does not support RVCT so shell out to an
        # external linker
        kernel_tmp = tempfile.NamedTemporaryFile()
        # Remove the file at exit.
        atexit.register(os.remove, kernel_tmp.name)
        command = [os.path.abspath(linker_wrapper), kernel, kernel_tmp.name,
                   linker_script, soc] + libraries
        ret = subprocess.Popen(command,
                               cwd=os.path.dirname(linker_script)).wait()
        if ret != 0:
            raise MergeError("Failed to link kernel, return code %d" % ret)
        elf = UnpreparedElfFile(kernel_tmp.name)
        kernel_out_name = kernel_tmp.name
    else:
        elf = link([kernel, soc])
        # Just getting a random temp file name, there must be a nicer way
        # to do this?  (The file is closed immediately; to_filename below
        # recreates it, and the atexit hook removes it.)
        tmp = tempfile.NamedTemporaryFile()
        # Remove the file at exit.
        atexit.register(os.remove, tmp.name)
        kernel_out_name = tmp.name
        tmp.close()
    # Lay out the linked image and re-read it in unprepared form so the
    # caller can keep manipulating it.
    kernel_out = elf.prepare(elf.wordsize, elf.endianess)
    kernel_out.to_filename(kernel_out_name)
    elf = UnpreparedElfFile(kernel_out_name)
    # Kernel is linked, now add the segment names (as per old elfadorn).
    # Kernel element's seg_names attribute overrides the defaults.
    if hasattr(kernel_el, "seg_names"):
        seglist = [seg.strip() for seg in kernel_el.seg_names.split(",")]
    else:
        seglist = None
    # RVCT segment naming needs the linker script to recover the names.
    if link_type == "rvct":
        scripts = [linker_script]
    else:
        scripts = []
    add_segment_names(elf, get_segment_names(elf, seglist, scripts, link_type))
    return elf, kernel_out_name, kernel_tmp
def test_copy_into(self):
    """Sections of each flavour can be copied between ELF files.

    Uses assertEqual throughout (assertEquals is a deprecated alias and
    the rest of the file already uses assertEqual).
    """
    elf_from = UnpreparedElfFile()
    elf_to = UnpreparedElfFile()
    sect = BaseElfSection(elf_from, "test")
    # FIXME
    #self.assertRaises(NotImplementedError, sect.copy_into, elf_to)
    sect = UnpreparedElfSection(elf_from, "test")
    new_sect = sect.copy_into(elf_to)
    self.assertEqual(sect.name, new_sect.name)
    prep_sect = sect.prepare(0, 0, 0)
    # FIXME
    #self.assertRaises(NotImplementedError, prep_sect.copy_into, elf_to)
    sect = UnpreparedElfSection(elf_from, "test", SHT_NOBITS)
    new_sect = sect.copy_into(elf_to)
    self.assertEqual(sect.name, new_sect.name)
    # String tables must copy their strings index-for-index.
    sect = UnpreparedElfStringTable(elf_from, "string")
    strings = ["foo", "bar", "baz"]
    for string in strings:
        sect.add_string(string)
    new_sect = sect.copy_into(elf_to)
    for i in range(len(strings)):
        self.assertEqual(sect.get_string_idx(i), new_sect.get_string_idx(i))
def test_replace_section(self):
    """replace_section() swaps a section in place; replacing a section
    that is not present raises InvalidArgument."""
    src_elf = UnpreparedElfFile()
    dst_elf = UnpreparedElfFile()
    segment = SectionedElfSegment(
        src_elf, sections=[UnpreparedElfSection(src_elf, "test")])
    replacement = UnpreparedElfSection(dst_elf, "new")
    segment.replace_section(segment.get_sections()[0], replacement)
    self.assertEqual(segment.get_sections(), [replacement])
    segment.copy_into(dst_elf)
    # None is not a section of the segment.
    self.assertRaises(InvalidArgument, segment.replace_section,
                      None, replacement)
def test_add_symbols(self):
    """Symbols can be added to a symbol table, with or without an
    explicitly linked string table."""
    names = ["foo", "bar"]
    # Symbol table with no explicit string-table link.
    elf = UnpreparedElfFile()
    section = UnpreparedElfSection(elf, "test_sect")
    table = UnpreparedElfSymbolTable(elf, ".symtab")
    for sym_name in names:
        table.add_symbol(ElfSymbol(sym_name, section))
    # Symbol table linked to a string table.
    elf = UnpreparedElfFile()
    section = UnpreparedElfSection(elf, "test_ect")
    strings = UnpreparedElfStringTable(elf, ".strtab")
    table = UnpreparedElfSymbolTable(elf, ".symtab", link=strings)
    for sym_name in names:
        table.add_symbol(ElfSymbol(sym_name, section))
def test_set_ph_offset(self):
    """set_ph_offset() records a fixed program-header offset, rejects a
    second fixed offset, and is absent from the prepared file."""
    elf = UnpreparedElfFile()
    elf.set_ph_offset(100, True)
    self.assertEqual(elf._ph_offset, 100)
    # A second fixed offset must be rejected.
    self.assertRaises(InvalidArgument, elf.set_ph_offset, 1, True)
    prepared = elf.prepare(32, "<")
    self.assertEqual(hasattr(prepared, "set_ph_offset"), False)
def _init_elf_file(pd_el, image, pd_namespace, machine, pools, bootinfo, pd, kernel): elf = UnpreparedElfFile(filename=os.path.join(pd_el._path, pd_el.file)) if elf.elf_type != ET_EXEC: raise MergeError, "All the merged ELF files must be of EXEC type." segment_els = pd_el.find_all_children("segment") segs = collect_elf_segments(elf, image.PROGRAM, segment_els, pd_el.name, [], pd_namespace, image, machine, pools) segs_ms = [bootinfo.record_segment_info(pd_el.name, seg, image, machine, pools) for seg in segs] for seg_ms in segs_ms: pd.attach_memsection(seg_ms) # Record any patches being made to the program. patch_els = pd_el.find_children("patch") collect_patches(elf, patch_els, os.path.join(pd_el._path, pd_el.file), image) # Collect threads in the PD. elf = pd.elf = elf.prepare(image.wordsize, image.endianess) return elf, segs
def get_kernel_file(kernel_el, machine):
    """Locate or link the kernel described by the XML kernel element.

    Three forms are supported: a prelinked kernel image, an SDK layout
    from which the pieces are linked, or explicit linking information.
    Returns (elf, name, tempfile) as produced by link_kernel(), or with
    a None tempfile for the prelinked case.
    """
    def relative(file):
        """Return a path taking relativity to XML include directories
        into account."""
        return os.path.join(kernel_el._path, file)

    if has_kernel_attr(kernel_el, KERNEL_PRELINK_TAGS):
        # Already linked: just load it.
        prelinked = UnpreparedElfFile(filename=relative(kernel_el.file))
        return (prelinked, kernel_el.file, None)
    if has_kernel_attr(kernel_el, KERNEL_SDKLINK_TAGS):
        # Derive all link inputs from the SDK directory layout.
        cpu = machine.get_cpu()
        sdk_kernel_dir = os.path.join(relative(kernel_el.sdk), "kernel", cpu)
        obj_dir = os.path.join(sdk_kernel_dir, kernel_el.configuration,
                               "object")
        lib_dir = os.path.join(sdk_kernel_dir, kernel_el.configuration,
                               "libs")
        script = os.path.abspath(os.path.join(obj_dir, "linker.lds"))
        wrapper = os.path.abspath(os.path.join(obj_dir, "linker.sh"))
        kernel_obj = os.path.abspath(os.path.join(obj_dir, cpu + ".o"))
        soc_obj = os.path.abspath(
            os.path.join(obj_dir, kernel_el.platform + ".o"))
        archives = [os.path.abspath(lib)
                    for lib in glob.glob(os.path.join(lib_dir, "*.a"))]
        return link_kernel(kernel_el, script, wrapper, kernel_obj, soc_obj,
                           archives)
    if has_kernel_attr(kernel_el, KERNEL_LINK_TAGS):
        # Explicit linking information in the XML.
        archives = [relative(lib.strip())
                    for lib in kernel_el.libs.split(",")]
        return link_kernel(kernel_el,
                           relative(kernel_el.linker_script),
                           relative(kernel_el.linker_wrapper),
                           relative(kernel_el.kernel),
                           relative(kernel_el.soc),
                           archives)
    raise MergeError("Invalid kernel tags; a prelinked kernel, SDK information or linking information must be provided")
def test_remove_nobits(self):
    """remove_nobits() makes filesz equal memsz without changing memsz,
    and converts NOBITS sections to zero-filled PROGBITS."""
    binaries = ["data/null_elf", "data/arm_exec", "data/arm_stripped_exec",
                "data/arm_exec_nosect", "data/arm_object",
                "data/mips64_exec", "data/mips32_exec", "data/ia32_exec",
                "data/amd64_exec", "data/ia64_exec"]
    for file_name in binaries:
        elf = PreparedElfFile(filename=file_name)
        if not elf.segments:
            continue
        before = [(seg.get_memsz(), seg.get_filesz())
                  for seg in elf.segments]
        elf.remove_nobits()
        for segment, (memsz, _filesz) in zip(elf.segments, before):
            # File size now matches memory size, which is unchanged.
            self.assertEqual(segment.get_memsz(), segment.get_filesz())
            self.assertEqual(segment.get_memsz(), memsz)
    # A NOBITS section becomes PROGBITS backed by zero bytes.
    elf = UnpreparedElfFile()
    nobits = UnpreparedElfSection(None, section_type=SHT_NOBITS, data=10)
    self.assertEqual(nobits.get_size(), 10)
    elf.add_section(nobits)
    elf.remove_nobits()
    converted = elf.sections[1]
    self.assertEqual(converted.type, SHT_PROGBITS)
    self.assertEqual(converted.get_size(), 10)
    self.assertEqual(converted.get_file_data(), ByteArray('\0' * 10))
def check_relocs(self):
    # Verify that every relocation in each built object file matches the
    # expected (offset, type, symdx, word) tuple recorded in self.relocs.
    # Also accumulates each reloc offset into self.addrs for later checks.
    for src in self.srcs:
        # Each .s source is assumed to have been assembled to a .o in
        # the build directory.
        file = src.replace(".s", ".o")
        print "[REL ] %s" % file,
        elf = UnpreparedElfFile(os.path.join(base_dir, 'build', file))
        try:
            # Only SHT_REL/SHT_RELA sections carry relocations.
            for sec in filter(lambda s: s.type in (SHT_REL, SHT_RELA), elf.sections):
                for reloc in sec.relocs:
                    self.addrs.append(reloc.offset)
                    # Compare against the expected tuple; sec.info is the
                    # section the relocations apply to.
                    if self.relocs[file][reloc.offset] != \
                       (reloc.offset, reloc.type, reloc.symdx, sec.info.get_word_at(reloc.offset)):
                        print " -> FAILED!"
                        self.list_relocs(file)
                        raise Exception, \
                              ("Input relocation differs from expected! " + \
                               "Got: %s\nExpected: %s") % \
                              ((reloc.offset, reloc.type, reloc.symdx, sec.info.get_word_at(reloc.offset)), (reloc, self.relocs[file][reloc.offset]))
            print " -> OK"
        except KeyError:
            # A reloc at an offset we have no expectation for: dump all
            # relocations to aid debugging.
            print " -> FAILED!"
            self.list_relocs()
def list_contents(self):
    # Debugging aid: dump the absolute address and stored word for every
    # offset recorded in self.addrs, read from the binary's .text section.
    print "# Addr Value"
    elf = UnpreparedElfFile(
        filename=os.path.join(base_dir, 'build', self.bin))
    text = elf.find_section_named(".text")
    for offset in self.addrs:
        # Offsets are relative to the start of .text.
        print "%8.8x %8.8x" % (text.address + offset,
                               text.get_word_at(offset))
def collect_extension_element(extension_el, pd, namespace, rp_elf, image,
                              machine, bootinfo, pools):
    """Collect a root-program extension element.

    An extension may supply its own ELF file (whose segments are attached
    to the PD) and/or an explicit start symbol; if no file is given, the
    start symbol is resolved against the root program's ELF (rp_elf).
    Registers the extension's entry point with bootinfo.
    """
    # New namespace for objects living in the extension.
    extn_namespace = namespace.add_namespace(extension_el.name)
    elf = None
    start = None
    name = None
    physpool = getattr(extension_el, 'physpool', None)
    pager = getattr(extension_el, 'pager', None)
    direct = getattr(extension_el, 'direct', None)
    # Push the overriding pools for the extension.
    image.push_attrs(physical=physpool, pager=pager, direct=direct)
    if hasattr(extension_el, "file"):
        elf = UnpreparedElfFile(filename=extension_el.file)
        if elf.elf_type != ET_EXEC:
            raise MergeError, "All the merged ELF files must be of EXEC type."
        segment_els = extension_el.find_children("segment")
        segs = collect_elf_segments(elf, image.EXTENSION, segment_els,
                                    extension_el.name, extn_namespace,
                                    image, machine, pools)
        segs_ms = [ bootinfo.record_segment_info(extension_el.name, seg,
                                                 image, machine, pools)
                    for seg in segs ]
        for seg_ms in segs_ms:
            pd.attach_memsection(seg_ms)
        # Record any patches being made to the program.
        patch_els = extension_el.find_children("patch")
        collect_patches(elf, patch_els, extension_el.file, image)
        start = elf.entry_point
        name = extension_el.file
    if hasattr(extension_el, "start"):
        # An explicit start attribute overrides the ELF entry point.
        start = extension_el.start
        name = extension_el.name
    # If no file is supplied, look for symbols in the root
    # program.
    if elf is None:
        elf = rp_elf
    elf = elf.prepare(elf.wordsize, elf.endianess)
    start = start_to_value(start, elf)
    bootinfo.add_elf_info(name=name, elf_type=image.EXTENSION,
                          entry_point=start)
    # Balance the earlier push_attrs().
    image.pop_attrs()
def test_prepare(self):
    """A file holding a sectioned segment plus a header segment can be
    prepared without error."""
    elf = UnpreparedElfFile()
    section = UnpreparedElfSection(None, "pants")
    elf.add_section(section)
    segment = SectionedElfSegment(None, align=0x1)
    segment.add_section(section)
    elf.add_segment(segment)
    elf.add_segment(HeaderElfSegment(None))
    elf = elf.prepare(32, '<')
def test_prepare(self):
    """A populated symbol table can be prepared once the owning file has
    a word size and endianess."""
    elf = UnpreparedElfFile()
    section = UnpreparedElfSection(elf, "test_sect")
    table = UnpreparedElfSymbolTable(elf, ".symtab")
    for sym_name in ("foo", "bar"):
        table.add_symbol(ElfSymbol(sym_name, section))
    elf.wordsize = 32
    elf.endianess = "<"
    table = table.prepare(0x1000, 1, 0)
def test_add_reloc(self):
    """Every relocation added to the section is recorded."""
    elf = UnpreparedElfFile()
    elf.wordsize = 32
    section = UnpreparedElfReloc(elf)
    for offset in range(0, 0x10000, 0x1000):
        section.add_reloc(ElfReloc(offset))
    # we expect to have the relocs we added: one per 0x1000 step.
    self.assertEqual(len(section.relocs), 16)
def test_add_section(self):
    """Sections append to a file, but header and data segments refuse
    them."""
    elf = UnpreparedElfFile()
    section = UnpreparedElfSection()
    elf.add_section(section)
    self.assertEqual(elf.sections[-1], section)
    # Neither segment flavour accepts sections.
    for segment in (HeaderElfSegment(), DataElfSegment()):
        self.assertRaises(InvalidArgument, segment.add_section, section)
def test_copy_into(self):
    """Each segment flavour can be copied into another ELF file."""
    src = UnpreparedElfFile()
    dst = UnpreparedElfFile()
    # Data segment with a payload.
    DataElfSegment(src, ByteArray("pants")).copy_into(dst)
    # Sectioned segment holding a single section.
    sectioned = SectionedElfSegment(src)
    sectioned.sections = [UnpreparedElfSection(src, "test")]
    sectioned.copy_into(dst)
    # Data segment whose payload has been emptied.
    emptied = DataElfSegment(src, ByteArray("pants"))
    emptied._data = ByteArray()
    emptied.copy_into(dst)
    # Prepared data segment.
    prepared = DataElfSegment(src, ByteArray("pants"))
    prepared.prepare(34)
    prepared.copy_into(dst)
    # Program-header segment.
    HeaderElfSegment(src).copy_into(dst)
def list_relocs(self, src=None):
    # Debugging aid: dump every relocation in the object file built from
    # `src` (or from all of self.srcs when src is None).
    if src is None:
        srcs = self.srcs
    else:
        srcs = [src]
    for src in srcs:
        print "# Offset Type Symdx Value"
        elf = UnpreparedElfFile(
            os.path.join(base_dir, 'build', src.replace(".s", ".o")))
        # Only SHT_REL/SHT_RELA sections carry relocations.
        for sec in filter(lambda s: s.type in (SHT_REL, SHT_RELA), elf.sections):
            for reloc in sec.relocs:
                # Value is the word currently stored at the reloc target
                # in the section the relocations apply to (sec.info).
                print "%8.8x %8.8x %8.8x %8.8x" % \
                      (reloc.offset, reloc.type, reloc.symdx,
                       sec.info.get_word_at(reloc.offset))
def set_kernel(self, kernel):
    """ Record the kernel, mirroring its ELF identity into a fresh
    executable ELF file."""
    elf = self.elf = UnpreparedElfFile()
    self.endianess = kernel.endianess
    self.wordsize = kernel.wordsize
    elf.elf_type = ET_EXEC
    # Copy the kernel's identification fields across.
    elf.machine = kernel.machine
    elf.osabi = kernel.osabi
    elf.abiversion = kernel.abiversion
    elf.flags = kernel.flags
    elf.entry_point = kernel.entry_point
    # Honour a previously requested fixed program-header offset.
    if self.ph_offset is not None:
        elf.set_ph_offset(self.ph_offset, fixed=True)
def _update_elf_file(target, seglist, scripts, linker_type, orphaned_sections):
    """Annotate an executable ELF with segment names and rewrite it in
    place.  Non-executable files are left untouched."""
    # load the elf file
    elf = UnpreparedElfFile(filename=target)
    if elf.elf_type != ET_EXEC:
        return
    names = get_segment_names(elf, seglist, scripts, linker_type,
                              orphaned_sections)
    add_segment_names(elf, names)
    # write the file
    elf.prepare(elf.wordsize, elf.endianess).to_filename(target)
def test_null_elf(self):
    """An empty ELF file serialises to exactly the ELF header size for
    each word size / endianess combination."""
    cases = [('<', 32, 52), ('>', 32, 52), ('<', 64, 64), ('>', 64, 64)]
    for endianess, wordsize, expectedsize in cases:
        elf = UnpreparedElfFile()
        # A fresh file holds only the null section and no segments.
        self.assertEqual(len(elf.sections), 1)
        self.assertEqual(elf.has_segments(), False)
        prepared = elf.prepare(wordsize, endianess)
        out = File("test.elf", "wb")
        for offset, chunk in prepared.todata():
            out.seek(offset)
            chunk.tofile(out)
        out.flush()
        self.assertEqual(out.size(), expectedsize)
        out.close()
        os.remove("test.elf")
def _get_segments(self):
    """Build one segment of each flavour used by the segment tests:
    empty/full sectioned, raw/prepared header, and data/nobits data."""
    elf_file = UnpreparedElfFile()
    sections = [
        UnpreparedElfSection(elf_file, "test1", data=ByteArray("test1 data")),
        UnpreparedElfSection(elf_file, "test2", data=ByteArray("test2 data")),
    ]
    empty_sec_seg = SectionedElfSegment(None)
    full_sec_seg = SectionedElfSegment(elf_file, sections=sections)
    head_seg = HeaderElfSegment(None)
    prep_head_seg = HeaderElfSegment(None)
    prep_head_seg.prepare(37, PROG_HEADER_SIZE)
    # Both data segments share the same payload object.
    payload = ByteArray("pants")
    full_data_seg = DataElfSegment(None, vaddr=DATA_BASE, paddr=PHYS_BASE,
                                   data=payload)
    nobits_data_seg = DataElfSegment(None, vaddr=DATA_BASE, paddr=PHYS_BASE,
                                     data=payload, memsz=10)
    return (empty_sec_seg, full_sec_seg, head_seg, prep_head_seg,
            full_data_seg, nobits_data_seg)
def test_remove_section(self):
    """remove_section() detaches a section from the file and from any
    segment that holds it; removing an unknown section raises."""
    elf = UnpreparedElfFile()
    section = UnpreparedElfSection(None)
    # Removing a section that was never added is an error.
    self.assertRaises(InvalidArgument, elf.remove_section, section)
    elf.add_section(section)
    self.assertEqual(section in elf.sections, True)
    elf.remove_section(section)
    self.assertEqual(section in elf.sections, False)
    # When the section also lives in a segment, removal clears both.
    segment = SectionedElfSegment(None)
    elf.add_segment(segment)
    elf.add_section(section)
    segment.add_section(section)
    self.assertEqual(section in elf.sections, True)
    self.assertEqual(section in segment.sections, True)
    elf.remove_section(section)
    self.assertEqual(section in elf.sections, False)
    self.assertEqual(section in segment.sections, False)
def test_binary_scatter_load(self):
    """ Scatter-load files should be the same as the segment size.
    This is different to objcopy, which leaves out the final BSS
    section, but binutils can't handle scatter load files anyway.

    Uses assertEqual (assertEquals is a deprecated alias; the rest of
    the file uses assertEqual).
    """
    test_files = ("data/arm_scatter_load", )
    for tf in test_files:
        # Then run through elfweaver modify
        exit_value, ret_stdout, ret_stderr = self._run_command(
            [tf, "--binary", "-o", "test_output"])
        self.assertEqual(exit_value, 0)
        # The file size should be the size as the segment size.
        elf = UnpreparedElfFile(filename=tf)
        self.assertEqual(os.stat("test_output").st_size,
                         elf.segments[0].get_memsz())
def test_long_round_trip(self):
    """Each sample binary survives a load/prepare/write round trip."""
    samples = ["data/null_elf", "data/arm_exec", "data/arm_stripped_exec",
               "data/arm_exec_nosect", "data/arm_object",
               "data/arm_scatter_load", "data/mips64_exec",
               "data/mips32_exec", "data/ia32_exec", "data/amd64_exec",
               "data/ia64_exec"]
    for file_name in samples:
        elf = UnpreparedElfFile(filename=file_name)
        elf.prepare(elf.wordsize, elf.endianess).to_filename("elf.tmp")
        # FIXME: We can't be sure that the files produced will be byte for
        # byte equal at this point. We need to come up with a test for
        # equivalance independant of things such as section ordering.
        # self.assertEqual(open("elf.tmp", "rb").read(), open(file_name, "rb").read(), "%s: failed to read back correctly" % file_name)
        os.remove("elf.tmp")
def check_contents(self):
    # Verify that, after relocation, each word in the binary's .text
    # section matches the expected value recorded in self.relocs.
    print "[CONT] %s" % self.bin,
    elf = UnpreparedElfFile(
        filename=os.path.join(base_dir, 'build', self.bin))
    text = elf.find_section_named(".text")
    try:
        # for offset in self.addrs:
        # Expected-value table is keyed by absolute offset; convert to a
        # .text-relative offset when reading the word.
        for offset, _ in self.relocs[self.bin].items():
            if text.get_word_at(offset - text.address) != self.relocs[
               self.bin][offset][1]:
                print " -> FAILED!"
                self.list_contents()
                raise Exception, \
                      ("ELF contents differs from expected! " + \
# "Got: %8.8x\nExpected: %8.8x") % (text.get_word_at(offset),
                       "Got: %s\nExpected: %s") % (text.get_word_at(offset - text.address),
                       self.relocs[self.bin][offset][1])
        print " -> OK"
    except KeyError:
        # No expectations recorded for this binary: dump everything.
        print " -> FAILED!"
        self.list_contents()
def test_add_symbols(self):
    """add_symbols() creates a symbol table on demand, tracks symbols per
    section, and supports full and suffix name lookups."""
    elf = UnpreparedElfFile()
    # Adding symbols to a file with no symbol table
    # should create a new symbol table
    self.assertEqual(elf.get_symbol_table(), None)
    self.assertEqual(elf.get_symbols(), [])
    elf.add_symbols([])
    self.assertNotEqual(elf.get_symbol_table(), None)
    # Apart from the opening null symbol, we should have nothing
    self.assertEqual(elf.get_symbols()[1:], [])
    sect_a = UnpreparedElfSection(elf)
    syms_a = [ElfSymbol("a", sect_a), ElfSymbol("b", sect_a)]
    elf.add_symbols(syms_a)
    self.assertEqual(elf.get_symbols()[1:], syms_a)
    sect_b = UnpreparedElfSection(elf)
    syms_b = [ElfSymbol("c", sect_b), ElfSymbol("d", sect_b)]
    elf.add_symbols(syms_b)
    # Symbols are tracked per owning section.
    self.assertEqual(elf.section_symbols(sect_a), syms_a)
    self.assertEqual(elf.section_symbols(sect_b), syms_b)
    by_name = {}
    for sym_name in ["foo", "bar"]:
        by_name[sym_name] = ElfSymbol(sym_name, sect_a)
    elf.add_symbols(by_name.values())
    for sym_name in by_name.keys():
        # Full-string match and partial suffix match both resolve.
        self.assertEqual(elf.find_symbol(sym_name), by_name[sym_name])
        self.assertEqual(elf.find_symbol(sym_name[1:]), by_name[sym_name])
    self.assertEqual(elf.find_symbol("missing"), None)
def collect_xml(self, iguana_el, ignore_name, namespace, machine, pools, kernel, image): """Handle an Iguana Server Compound Object""" cell = \ kernel.register_cell(iguana_el.name, iguana_el.kernel_heap, max_caps = getattr(iguana_el, "caps", None)) # New namespace for objects living in the root program's PD. ig_namespace = namespace.add_namespace(iguana_el.name) self.namespace = ig_namespace self.s_namespace = self.namespace self.name = iguana_el.name self.space = \ cell.register_space(self.namespace, "MAIN", is_privileged = True, max_clists = getattr(iguana_el, "clists", None), max_spaces = getattr(iguana_el, "spaces", None), max_mutexes = getattr(iguana_el, "mutexes", None), max_threads = getattr(iguana_el, "threads", None), plat_control = \ getattr(iguana_el, "platform_control", False)) self.setup_bootinfo_pools(namespace, pools) self.setup_device_namespace(namespace, machine) self.env = CellEnvironment(iguana_el.name, self.namespace, machine, image, kernel, self.space.mappings) cell.env = self.env pd = weaver.cells.iguana.bootinfo.RootServerPD(iguana_el.name, ig_namespace) # Declare the default memory pools. def_virtpool = getattr(iguana_el, "virtpool", pools.get_default_virtual_pool()) def_physpool = getattr(iguana_el, "physpool", pools.get_default_physical_pool()) def_pager = getattr(iguana_el, "pager", None) def_direct = getattr(iguana_el, "direct", None) # Record any IRQs that are assigned to Iguana for irq_el in iguana_el.find_children("irq"): self.space.register_irq(irq_el.value) self.bootinfo.set_system_default_attrs(def_virtpool, def_physpool, image, def_pager, def_direct) # Iguana is not aware of segment ids. # The old mapping API uses segment 0 for all mapping ops. # Segment 0 needs to cover all of physical memory. # Subsequent segments will be created but never used, # but must still be mapped! 
# XXX: VVVVV THIS IS PURE EVIL VVVVV physpool_attrs = \ image.new_attrs(self.s_namespace.add_namespace("physpool_hack")) physpool_attrs.phys_addr = 0 #first_phys_base physpool_attrs.size = 0xfffff000 #first_phys_end physpool_attrs.attach = PF_R | PF_W | PF_X physpool_attrs.cache_policy = 0xff # XXX: Define me properly physpool_attrs.mem_type = physpool_attrs.unmapped self.space.register_mapping(physpool_attrs) # XXX: ^^^^^ THIS IS PURE EVIL ^^^^^ filename = os.path.join(iguana_el._path, iguana_el.file) elf = UnpreparedElfFile(filename=filename) pd.set_default_pools(image, self.bootinfo) # Collect the object environment pd.add_env_ms(image, ig_namespace, machine, pools) env, extra_ms = \ collect_environment_element(iguana_el.find_child('environment'), ig_namespace, machine, pools, image, self.bootinfo) segment_els = iguana_el.find_children("segment") segs = collect_elf_segments(elf, image.ROOT_PROGRAM, segment_els, filename, [], ig_namespace, image, machine, pools) self.elf_prog_segments = segs for seg in segs: self.space.register_mapping(seg.attrs) if elf.elf_type != ET_EXEC: raise MergeError, "All the merged ELF files must be of EXEC type." # Find out which version of libokl4 that iguana was built # against sym = elf.find_symbol("okl4_api_version") if sym == None: raise MergeError("Unable to locate the symbol 'okl4_api_version' " "in file \"%s\". Cells must link with libokl4." % filename) self.api_version = elf.get_value(sym.value, sym.size, elf.endianess) if self.api_version == None: raise MergeError("Unable to read the symbol 'okl4_api_version' in " "file \"%s\". Cells must link with libokl4." % filename) # Record any patches being made to the program. 
patch_els = iguana_el.find_children("patch") for patch in getattr(Iguana_el, "extra_patches", []): addr = get_symbol(elf, patch[0], True) if addr == None: continue addr = int(addr[0])+ int(patch[1]) new_patch = Patch_el(address=hex(addr), bytes=patch[2], value=patch[3]) patch_els.append(new_patch) collect_patches(elf, patch_els, filename, image) for extension_el in iguana_el.find_children("extension"): if not ignore_name.match(extension_el.name): collect_extension_element(extension_el, pd, ig_namespace, elf, image, machine, self.bootinfo, pools) # Collect the main thread. The root program can only have one # thread, so this call chiefly is used to collect information # about the stack. # # The stack is not set up as a memsection, so it is not put in the # object environment. if not hasattr(iguana_el, 'priority'): iguana_el.priority = 255 thread = collect_thread(elf, iguana_el, ignore_name, ig_namespace, image, machine, pools, self.space, entry = elf.entry_point, name = 'iguana', namespace_thread_name = "main", cell_create_thread = True) pd.add_thread(thread) # Collect the heap. Is there no element, create a fake one for # the collection code to use. # # The heap is not set up as a memsection, so it is not put in the # object environment. heap_el = iguana_el.find_child('heap') if heap_el is None: heap_el = ParsedElement('heap') heap_ms = collect_memsection_element(heap_el, ignore_name, ig_namespace, image, machine, pools) pd.attach_heap(heap_ms) self.space.register_mapping(heap_ms.ms.attrs) self.space.register_mapping(thread.get_stack().get_ms().attrs) self.elf_segments.extend([pd.env_ms.get_ms(), thread.get_stack().get_ms(), heap_ms.get_ms()] + [ms.get_ms() for ms in extra_ms]) pd.add_environment(env) pd.utcb_size = image.utcb_size self.bootinfo.add_rootserver_pd(pd) # And now parse the programs and pd's collect_program_pd_elements(iguana_el, ignore_name, ig_namespace, image, machine, self.bootinfo, pools, kernel, cell)
def link(files, section_vaddr=None, kernel_soc=True, rvct=False,
         verbose=False, verbose_merge=False, verbose_script=False,
         verbose_relocs=False):
    """ Perform the actual link, split so that elfweaver merge can call
    this easily.

    Merges the given object files into one, resolves and merges their
    symbol tables, lays the result out via a linker-script function,
    applies relocations and, if all relocations could be applied, marks
    the result executable with its entry point at _start.  Returns the
    resulting UnpreparedElfFile.
    """
    # Handle merging of multiple files.
    # Use the first provided elf file as the base file. For each additonal file
    # merge the sections (.text -> .text) but do no other merging i.e. do not
    # merge any .text.foo into .text. Update the symbol table and any relocs
    # to take into account the merging then merge in the symbol table and
    # relocation sections.
    base_elf = UnpreparedElfFile(files[0])
    base_elf.sections = []
    if verbose:
        print "Using %s as base file" % files[0]
    base_sym_tab = None
    for merge_file in files:
        merge_elf = UnpreparedElfFile(merge_file)
        if verbose:
            print "Merging in file %s" % merge_file
        sym_tab = [ sym_tab for sym_tab in merge_elf.sections
                    if sym_tab.type == SHT_SYMTAB ]
        # Things get really, really ugly if there is more than one symbol table,
        # fortunately sane compilers / linkers appear to only have one anyway.
        assert len(sym_tab) == 1
        sym_tab = sym_tab[0]
        merged_sects = []
        reloc_sects = []
        # Skip the null section of every file except the first one merged.
        ind = 1
        if base_elf.sections == []:
            ind = 0
        for sect in merge_elf.sections[ind:]:
            # Symbol table and relocations require more specific magic and get
            # handled later on
            if sect.type == SHT_SYMTAB:
                continue
            elif sect.type in (SHT_REL, SHT_RELA):
                reloc_sects.append(sect)
                continue
            found_sect = base_elf.find_section_named(sect.name)
            if found_sect == None:
                # Don't need to merge this section as there is no corrosponding
                # entry in the base file, so just go ahead and add it.
                base_elf.add_section(sect)
                if verbose_merge:
                    print "\tAdding section %s" % sect.name
                continue
            merge_sections(found_sect, sect, merged_sects, None,
                           verbose_merge)
        # Update any symbols or relocations that relied on a merged section
        # to correctly point at the new section at the correct offset
        if verbose:
            print "\tUpdating relocation sections with merged data"
        sym_tab.update_merged_sections(merged_sects)
        for sect in reloc_sects:
            sect.update_merged_sections(merged_sects)
        # Merge the symbol tables, this is more just tricky than any deep magic
        # * For each undefined symbol in the base file try to find a match
        #   in the input file. If we find one then replace the base file's
        #   symbol with the defined one. Keep a list of the mappings from the
        #   input files symbols to the new base file symbol index.
        # * Merge the two symbol tables. For each symbol in the input file's
        #   symbol table;
        #   * If it is undefined, try to find a match in the base file's symbol
        #     table. If found record the mapping from old symbol to new index.
        #   * If it is defined or there is no match copy it over, again keeping
        #     a mapping from old symbol to new index.
        # * Update all the relocations in the input file to correctly point at
        #   the new symbol table and the correct symbol index. And merge in
        #   the relocations sections if a section already exists or add them.
        if base_sym_tab:
            if verbose:
                print "\tMerging symbol tables"
            merged_syms = base_sym_tab.resolve(sym_tab)
            merged_syms += base_sym_tab.merge(sym_tab)
            for sect in reloc_sects:
                sect.update_merged_symbols(base_sym_tab, merged_syms)
        else:
            # First symbol table seen becomes the base table.
            if verbose:
                print "\tAdding symbol table"
            base_elf.add_section(sym_tab)
            base_sym_tab = sym_tab
        for sect in reloc_sects:
            found_sect = base_elf.find_section_named(sect.name)
            if found_sect == None:
                base_elf.add_section(sect)
                if verbose_merge:
                    print "\tAdding relocation section %s" % sect.name
            else:
                found_sect.append_relocs(sect.get_relocs())
                if verbose_merge:
                    print "\tMerging in relocation section %s" % sect.name
    # Now before we lay everything out we need to adjust the size of any
    # sections (such as the .bss or .got) that may increase in size due
    # to allocation of symbols, etc.
    if verbose:
        print "Allocating symbol and relocation data"
    base_sym_tab.allocate_symbols()
    reloc_sects = []
    for sect in base_elf.sections:
        if sect.type in (SHT_REL, SHT_RELA):
            #pylint: disable-msg=E1103
            sect.set_verbose(verbose_relocs)
            sect.allocate_relocs()
            reloc_sects.append(sect)
    # Do any linker scripty things we need to do. For the moment we either do
    # a standard link or a kernel+soc link, the actions performed are in python
    # functions currently but may be moved into external scripts later.
    if kernel_soc:
        if verbose:
            print "Performing a kernel + soc link, rvct", rvct
        kernel_link_func_types = KERNEL_SOC_LINKER_SCRIPT[base_elf.machine]
        if not rvct:
            kernel_link_func = kernel_link_func_types['gnu']
        else:
            kernel_link_func = kernel_link_func_types['rvct']
        segments, merged_sects, discarded_sects = \
                  perform_link(base_elf, base_sym_tab, kernel_link_func,
                               section_vaddr, verbose_script)
    else:
        if verbose:
            print "Performing standard link"
        segments, merged_sects, discarded_sects = \
                  perform_link(base_elf, base_sym_tab, standard_link,
                               section_vaddr, verbose_script)
    # Remove any symbols relating to discarded sections and update for any of
    # the merged sections
    if verbose:
        print "Updating symbols for discarded and merged sections"
    discarded_syms = base_sym_tab.update_discarded_sections(discarded_sects)
    base_sym_tab.update_merged_sections(merged_sects)
    if verbose:
        print "Updating relocation sections with new symbols"
    for sect in reloc_sects:
        sect.update_discarded_symbols(discarded_syms)
        sect.update_discarded_sections(discarded_sects)
        sect.update_merged_sections(merged_sects)
    # Segments don't have the correct flags yet, go through and update them
    # based on the types of the included sections.
    for seginfo in segments:
        flags = PF_R
        for sect in seginfo[2]:
            if sect.flags & SHF_WRITE:
                flags |= PF_W
            if sect.flags & SHF_EXECINSTR:
                flags |= PF_X
        seginfo[0] = flags
    for flags, base_addr, sects in segments:
        # Empty segments are dropped rather than emitted.
        if len(sects) == 0:
            continue
        new_seg = SectionedElfSegment(base_elf, PT_LOAD, base_addr,
                                      base_addr, flags, SEGMENT_ALIGN,
                                      sections=sects)
        base_elf.add_segment(new_seg)
        if verbose:
            print "Adding segment to file"
            print "\tFlags %d (R %d, W %d, X %d)" % (flags,
                                                     flags & PF_R > 0,
                                                     flags & PF_W > 0,
                                                     flags & PF_X > 0)
            print "\tBase address %x" % base_addr
            print "\tSections", [sect.name for sect in sects]
    # All sections are laid out and at their final size, go through and update
    # symbol values to their final position
    if verbose:
        print "Updating symbols"
    base_sym_tab.update_symbols()
    relocated_all = True
    # Apply all the relocs we know how to handle
    relocs_remove = []
    if verbose:
        print "Applying relocations"
    for sect in reloc_sects:
        if sect.apply():
            relocs_remove.append(sect)
        else:
            relocated_all = False
    if verbose:
        print "Applied all relocations", relocated_all
    for sect in relocs_remove:
        base_elf.remove_section(sect)
    if relocated_all:
        # Set the ELF entry point to _start
        base_elf.entry_point = base_elf.find_symbol("_start").value
        # Set ELF type to an executable
        base_elf.elf_type = ET_EXEC
        if verbose:
            print "Setting entry point to %x" % base_elf.entry_point
    return base_elf
def modify_cmd(args):
    """Merge command call from main. This parses command line arguments
    and calls merge, which does all the main work.

    Fix: the -o/--output option's help text previously read "Display
    the ELF file header" (copy-pasted from an unrelated option); it now
    describes the option.
    """
    parser = optparse.OptionParser("%prog modify file [options]",
                                   add_help_option=0)
    parser.add_option("-H", "--help", action="help")
    parser.add_option("--adjust", nargs=2, dest="adjust", action="append",
                      default=[])
    parser.add_option("--physical", dest="physical", action="store_true")
    parser.add_option("--physical_entry", dest="physical_entry",
                      action="store_true")
    parser.add_option("--change", dest="change", action="append", nargs=2,
                      default=[])
    parser.add_option("--merge_sections", dest="merge_sections",
                      action="append", default=[])
    parser.add_option("--remove_nobits", dest="remove_nobits",
                      action="store_true")
    parser.add_option("-o", "--output", dest="output", metavar="FILE",
                      help="Write the modified ELF file to FILE")
    parser.add_option("--no-section-headers", dest="remove_section_headers",
                      action="store_true")
    (options, args) = parser.parse_args(args)
    if len(args) != 1:
        parser.error("Must supply a file to operate on.")
    filename = args[0]
    elf = UnpreparedElfFile(filename=filename)
    # --adjust: add an offset to a header field; a leading +/- makes the
    # adjustment relative rather than absolute.
    for (field_desc, offset) in options.adjust:
        absolute = True
        if offset.startswith("+") or offset.startswith("-"):
            absolute = False
        offset = long(offset, 0)
        field_desc = field_desc.split(".")
        adjust(elf, field_desc, offset, absolute)
    # --change: rewrite occurrences of old=new in the named field.
    for (field_desc, mod) in options.change:
        field_desc = field_desc.split(".")
        (old, new) = [long(x, 0) for x in mod.split("=")]
        change(elf, field_desc, old, new)
    for section_name in options.merge_sections:
        merge_sections(elf, section_name)
    # --physical_entry: rewrite the entry point as a physical address.
    if options.physical_entry:
        entry = elf.entry_point
        for segment in elf.segments:
            if segment.contains_vaddr(entry):
                offset = segment.vaddr - segment.paddr
                elf.entry_point = entry - offset
    # --physical: relocate all segments (and their sections) to their
    # physical addresses.
    if options.physical:
        for segment in elf.segments:
            offset = segment.vaddr - segment.paddr
            segment.vaddr = segment.paddr
            if segment.has_sections():
                for section in segment.get_sections():
                    section.address -= offset
    if options.remove_nobits:
        elf.remove_nobits()
    if options.remove_section_headers:
        elf.remove_section_headers()
    elf = elf.prepare(elf.wordsize, elf.endianess)
    # Default to rewriting the input file in place.
    outfile = filename
    if options.output:
        outfile = options.output
    elf.to_filename(outfile)
def collect_program_element(program_el, ignore_name, namespace, image,
                            machine, bootinfo, pools):
    """Collect the attributes of a program element.

    Builds a protection domain (PD) for the program described by
    program_el: loads its ELF file, collects its environment, segments,
    patches, main thread, virtual devices, extra threads, memsections,
    zones and heap, and registers the resulting PD with bootinfo.

    program_el  -- parsed XML element describing the program.
    ignore_name -- compiled regex; children whose names match are skipped.
    namespace   -- parent namespace; a child namespace is created for the PD.
    image/machine/bootinfo/pools -- weaver-wide collection state.

    Raises MergeError if the ELF is not ET_EXEC or if the global device
    namespace is missing.
    """
    # New namespace for objects living in the program's PD.
    prog_namespace = namespace.add_namespace(program_el.name)
    pd = weaver.bootinfo.PD(program_el.name, prog_namespace, image,
                            machine, pools)

    elf = UnpreparedElfFile(filename=program_el.file)

    # Only fully linked executables can be merged.
    if elf.elf_type != ET_EXEC:
        raise MergeError, "All the merged ELF files must be of EXEC type."

    bootinfo.add_elf_info(name = program_el.file,
                          elf_type = image.PROGRAM,
                          entry_point = elf.entry_point)

    pd.elf = elf

    # Optional per-program overrides; fall back to inherited defaults.
    virtpool = getattr(program_el, 'virtpool', None)
    physpool = getattr(program_el, 'physpool', None)
    direct = getattr(program_el, 'direct', None)

    pd.set_platform_control(getattr(program_el, "platform_control", False))

    if hasattr(program_el, 'pager'):
        pager = make_pager_attr(program_el.pager)
    else:
        pager = None

    # Push the overriding attributes for the program.
    # NOTE: paired with image.pop_attrs() at the end of this function.
    image.push_attrs(virtual = virtpool,
                     physical = physpool,
                     pager = pager,
                     direct = direct)

    pd.set_default_pools(image, bootinfo)

    # Collect the object environment.
    env = collect_environment_element(program_el.find_child('environment'),
                                      prog_namespace, machine, pools,
                                      image, bootinfo)

    # Collect the program's ELF segments and attach each resulting
    # memsection to the PD.
    segment_els = program_el.find_all_children("segment")
    segs = collect_elf_segments(elf, image.PROGRAM, segment_els,
                                program_el.name, prog_namespace, image,
                                machine, pools)
    segs_ms = [bootinfo.record_segment_info(program_el.name, seg,
                                            image, machine, pools)
               for seg in segs]
    for seg_ms in segs_ms:
        pd.attach_memsection(seg_ms)

    # Record any patches being made to the program.
    patch_els = program_el.find_children("patch")
    collect_patches(elf, patch_els, program_el.file, image)

    # Collect the main thread.  Its entry point is the ELF entry point.
    thread = collect_thread(elf, program_el, ignore_name, prog_namespace,
                            image, machine, pools,
                            entry = elf.entry_point,
                            name = program_el.name,
                            namespace_thread_name = "main")
    pd.add_thread(thread)
    pd.set_server_thread(thread)

    # Add the virtual device elements.
    # Virtual devices always get added to the global namespace because
    # they should be globally unique.
    server_spawn_nvdevs = 0
    dev_ns = namespace.root.get_namespace("dev")

    if dev_ns is None:
        raise MergeError, "Device namespace does not exist!"

    for v_el in program_el.find_children('virt_device'):
        virt_dev = pd.add_virt_dev(v_el.name, program_el.name, pd, thread,
                                   server_spawn_nvdevs)
        create_alias_cap(virt_dev, dev_ns)
        server_spawn_nvdevs += 1

    # Record the main thread and its stack in the environment.
    env.add_entry(key = "MAIN", cap_name = 'main/master')
    env.add_entry(key = "MAIN/STACK",
                  cap_name = 'main/stack/master',
                  attach = thread.get_stack().get_attrs().attach)

    # If marked, make sure that the program is exported to every
    # environment so that it can be found by other programs.
    if hasattr(program_el, 'server'):
        bootinfo.add_server(key = program_el.server,
                            cap_name = prog_namespace.abs_name('main') +
                            '/stack/master')

    # Collect remaining threads.  The ELF must be prepared first so the
    # secondary threads resolve the "_thread_start" entry symbol.
    elf = pd.elf = pd.elf.prepare(image.wordsize, image.endianess)

    for thread_el in program_el.find_children('thread'):
        if not ignore_name.match(thread_el.name):
            thread = collect_thread(elf, thread_el, ignore_name,
                                    prog_namespace, image, machine, pools,
                                    entry = "_thread_start")
            pd.add_thread(thread)

            # Record the thread and its stack in the environment.
            env.add_entry(key = thread.get_name(),
                          cap_name = thread.get_name() + '/master')
            env.add_entry(key = thread.get_name() + "/STACK",
                          cap_name = thread.get_name() + '/stack/master',
                          attach = thread.get_stack().get_attrs().attach)

    # Collect any other memsections in the program.
    for ms_el in program_el.find_children('memsection'):
        if not ignore_name.match(ms_el.name):
            ms = collect_memsection_element(ms_el, ignore_name,
                                            prog_namespace, image,
                                            machine, pools)
            pd.attach_memsection(ms)
            image.add_group(0, [ms.get_ms()])
            env.add_entry(key = ms.get_name(),
                          cap_name = ms.get_name() + '/master',
                          attach = ms.get_attrs().attach)

    # Collect any zones in the program.
    for zone_el in program_el.find_children('zone'):
        (zone, non_zone_ms) = \
               collect_zone_element(zone_el, ignore_name, prog_namespace,
                                    pools, image, bootinfo, machine)

        pd.attach_zone(zone)

        # Attach memsections that aren't part of the zone to the program.
        for ms in non_zone_ms:
            pd.attach_memsection(ms)
            image.add_group(0, [ms.get_ms()])
            env.add_entry(key = ms.get_name(),
                          cap_name = ms.get_name() + '/master',
                          attach = ms.get_attrs().attach)

    # Collect the heap.  If there is no element, create a fake one for
    # the collection code to use.
    heap_el = program_el.find_child('heap')
    if heap_el is None:
        heap_el = ParsedElement('heap')
    heap_ms = collect_memsection_element(heap_el, ignore_name,
                                         prog_namespace, image,
                                         machine, pools)
    pd.attach_heap(heap_ms)
    image.add_group(0, [heap_ms.get_ms()])

    # Fill env with default values.
    env.add_entry(key = "HEAP",
                  cap_name = 'heap/master',
                  attach = heap_ms.get_attrs().attach)
    env.add_entry(key = "HEAP_BASE", base = heap_ms)
    env.add_entry(key = "HEAP_SIZE", value = heap_ms.get_attrs().size)

    pd.add_environment(env)
    bootinfo.add_pd(pd)

    # Balance the push_attrs() done above.
    image.pop_attrs()