Example #1
    def add_segment(self, segment_index, section_prefix,
                    segment, file_type, attrs, machine, pools):
        """Create a segment for inclusion in the image."""
        if not valid_segment(segment):
            return None

        # Remove any pathname components from the prefix.
        section_prefix = os.path.basename(section_prefix)

        # Prepare the image for inclusion.
        new_segment = segment.copy()

        # Align segments to the page boundary if it is safe to do so.
        # RVCT tends to give a very conservative alignment (1 word) to
        # segments that could be page aligned.
        if new_segment.vaddr % machine.min_page_size() == 0 and \
               new_segment.align < machine.min_page_size():
            new_segment.align = machine.min_page_size()

        # Rename the sections in the segment, giving each the supplied
        # prefix
        if new_segment.has_sections():
            for section in new_segment.get_sections():
                assert section.link is None

                sec_name = section.name
                # Strip the GNU-style leading dot from section names.
                if sec_name[0] == ".":
                    sec_name = sec_name[1:]

                section.name = "%s.%s" % (section_prefix, sec_name)
                if section_prefix != "kernel":
                    for symbol in section.symbols:
                        symbol.name = "%s-%s" % (section_prefix, symbol.name)
                self.elf.add_section(section)

        iseg = ImageSegment(new_segment, segment_index, file_type,
                            attrs, pools)

        if attrs.protected:
            if self.protected_segment is not None:
                raise MergeError(
                    'Only one segment can be declared protected. '
                    'Found "%s" and "%s".' %
                    (self.protected_segment.get_attrs().abs_name(),
                     attrs.abs_name()))

            self.protected_segment = iseg

        # Kernel segments need to be at the start of the memory pools,
        # so place them in a separate list to keep track of them.
        if file_type == Image.KERNEL:
            self.kernel_segments.append(iseg)
        else:
            self.segments.append(iseg)

        self.elf.add_segment(new_segment)
        return iseg
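
The alignment fix-up in the middle of add_segment is worth reading in isolation: a segment whose virtual address already sits on a page boundary, but whose recorded alignment is smaller (RVCT often records a single word), has its alignment widened to the machine's minimum page size. A minimal standalone sketch of that rule, assuming a hypothetical 4 KB minimum page size; the helper below is illustrative and not part of the weaver API:

# Standalone sketch of the page-alignment promotion above; the 4 KB page
# size and the helper itself are illustrative only.
MIN_PAGE_SIZE = 0x1000

def promote_alignment(vaddr, align, page_size=MIN_PAGE_SIZE):
    """Return the alignment to record for a segment loaded at vaddr."""
    if vaddr % page_size == 0 and align < page_size:
        return page_size
    return align

assert promote_alignment(0x8000, 4) == 0x1000  # word-aligned but page-addressed: widened
assert promote_alignment(0x8004, 4) == 4       # not on a page boundary: left alone
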
Example #2
    def add_memsection(self, attrs, machine, pools):
        """
        Create a memsection for inclusion in the image.

        If the data or file attributes of 'attrs' are non-None, then an
        ELF segment will be created; otherwise the memsection will be
        included in the address layout process, but will be created at
        runtime by the Iguana server.
        """
        new_segment = None
        in_image = False

        if attrs.file is not None or attrs.data is not None:
            if attrs.file is not None:
                the_file = open(attrs.file, 'r')
                data = ByteArray(the_file.read())
                the_file.close()
            else:
                data = attrs.data
                
            if attrs.size is not None and len(data) < attrs.size:
                data.extend([0] * (attrs.size - len(data)))

            attrs.size = data.buffer_info()[1] * data.itemsize

            sect = UnpreparedElfSection(attrs.name, SHT_PROGBITS,
                                        attrs.virt_addr,
                                        data = data,
                                        flags = SHF_WRITE | SHF_ALLOC)
            self.elf.add_section(sect)
            new_segment = SectionedElfSegment(PT_LOAD, attrs.virt_addr,
                                              attrs.phys_addr, PF_R | PF_W,
                                              machine.min_page_size(),
                                              sections=[sect])
            self.elf.add_segment(new_segment)
            in_image = True
            
        obj = ImageMemsection(new_segment, attrs, pools)

        # If the memsection has data that goes into the image, then
        # put it at the front of the list so that it will be near the
        # code segments.
        if in_image:
            self.memsections = [obj] + self.memsections
        else:
            self.memsections.append(obj)

        return obj
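
The padding and size calculation are the only subtle part of add_memsection: the file or inline data is zero-extended up to the requested size, and the final size is taken from the backing buffer. A small sketch of the same arithmetic, assuming ByteArray behaves like array.array('B') (it exposes extend(), buffer_info() and itemsize in the same way):

from array import array

data = array('B', b'hello')      # 5 bytes of file or inline data
requested_size = 8               # hypothetical attrs.size

# Zero-pad up to the requested memsection size, as add_memsection does.
if requested_size is not None and len(data) < requested_size:
    data.extend([0] * (requested_size - len(data)))

# buffer_info() returns (address, item_count); item_count * itemsize is the
# byte length that ends up back in attrs.size.
size_in_bytes = data.buffer_info()[1] * data.itemsize
assert size_in_bytes == 8
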
Example #3
    def add_segment(self, segment_index, section_prefix, segment, file_type,
                    attrs, machine, pools):
        """Create a segment for inclusion in the image."""
        if not valid_segment(segment):
            return None

        # Remove any pathname components from the prefix.
        section_prefix = os.path.basename(section_prefix)

        # Prepare the image for inclusion.
        new_segment = segment.copy_into(self.elf)

        # Align segments to the page boundary if it is safe to do so.
        # RVCT tends to give a very conservative alignment (1 word) to
        # segments that could be page aligned.
        if new_segment.vaddr % machine.min_page_size() == 0 and \
               new_segment.align < machine.min_page_size():
            new_segment.align = machine.min_page_size()

        # Rename the sections in the segment, giving each the supplied
        # prefix
        if new_segment.has_sections():
            for section in new_segment.get_sections():
                assert section.link is None

                sec_name = section.name
                # Strip the GNU-style leading dot from section names.
                if sec_name[0] == ".":
                    sec_name = sec_name[1:]

                section.name = "%s.%s" % (section_prefix, sec_name)
                # Add the program name as a prefix to all non-kernel
                # symbols, except for those symbols that can't have a
                # prefix added.
                if section_prefix != "kernel":
                    for symbol in self.elf.section_symbols(section):
                        if can_prefix_symbol(symbol):
                            symbol.name = "%s-%s" % (section_prefix,
                                                     symbol.name)
                            self.elf.get_symbol_table().link.add_string(
                                symbol.name)
        self.elf.add_segment(new_segment)

        iseg = ImageSegment(new_segment, segment_index, file_type, attrs,
                            pools)

        if attrs.protected:
            if self.protected_segment is not None:
                raise MergeError(
                    'Only one segment can be declared protected. '
                    'Found "%s" and "%s".' %
                    (self.protected_segment.get_attrs().abs_name(),
                     attrs.abs_name()))

            self.protected_segment = iseg

        # Kernel segments need to be at the start of the memory pools,
        # so place them in a separate list to keep track of them.
        if file_type == Image.KERNEL:
            self.kernel_segments.append(iseg)
        else:
            self.segments.append(iseg)

        return iseg
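
This later revision of add_segment applies the same renaming convention as Example #1: section names lose any GNU-style leading dot and gain the program prefix joined with a dot, while non-kernel symbols gain the prefix joined with a dash. A purely illustrative restatement of that convention (these helpers are not part of the weaver code):

def prefixed_section_name(prefix, name):
    """Illustrative: '.text' under prefix 'hello' becomes 'hello.text'."""
    if name.startswith("."):
        name = name[1:]
    return "%s.%s" % (prefix, name)

def prefixed_symbol_name(prefix, name):
    """Illustrative: symbol 'main' under prefix 'hello' becomes 'hello-main'."""
    return "%s-%s" % (prefix, name)

assert prefixed_section_name("hello", ".rodata") == "hello.rodata"
assert prefixed_symbol_name("hello", "main") == "hello-main"
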
Example #4
    def add_memsection(self, attrs, machine, pools, section_prefix=None):
        """
        Create a memsection for inclusion in the image.

        If the data or file attributes of 'attrs' are non-None, then an
        ELF segment will be created; otherwise the memsection will be
        included in the address layout process, but will be created at
        runtime by the Iguana server.
        """
        new_segment = None
        in_image = False

        if attrs.file is not None or attrs.data is not None:
            if attrs.file is not None:
                the_file = open(attrs.file, 'r')
                data = ByteArray(the_file.read())
                the_file.close()
            else:
                data = attrs.data

            if attrs.size is not None and len(data) < attrs.size:
                data.extend([0] * (attrs.size - len(data)))

            attrs.size = data.buffer_info()[1] * data.itemsize

            if section_prefix:
                section_name = "%s.%s" % (section_prefix, attrs.ns_node.name)
            else:
                section_name = attrs.ns_node.name
            sect = UnpreparedElfSection(self.elf,
                                        section_name,
                                        SHT_PROGBITS,
                                        attrs.virt_addr,
                                        data=data,
                                        flags=SHF_WRITE | SHF_ALLOC)
            self.elf.add_section(sect)
            new_segment = SectionedElfSegment(self.elf,
                                              PT_LOAD,
                                              attrs.virt_addr,
                                              attrs.phys_addr,
                                              PF_R | PF_W,
                                              machine.min_page_size(),
                                              sections=[sect])
            self.elf.add_segment(new_segment)
            in_image = True
# This check should be added, but currently fails with iguana.
#         elif attrs.size is None:
#             raise MergeError, \
#                   'No size attribute given for memsection \"%s\".' % attrs.abs_name()

        obj = ImageMemsection(new_segment, attrs, pools)

        # If the memsection has data that goes into the image, then
        # put it at the front of the list so that it will be near the
        # code segments.
        if in_image:
            self.memsections = [obj] + self.memsections
        else:
            self.memsections.append(obj)

        return obj
Example #5
    def create_ops(self, kernel, elf, machine):
        self.patches = []
        kernel_data = ByteArray()
        data_vaddr = kernel.base_segment.segment.vaddr + \
                (_0(kernel.heap_attrs.phys_addr) - kernel.base_segment.segment.paddr)
        start_data_vaddr = data_vaddr
        mappings = self.map.Mappings()

        total_kernel_size = (_0(kernel.heap_attrs.phys_addr) + \
                             kernel.heap_attrs.size) - \
                             kernel.base_segment.segment.paddr
        page_size = self.largest_page(kernel.base_segment.segment.vaddr,
                                      total_kernel_size,
                                      machine.page_sizes)
        mappings.add_mapping(kernel.base_segment.segment.vaddr,
                kernel.base_segment.segment.paddr,
                page_size, 1, 0x0, weaver.machine.CACHE_POLICIES['default'])

        # hacked for arm at the moment
        if self.arch_lookup[self.cpu] == 'arm':
            # user helpers page
            if elf:
                user_helpers_initial_vaddr = elf.find_symbol("user_atomic_cmp_set").value
            else:
                user_helpers_initial_vaddr = 0
            user_paddr = user_helpers_initial_vaddr - kernel.base_segment.segment.vaddr + \
                kernel.base_segment.segment.paddr
            #print "user_helpers %x -> %x" % (kernel.base_segment.segment.vaddr-4096, user_paddr)
            mappings.add_mapping(kernel.base_segment.segment.vaddr - 4096,
                    user_paddr, 4096, 1, 0x1, weaver.machine.CACHE_POLICIES['default'])

        # Format: (name, prio, entry, sp, r0)
        # TODO: Give main thread cell_environment in r0
        threads = []

        # Collect live thread info
        for (name, cell) in kernel.cells.items():
            for space in cell.spaces:
                for thread in space.threads:
                    threads.append(("nanotest",
                                    thread.priority,
                                    thread.entry,
                                    thread.get_sp(),
                                    cell.get_mr1())) # cell env pointer
                for num, name, attrs in space.mappings:
                    if not attrs.need_mapping():
                        continue
                    #print "Mapping for (%d) %s:" % (num, name)
                    for m in self.get_mappings(attrs, machine.page_sizes,
                                               machine.min_page_size()):
                        mappings.add_mapping(*m)

        # Add initial mapping
        if kernel.base_segment.segment.paddr != kernel.base_segment.segment.vaddr:
            mappings.add_mapping(kernel.base_segment.segment.paddr,
                    kernel.base_segment.segment.paddr,
                1024 * 1024, 1, 0x0, weaver.machine.CACHE_POLICIES['default'])

        # Kernel driver mappings
        for (name, mem) in self.devices:
            # Patch variables
            if elf:
                # Ensure enough kernel pointers are defined
                base_name = "%s_mem" % name
                driver_max_index = len(mem) - 1
                i = 0
                while elf.find_symbol("%s%d" % (base_name, i)):
                    i += 1
                kernel_max_index = i - 1

                if driver_max_index != kernel_max_index:
                    raise MergeError(
                        "%s driver: Kernel expected %d memory range%s, "
                        "driver supplied %d" %
                        (name, kernel_max_index + 1,
                         ["s", ""][kernel_max_index == 0],
                         driver_max_index + 1))

            # Add mappings
            for (i, ms) in enumerate(mem):
                for m in self.get_mappings(ms.attrs, machine.page_sizes,
                                           machine.min_page_size()):
                    mappings.add_mapping(*m)
                self.patches.append(("%s_mem%d" % (name, i),
                                    _0(ms.attrs.virt_addr)))

        # Generate pagetable
        pagetable = mappings.to_data(_0(kernel.heap_attrs.phys_addr))

        # The pagetable needs to be 16 KB aligned, so it must be the
        # first thing in kernel_data.
        kernel_data += pagetable
        data_vaddr += len(pagetable)

        # The number of threads defaults to the defined constant; if fewer
        # are specified, the difference is allocated as spare threads.
        assert(kernel.total_threads <= self.ABSOLUTE_MAX_THREADS)
        num_spare_threads = self.DEFAULT_KERNEL_MAX_THREADS - len(threads)
        num_threads = self.DEFAULT_KERNEL_MAX_THREADS

        thread_data, priority_table, priority_bitfield, free_thread_addr = \
                self.get_thread_data(data_vaddr, threads, num_spare_threads)
        tcb_data_vaddr = data_vaddr
        kernel_data += thread_data
        data_vaddr += len(thread_data)

        priority_table_addr = data_vaddr
        kernel_data += priority_table
        data_vaddr += len(priority_table)

        # Futexes
        futex_base_addr = data_vaddr
        futex_hash_slots = next_power_of_2((num_threads * 3) / 2)

        futex_hash_addr, futex_base_addr = \
                self.allocate_memory(kernel_data, futex_base_addr,
                                futex_hash_slots * 8)
        futex_pending_tags_addr, futex_base_addr = \
                self.allocate_memory(kernel_data, futex_base_addr, num_threads * 4)

        self.patches.extend(
                [("tcbs", tcb_data_vaddr),
                 ("max_tcbs", num_threads),
                 ("tcb_free_head", free_thread_addr),
                 ("priority_bitmap", priority_bitfield),
                 ("priority_heads", priority_table_addr),
                 ("futex_hash_slots", futex_hash_slots),
                 ("futex_hash_slots_lg2", int(log(futex_hash_slots, 2))),
                 ("futex_hash", futex_hash_addr),
                 ("futex_pending_tags", futex_pending_tags_addr),
                 ("kpagetable_phys", _0(kernel.heap_attrs.phys_addr)),
                 ])

        return kernel_data
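
The futex table sizing near the end of create_ops is plain arithmetic: the hash is sized to the next power of two at or above 1.5 times the thread count, at 8 bytes per hash slot, plus 4 bytes of pending-tag state per thread. A worked sketch with a hypothetical next_power_of_2 (the real helper is defined elsewhere in the weaver sources):

def next_power_of_2(n):
    """Illustrative: smallest power of two >= n."""
    p = 1
    while p < n:
        p *= 2
    return p

num_threads = 64
futex_hash_slots = next_power_of_2((num_threads * 3) // 2)  # 96 rounds up to 128
hash_bytes = futex_hash_slots * 8                           # 1024 bytes of hash slots
pending_tag_bytes = num_threads * 4                         # 256 bytes of pending tags
assert (futex_hash_slots, hash_bytes, pending_tag_bytes) == (128, 1024, 256)
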
Example #6
    def create_dynamic_segments(self, kernel, namespace, image, machine,
                                pools, base_segment):
        data = self.create_ops(kernel, None, machine)

        kernel.heap_attrs.size   = align_up(len(data),
                                            machine.min_page_size())
Example #7
    def create_ops(self, kernel, elf, machine):
        self.patches = []
        kernel_data = ByteArray()
        data_vaddr = kernel.base_segment.segment.vaddr + \
                (_0(kernel.heap_attrs.phys_addr) - kernel.base_segment.segment.paddr)
        start_data_vaddr = data_vaddr
        mappings = self.map.Mappings()

        total_kernel_size = (_0(kernel.heap_attrs.phys_addr) + \
                             kernel.heap_attrs.size) - \
                             kernel.base_segment.segment.paddr
        page_size = self.largest_page(kernel.base_segment.segment.vaddr,
                                      total_kernel_size, machine.page_sizes)
        mappings.add_mapping(kernel.base_segment.segment.vaddr,
                             kernel.base_segment.segment.paddr, page_size, 1,
                             0x0, weaver.machine.CACHE_POLICIES['default'])

        # hacked for arm at the moment
        if self.arch_lookup[self.cpu] == 'arm':
            # user helpers page
            if elf:
                user_helpers_initial_vaddr = elf.find_symbol(
                    "user_atomic_cmp_set").value
            else:
                user_helpers_initial_vaddr = 0
            user_paddr = user_helpers_initial_vaddr - kernel.base_segment.segment.vaddr + \
                kernel.base_segment.segment.paddr
            #print "user_helpers %x -> %x" % (kernel.base_segment.segment.vaddr-4096, user_paddr)
            mappings.add_mapping(kernel.base_segment.segment.vaddr - 4096,
                                 user_paddr, 4096, 1, 0x1,
                                 weaver.machine.CACHE_POLICIES['default'])

        # Format: (name, prio, entry, sp, r0)
        # TODO: Give main thread cell_environment in r0
        threads = []

        # Collect live thread info
        for (name, cell) in kernel.cells.items():
            for space in cell.spaces:
                for thread in space.threads:
                    threads.append(
                        ("nanotest", thread.priority, thread.entry,
                         thread.get_sp(), cell.get_mr1()))  # cell env pointer
                for num, name, attrs in space.mappings:
                    if not attrs.need_mapping():
                        continue
                    #print "Mapping for (%d) %s:" % (num, name)
                    for m in self.get_mappings(attrs, machine.page_sizes,
                                               machine.min_page_size()):
                        mappings.add_mapping(*m)

        # Add initial mapping
        if kernel.base_segment.segment.paddr != kernel.base_segment.segment.vaddr:
            mappings.add_mapping(kernel.base_segment.segment.paddr,
                                 kernel.base_segment.segment.paddr,
                                 1024 * 1024, 1, 0x0,
                                 weaver.machine.CACHE_POLICIES['default'])

        # Kernel driver mappings
        for (name, mem) in self.devices:
            # Patch variables
            if elf:
                # Ensure enough kernel pointers are defined
                base_name = "%s_mem" % name
                driver_max_index = len(mem) - 1
                i = 0
                while elf.find_symbol("%s%d" % (base_name, i)):
                    i += 1
                kernel_max_index = i - 1

                if driver_max_index != kernel_max_index:
                    raise MergeError(
                        "%s driver: Kernel expected %d memory range%s, "
                        "driver supplied %d" %
                        (name, kernel_max_index + 1,
                         ["s", ""][kernel_max_index == 0],
                         driver_max_index + 1))

            # Add mappings
            for (i, ms) in enumerate(mem):
                for m in self.get_mappings(ms.attrs, machine.page_sizes,
                                           machine.min_page_size()):
                    mappings.add_mapping(*m)
                self.patches.append(
                    ("%s_mem%d" % (name, i), _0(ms.attrs.virt_addr)))

        # Generate pagetable
        pagetable = mappings.to_data(_0(kernel.heap_attrs.phys_addr))

        # The pagetable needs to be 16 KB aligned, so it must be the
        # first thing in kernel_data.
        kernel_data += pagetable
        data_vaddr += len(pagetable)

        # The number of threads defaults to the defined constant; if fewer
        # are specified, the difference is allocated as spare threads.
        assert (kernel.total_threads <= self.ABSOLUTE_MAX_THREADS)
        num_spare_threads = self.DEFAULT_KERNEL_MAX_THREADS - len(threads)
        num_threads = self.DEFAULT_KERNEL_MAX_THREADS

        thread_data, priority_table, priority_bitfield, free_thread_addr = \
                self.get_thread_data(data_vaddr, threads, num_spare_threads)
        tcb_data_vaddr = data_vaddr
        kernel_data += thread_data
        data_vaddr += len(thread_data)

        priority_table_addr = data_vaddr
        kernel_data += priority_table
        data_vaddr += len(priority_table)

        # Futexes
        futex_base_addr = data_vaddr
        futex_hash_slots = next_power_of_2((num_threads * 3) / 2)

        futex_hash_addr, futex_base_addr = \
                self.allocate_memory(kernel_data, futex_base_addr,
                                futex_hash_slots * 8)
        futex_pending_tags_addr, futex_base_addr = \
                self.allocate_memory(kernel_data, futex_base_addr, num_threads * 4)

        self.patches.extend([
            ("tcbs", tcb_data_vaddr),
            ("max_tcbs", num_threads),
            ("tcb_free_head", free_thread_addr),
            ("priority_bitmap", priority_bitfield),
            ("priority_heads", priority_table_addr),
            ("futex_hash_slots", futex_hash_slots),
            ("futex_hash_slots_lg2", int(log(futex_hash_slots, 2))),
            ("futex_hash", futex_hash_addr),
            ("futex_pending_tags", futex_pending_tags_addr),
            ("kpagetable_phys", _0(kernel.heap_attrs.phys_addr)),
        ])

        return kernel_data
Example #8
    def create_dynamic_segments(self, kernel, namespace, image, machine, pools,
                                base_segment):
        data = self.create_ops(kernel, None, machine)

        kernel.heap_attrs.size = align_up(len(data), machine.min_page_size())
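
create_dynamic_segments rounds the generated kernel data up to a whole number of pages before recording it as the heap size. align_up is not shown in these examples; a minimal sketch of the behaviour assumed here, for a power-of-two alignment:

def align_up(n, align):
    """Illustrative: round n up to the next multiple of a power-of-two align."""
    return (n + align - 1) & ~(align - 1)

assert align_up(5000, 0x1000) == 0x2000    # 5000 bytes needs two 4 KB pages
assert align_up(0x1000, 0x1000) == 0x1000  # already page aligned, unchanged
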