예제 #1
0
    def write_struct(self, section):
        """Write the binary form of the segment mapping struct.

        Layout: one word holding the mapping count, then a six-word
        record per mapping: (virt_addr, seg, offset, size, attach,
        cache_policy).
        """
        section.write_word(len(self.mappings))

        for (num, _, attrs) in self.mappings:
            #
            # Align the segments to nearest page boundaries. If we have to move
            # 'virt_addr' backwards, we need to compensate by increasing 'size'
            # by the same amount.
            #
            # This is also done in kernel.py when writing out kernel mapping
            # operations.
            #
            if attrs.virt_addr is None:
                # No virtual address assigned: -1 marks the field unset.
                virt_addr = -1
                size = _0(attrs.size)
            else:
                virt_addr = align_down(_0(attrs.virt_addr), self.min_page_size)
                # Distance virt_addr moved backwards during alignment.
                alignment_slack = _0(attrs.virt_addr) - virt_addr
                size = _0(attrs.size) + alignment_slack

            # Round the (possibly grown) size up to whole pages.
            size = align_up(size, self.min_page_size)

            section.write_word(virt_addr)
            section.write_word(num) # seg
            section.write_word(0) # offset
            section.write_word(size)
            section.write_word(attrs.attach)
            section.write_word(attrs.cache_policy)
예제 #2
0
    def write_struct(self, section):
        """Write the binary form of the PD struct.

        Creates and attaches the UTCB memsection on the first call only,
        rebases it on every call, then writes the PD fields.  The
        trailing _init struct is zero-filled because it is not needed
        when the PD is weaved.
        """

        # Construct utcb memsection.  Use an identity test against None
        # (PEP 8) rather than '=='; create and attach only once, on the
        # first round -- later rounds just rebase below.
        if self.utcb_memsec is None:
            self.utcb_memsec = CellEnvMemsec(self.machine, self.image,
                    _0(self.space.utcb.virt_addr), _0(self.space.utcb.size),
                    self.page_size, self.space.utcb.attach,
                    self.space.utcb.cache_policy)
            self.utcb_memsec.set_callbacks(
                    self.elf, "_okl4_utcb_memsec_lookup",
                    "_okl4_utcb_memsec_map", "_okl4_utcb_memsec_destroy")
            self.attach_memsection(self.utcb_memsec, PF_R | PF_W | PF_X)
        self.utcb_memsec.set_virt_base(
                self.virt_base + self.utcb_memsec_offset)
        self.utcb_memsec.list_node.set_virt_base(
                self.virt_base + self.utcb_list_node_offset)

        # Write mem container list.
        section.write_word(self.mem_list.virt_base)
        # Write parent clist (usually copied over from the attr).
        section.write_word(self.kclist)
        # Write kspace pointer.
        section.write_word(self.kspace)
        # Write out the memsec.
        self.utcb_memsec.write_struct(section)
        # Write out dict_next pointer.
        section.write_word(0)
        # Write out the thread pool pointer.
        section.write_word(self.virt_base + self.thread_pool_offset)
        # Write out the thread alloc pointer.
        section.write_word(self.virt_base + self.thread_alloc_offset)
        # Write out the default pager.
        section.write_word(0)
        # Write out the extension pointer.
        section.write_word(0)

        # Initialise the _init struct to 0 because it is not needed when the PD
        # is weaved.
        # Write out the kclistid pool pointer.
        section.write_word(0)
        # Write out the kspaceid pool pointer.
        section.write_word(0)
        # Write out the virtmem pool pointer.
        section.write_word(0)
        # Write out the utcb area virt item (4 words).
        for _ in range(4):
            section.write_word(0)
        # Write out the kspace id.
        section.write_word(0)
        # Write out the kclist id.
        section.write_word(0)
        # Write out the utcb area pointer.
        section.write_word(0)
        # Write out the kclist pointer.
        section.write_word(0)
        # Write out the okl4 space (6 words).
        for _ in range(6):
            section.write_word(0)
        # Initialise the thread pool memory to 0.
        for _ in range(self.thread_pool_num_words):
            section.write_word(0)
        # Write out the thread bitmap allocator.
        self.thread_allocator.write_struct(section)
예제 #3
0
 def encode(self):
     """Encode the create-clist kernel init script operation."""
     header = self.encode_hdr(KI_OP_CREATE_CLIST, 1, self.eop)
     payload = self.encode_bitfield2_halfwords(_0(self.id), _0(self.max_caps))
     return header + payload
예제 #4
0
    def write_struct(self, section):
        """Write the binary form of the segment mapping struct."""
        write = section.write_word
        write(len(self.mappings))

        # Snap each segment to the nearest page boundary.  Whenever
        # 'virt_addr' is rounded down, 'size' grows by the same amount so
        # the mapping still covers the original range.  kernel.py applies
        # the same adjustment when writing kernel mapping operations.
        for seg_num, _, attrs in self.mappings:
            if attrs.virt_addr is None:
                virt_addr = -1
                size = _0(attrs.size)
            else:
                virt_addr = align_down(_0(attrs.virt_addr),
                                       self.min_page_size)
                slack = _0(attrs.virt_addr) - virt_addr
                size = _0(attrs.size) + slack

            size = align_up(size, self.min_page_size)

            # Record layout: virt, seg, offset, size, attach, cache.
            for word in (virt_addr, seg_num, 0, size, attrs.attach,
                         attrs.cache_policy):
                write(word)
예제 #5
0
 def encode(self):
     """Encode the init-ids kernel init script operation."""
     parts = [self.encode_hdr(KI_OP_INIT_IDS, 2, self.eop)]
     parts.append(self.encode_bitfield2_halfwords(_0(self.spaces),
                                                  _0(self.clists)))
     parts.append(self.encode_word(_0(self.mutexes)))
     return ''.join(parts)
예제 #6
0
 def encode(self):
     """Encode kernel init script create heap.

     The heap size is rounded up to a 4K boundary and emitted as a count
     of 4K units.  NOTE(review): the 0x1000 literal presumably equals
     1 << SHIFT_4K -- confirm against the SHIFT_4K definition.
     """
     return ''.join((
         self.encode_hdr(KI_OP_CREATE_HEAP, 1, self.eop),
         self.encode_base_and_size(_0(self.base),
                                   (align_up(_0(self.size), 0x1000)) >> \
                                   SHIFT_4K,
                                   SHIFT_4K)
         ))
예제 #7
0
 def encode(self):
     """Encode the create-heap kernel init script operation."""
     # Round the heap size up to 4K and express it in 4K units.
     heap_units = align_up(_0(self.size), 0x1000) >> SHIFT_4K
     return ''.join((
         self.encode_hdr(KI_OP_CREATE_HEAP, 1, self.eop),
         self.encode_base_and_size(_0(self.base), heap_units, SHIFT_4K),
         ))
예제 #8
0
 def encode(self):
     """
     Encode kernel init script create thread handles.

     Size is rounded up to a 4-word multiple and encoded as a word
     count.  Phys is 1K aligned.
     """
     # Use explicit floor division so the word count stays an integer on
     # both Python 2 and Python 3 (same result as '/' on py2 ints).
     return ''.join(
         (self.encode_hdr(KI_OP_CREATE_THREAD_HANDLES, 1, self.eop),
          self.encode_base_and_size(_0(self.phys),
                                    align_up(_0(self.size), 4) // 4,
                                    SHIFT_1K)))
예제 #9
0
 def encode(self):
     """Encode the create-thread-handles init script operation.

     Size is a 4-word multiple; phys is 1K aligned.
     """
     header = self.encode_hdr(KI_OP_CREATE_THREAD_HANDLES, 1, self.eop)
     payload = self.encode_base_and_size(_0(self.phys),
                                         align_up(_0(self.size), 4) / 4,
                                         SHIFT_1K)
     return header + payload
예제 #10
0
    def write_struct(self, section):
        """Write the virtmem item.

        Layout: (base, size, total_size, next) words, then an embedded
        range-allocator struct covering the virtual range, then the
        parent pointer word.
        """
        section.write_word(_0(self.attrs.virt_addr)) # base
        section.write_word(_0(self.attrs.size)) # size
        section.write_word(0) # total_size
        section.write_word(0) # next

        # Write the allocator struct.
        allocator = CellEnvRangeItem(self.machine, self.image,
                _0(self.attrs.virt_addr), 0, _0(self.attrs.size))
        allocator.write_struct(section)

        # Write the parent pointer (none at weave time).
        section.write_word(0) # parent
예제 #11
0
    def write_struct(self, section):
        """Write the virtmem item.

        Layout: (base, size, total_size, next) words, an embedded range
        allocator, then the parent pointer word.
        """
        section.write_word(_0(self.attrs.virt_addr))  # base
        section.write_word(_0(self.attrs.size))  # size
        section.write_word(0)  # total_size
        section.write_word(0)  # next

        # Write the allocator struct.
        allocator = CellEnvRangeItem(self.machine, self.image,
                                     _0(self.attrs.virt_addr), 0,
                                     _0(self.attrs.size))
        allocator.write_struct(section)

        # Write the parent pointer (none at weave time).
        section.write_word(0)  # parent
예제 #12
0
    def get_mappings(self, _attrs, page_sizes, minimum):
        """
        We run through all valid page sizes and try to map with a
        page size as large as possible.
        We return a list of mapping n-tuples.

        :param _attrs: memory attributes; copied, never modified.
        :param page_sizes: candidate page sizes (presumably ordered
            largest-first -- confirm with callers).
        :param minimum: the minimum page size, which gets special
            alignment handling in create_mapping.
        """

        # We are going to be changing attrs, so copy it first
        attrs = copy(_attrs)
        mappings = []
        # Find biggest page size we can use
        attrs.size = _0(attrs.size)
        for page_size in page_sizes:
            if attrs.size == 0:
                break
            # Skip pages larger than the remainder, except the minimum
            # page size, which is always tried so the tail is covered.
            if page_size > attrs.size and page_size != minimum:
                continue
            else:
                #print "phys_addr %x mapping size %x, page size %x" % \
                #        (_0(attrs.phys_addr), attrs.size, page_size)
                mapping, ret_size = self.create_mapping(
                    attrs, attrs.size, page_size, page_size == minimum)
                # Ensure phys addr is aligned to page size
                assert (mapping[1] % page_size == 0)
                mappings.append(mapping)
                # Advance the cursor past the chunk just mapped.
                attrs.size -= ret_size
                if attrs.phys_addr:
                    attrs.phys_addr += ret_size
                if attrs.virt_addr:
                    attrs.virt_addr += ret_size

        return mappings
예제 #13
0
파일: kernel_nano.py 프로젝트: BruceYi/okl4
    def get_mappings(self, _attrs, page_sizes, minimum):
        """
        We run through all valid page sizes and try to map with a
        page size as large as possible.
        We return a list of mapping n-tuples.

        :param _attrs: memory attributes; copied, never modified.
        :param page_sizes: candidate page sizes (presumably ordered
            largest-first -- confirm with callers).
        :param minimum: minimum page size; always considered so the tail
            of the region gets covered.
        """

        # We are going to be changing attrs, so copy it first
        attrs = copy(_attrs)
        mappings = []
        # Find biggest page size we can use
        attrs.size = _0(attrs.size)
        for page_size in page_sizes:
            if attrs.size == 0:
                break
            if page_size > attrs.size and page_size != minimum:
                continue
            else:
                #print "phys_addr %x mapping size %x, page size %x" % \
                #        (_0(attrs.phys_addr), attrs.size, page_size)
                mapping, ret_size = self.create_mapping(attrs, attrs.size,
                                                        page_size,
                                                        page_size==minimum)
                # Ensure phys addr is aligned to page size
                assert(mapping[1] % page_size == 0)
                mappings.append(mapping)
                # Advance the cursor past the chunk just mapped.
                attrs.size -= ret_size
                if attrs.phys_addr:
                    attrs.phys_addr += ret_size
                if attrs.virt_addr:
                    attrs.virt_addr += ret_size

        return mappings
예제 #14
0
    def write_struct(self, section):
        """Write the UTCB area struct: base, size, two zero words, then
        the embedded bitmap allocator."""
        # NOTE(review): virt_addr is guarded with _0() but size is not;
        # sibling struct writers in this file wrap size in _0() too --
        # confirm utcb.size can never be None here.
        section.write_word(_0(self.space.utcb.virt_addr))
        section.write_word(self.space.utcb.size)
        section.write_word(0)
        section.write_word(0)

        # Write the bitmap allocator
        self.bitmap_allocator.write_struct(section)
예제 #15
0
    def write_struct(self, section):
        """Write the UTCB area struct: base, size, two reserved words,
        then the embedded bitmap allocator."""
        section.write_word(_0(self.space.utcb.virt_addr))
        # Guard size with _0() as well, matching the treatment of
        # virt_addr and the other struct writers in this file, so an
        # unset size is written as 0 instead of raising.
        section.write_word(_0(self.space.utcb.size))
        section.write_word(0)
        section.write_word(0)

        # Write the bitmap allocator
        self.bitmap_allocator.write_struct(section)
예제 #16
0
    def write_struct(self, section):
        """Write the physmem item.

        Emits the physmem segment descriptor, the minimum page size
        word, a range allocator covering [0, size), and a NULL parent
        pointer.
        """
        (num, _, attrs) = self.mapping

        phys = CellEnvPhysmemItem(self.machine, self.image, num, 0,
                _0(attrs.size), _0(attrs.phys_addr))
        phys.write_struct(section)

        # Write the page size.
        section.write_word(self.min_page_size)

        # Write the allocator structure.
        allocator = CellEnvRangeItem(self.machine, self.image,
                0, 0, _0(attrs.size))
        allocator.write_struct(section)

        # Write the parent pointer.
        section.write_word(0) # parent
예제 #17
0
    def write_struct(self, section):
        """Write out the binary form of the physmem item."""
        num, _, attrs = self.mapping

        # Physmem segment descriptor.
        physmem_item = CellEnvPhysmemItem(self.machine, self.image, num, 0,
                                          _0(attrs.size),
                                          _0(attrs.phys_addr))
        physmem_item.write_struct(section)

        # Minimum page size word.
        section.write_word(self.min_page_size)

        # Range allocator spanning the whole segment.
        range_item = CellEnvRangeItem(self.machine, self.image, 0, 0,
                                      _0(attrs.size))
        range_item.write_struct(section)

        # NULL parent pointer.
        section.write_word(0)
예제 #18
0
    def write_struct(self, section):
        """Write the segment ID and physical address, then the embedded
        range item."""
        write = section.write_word

        write(self.segment)      # segment_id
        write(self.paddr)        # paddr

        # Range item words: base, size, total_size, next.
        write(0)
        write(_0(self.size))
        write(0)
        write(0)
예제 #19
0
    def write_struct(self, section):
        """Write the segment ID and physical address, followed by an
        embedded range item (base, size, total_size, next)."""
        section.write_word(self.segment) # segment_id
        section.write_word(self.paddr) # paddr

        # Write the range item.
        section.write_word(0) # base
        section.write_word(_0(self.size)) # size
        section.write_word(0) # total_size
        section.write_word(0) # next
예제 #20
0
    def write_struct(self, section):
        """Write the thread environment entry.

        Always emits (cap, sp, ip, priority); on the Micro kernel the
        entry is extended with (kspace, utcb, pager, next).
        """
        section.write_word(self.thread.cap_slot) # thread's cap
        section.write_word(self.thread.get_sp()) # sp of the thread
        section.write_word(self.thread.entry) # ip of the thread
        section.write_word(self.thread.priority) # priority of the thread

        if self.kernel_type == KERNEL_MICRO:
            section.write_word(self.kspace) # kspace of the thread
            section.write_word(_0(self.thread.utcb.virt_addr)) # utcb
            # -3 is presumably a well-known pager handle -- confirm
            # against the kernel's cap encoding.
            section.write_word(-3) # pager of thread
            section.write_word(self.next) #next
예제 #21
0
    def write_struct(self, section):
        """Write out the binary form of the thread struct."""
        write = section.write_word

        # Fields common to all kernel types.
        write(self.thread.cap_slot)     # thread's cap
        write(self.thread.get_sp())     # stack pointer
        write(self.thread.entry)        # instruction pointer
        write(self.thread.priority)     # priority

        # Extra fields for the Micro kernel only.
        if self.kernel_type == KERNEL_MICRO:
            write(self.kspace)                         # kspace
            write(_0(self.thread.utcb.virt_addr))      # utcb
            write(-3)                                  # pager
            write(self.next)                           # next
예제 #22
0
    def create_mapping(self, attrs, remaining_size, page_size, is_minimum):
        """
        Map as many pages as possible of the specified page size.
        Return the mapping n-tuple and the size of the mapping.
        If page size is the minimum page size, we need to worry about
        some extra alignment conditions.
        """

        # If minimum page size, we need to consider some extra conditions
        if is_minimum:
            phys_addr = align_down(_0(attrs.phys_addr), page_size)
            virt_addr = align_down(_0(attrs.virt_addr), page_size)

            # Calculate the shift caused by aligning the phys_addr
            alignment_diff = 0
            if attrs.phys_addr is not None:
                alignment_diff = attrs.phys_addr - phys_addr

            size = 0
            num_pages = 0
            if attrs.size is not None:
                # In certain cases, phys alignments can leave us a
                # page short. To account for this we add alignment
                # differences to the size.
                size = align_up(remaining_size + alignment_diff, page_size)
                # Floor division keeps the page count integral on both
                # Python 2 and Python 3.
                num_pages = size // page_size

        # for all other pages, we map as many as we can
        else:
            phys_addr = _0(attrs.phys_addr)
            virt_addr = _0(attrs.virt_addr)
            size = 0
            # Initialise num_pages here too: previously it was only
            # assigned inside the guard below, so a None attrs.size in
            # this branch raised NameError when building the tuple.
            num_pages = 0

            if attrs.size is not None:
                num_pages = remaining_size // page_size
                size = num_pages * page_size

        #print "creating mapping: size %x, pages %x" % (size, num_pages)
        mapping = (virt_addr, phys_addr, page_size, num_pages, attrs.attach,
                   attrs.cache_policy)
        return mapping, size
예제 #23
0
파일: kernel_nano.py 프로젝트: BruceYi/okl4
    def create_mapping(self, attrs, remaining_size, page_size, is_minimum):
        """
        Map as many pages for the specified page size.
        Return the mapping n-tuple and the size of the mapping.
        If page size is the minimum page size, we need to worry about
        some extra checks.
        """

        # If minimum page size, we need to consider some extra conditions
        if is_minimum:
            phys_addr = align_down(_0(attrs.phys_addr), page_size)
            virt_addr = align_down(_0(attrs.virt_addr), page_size)

            # Calculate the shift cause by aligning the phys_addr
            alignment_diff = 0
            if attrs.phys_addr is not None:
                alignment_diff = attrs.phys_addr - phys_addr

            size = 0
            num_pages = 0
            if attrs.size != None:
                # In certain cases, phys alignments can leave us a
                # page short. To account for this we add alignment
                # differences to the size.
                size = align_up(remaining_size + alignment_diff, page_size)
                num_pages = size / page_size

        # for all other pages, we map as many as we can
        else:
            phys_addr = _0(attrs.phys_addr)
            virt_addr = _0(attrs.virt_addr)
            size = 0

            # NOTE(review): num_pages is only assigned inside this guard,
            # so a None attrs.size here would raise NameError at the
            # tuple below.  Callers (get_mappings) normalise attrs.size
            # with _0() first, so the branch is not hit in practice.
            if attrs.size != None:
                num_pages = remaining_size / page_size
                size = num_pages * page_size

        #print "ceating mapping: size %x, pages %x" % (size, num_pages)
        mapping = (virt_addr, phys_addr, page_size, num_pages,
                   attrs.attach, attrs.cache_policy)
        return mapping, size
예제 #24
0
 def encode(self):
     """Encode the create-thread kernel init script operation."""
     parts = (
         self.encode_hdr(KI_OP_CREATE_THREAD, 5, self.eop),
         self.encode_bitfield2_halfwords(_0(self.cap_slot), _0(self.prio)),
         self.encode_word(_0(self.ip)),
         self.encode_word(_0(self.sp)),
         self.encode_word(_0(self.utcb_addr)),
         self.encode_word(_0(self.mr1)),
     )
     return ''.join(parts)
예제 #25
0
 def encode(self):
     """Encode kernel init script create thread.

     Emits the 5-word operation: header, (cap_slot, prio) packed as
     halfwords, then the ip, sp, utcb address and mr1 words.
     """
     return ''.join((
         self.encode_hdr(KI_OP_CREATE_THREAD, 5, self.eop),
         self.encode_bitfield2_halfwords(_0(self.cap_slot),
                                         _0(self.prio)),
         self.encode_word(_0(self.ip)),
         self.encode_word(_0(self.sp)),
         self.encode_word(_0(self.utcb_addr)),
         self.encode_word(_0(self.mr1))
         ))
예제 #26
0
    def create_ops(self, kernel, image, machine):
        """Create an init script for Micro kernel initialisation.

        Phase one emits the header, the first cell's heap, the global id
        pool sizes and the thread-handle array.  Phase two walks every
        cell emitting heap/clist/space/thread/mutex/mapping operations,
        then a final pass emits IPC and mutex caps.  Returns a StringIO
        holding the encoded binary script.
        """

        op_list = []
        # Single-element list so the nested add_op can mutate the running
        # byte offset.
        offset = [0]

        def add_op(op_func, *args):
            # Append a new op and return its byte offset within the
            # script (used later to patch cap/thread references).
            op = op_func(None, None, None, (args), image, machine)
            op_list.append(op)

            my_offset = offset[0]
            offset[0] += op.sizeof()

            return my_offset

        f = StringIO()

        # We just use the cells in order, hopefully the first cell has a
        # large enough heap for soc/kernel.  No longer do sorting
        cells = kernel.cells.values()

        ## PHASE ONE ##
        add_op(InitScriptHeader, [])

        add_op(InitScriptCreateHeap, _0(cells[0].heap_phys_base),
               cells[0].heap_size)

        # Declare total sizes.  The must be a minimum of 1.
        add_op(InitScriptInitIds, max(kernel.total_spaces, 1),
               max(kernel.total_clists, 1), max(kernel.total_mutexes, 1))

        needs_heap = False

        add_op(InitScriptCreateThreadHandles, _0(kernel.thread_array_base),
               kernel.thread_array_count)
        # Mark the end of phase one.
        op_list[-1].set_eop()

        ## PHASE TWO ##
        for cell in cells:
            # No need to encode the heap of the first cell.
            if needs_heap:
                add_op(InitScriptCreateHeap, _0(cell.heap_phys_base),
                       cell.heap_size)

            else:
                needs_heap = True

            cell.clist_offset = \
                              add_op(InitScriptCreateClist,
                                     cell.clist_id, cell.max_caps)

            for space in cell.get_static_spaces():
                utcb_base = 0xdeadbeef  # something obvious if we ever use it!
                utcb_size = 0x11

                if space.utcb is not None:
                    utcb_base = space.utcb.virt_addr
                    if utcb_base is None:
                        utcb_base = 0
                        utcb_size = 0
                    else:
                        # Encoded as log2 of the area size.
                        utcb_size = int(log(space.utcb.size, 2))

                add_op(
                    InitScriptCreateSpace,
                    space.id,
                    space.space_id_base,
                    _0(space.max_spaces),
                    space.clist_id_base,
                    _0(space.max_clists),
                    space.mutex_id_base,
                    _0(space.max_mutexes),
                    space.max_phys_segs,
                    utcb_base,
                    utcb_size,
                    space.is_privileged,
                    #XXX: A space's max priority is currently hardcoded!
                    #XXX: For now, use the kernel's max priority instead.
                    self.MAX_PRIORITY)
                #space.max_priority)

                # Grant the space access to the platform control
                # system call.
                if space.plat_control:
                    add_op(InitScriptAllowPlatformControl, [])

                # Assign any irqs to the space.
                for irq in space.irqs:
                    add_op(InitScriptAssignIrq, irq)

                for thread in space.get_static_threads():
                    # FIXME: Need to deal with entry and user_start
                    thread.offset = \
                                  add_op(InitScriptCreateThread,
                                         thread.cap_slot,
                                         thread.priority,
                                         thread.entry, thread.get_sp(),
                                         utcb_base, cell.get_mr1())

                for mutex in space.get_static_mutexes():
                    mutex.offset = \
                                 add_op(InitScriptCreateMutex, mutex.id)

                for (num, name, attrs) in space.mappings:
                    map_pg_sz = machine.min_page_size()
                    map_pg_sz_log2 = int(log(map_pg_sz, 2))

                    phys_addr = align_down(_0(attrs.phys_addr), map_pg_sz)
                    virt_addr = align_down(_0(attrs.virt_addr), map_pg_sz)

                    # Calculate the shift cause by aligning the phys_addr
                    alignment_diff = 0

                    if attrs.has_phys_addr():
                        alignment_diff = attrs.phys_addr - phys_addr

                    size = 0
                    num_pages = 0
                    if attrs.size != None:
                        # In certain cases, phys alignments can leave us a
                        # page short. To account for this we add alignment
                        # differences to the size.
                        size = align_up(attrs.size + alignment_diff, map_pg_sz)
                        num_pages = size / map_pg_sz

                    # Attributes are 0xff => All cache policies are valid!
                    if attrs.has_phys_addr():
                        add_op(InitScriptCreateSegment, num, phys_addr, 0xff,
                               size, attrs.attach)

                    if attrs.need_mapping():
                        add_op(InitScriptMapMemory, num, 0, attrs.attach,
                               map_pg_sz_log2, num_pages, attrs.cache_policy,
                               virt_addr)

        # Dump any caps
        for cell in cells:
            for space in cell.get_static_spaces():
                for cap in space.ipc_caps:
                    add_op(InitScriptCreateIpcCap, _0(cap.clist.clist_offset),
                           _0(cap.cap_slot), cap.obj.offset)

                for cap in space.mutex_caps:
                    add_op(InitScriptCreateMutexCap,
                           _0(cap.clist.clist_offset), _0(cap.cap_slot),
                           cap.obj.offset)

        # Mark the final operation as end-of-phase.
        op_list[-1].set_eop()

        f.write(''.join([op.encode() for op in op_list]))
        return f
예제 #27
0
    def create_ops(self, kernel, image, machine):
        """Create an init script for Micro kernel initialisation.

        Phase one emits the header, the first cell's heap, the global id
        pool sizes and the thread-handle array.  Phase two walks every
        cell emitting heap/clist/space/thread/mutex/mapping operations,
        then a final pass emits IPC and mutex caps.  Returns a StringIO
        holding the encoded binary script.
        """

        op_list = []
        # Single-element list so the nested add_op can mutate the running
        # byte offset.
        offset = [0]

        def add_op(op_func, *args):
            # Append a new op and return its byte offset within the
            # script (used later to patch cap/thread references).
            op = op_func(None, None, None, (args), image, machine)
            op_list.append(op)

            my_offset = offset[0]
            offset[0] += op.sizeof()

            return my_offset

        f = StringIO()

        # We just use the cells in order, hopefully the first cell has a
        # large enough heap for soc/kernel.  No longer do sorting
        cells = kernel.cells.values()

        ## PHASE ONE ##
        add_op(InitScriptHeader, [])

        add_op(InitScriptCreateHeap, _0(cells[0].heap_phys_base), cells[0].heap_size)

        # Declare total sizes.  The must be a minimum of 1.
        add_op(
            InitScriptInitIds, max(kernel.total_spaces, 1), max(kernel.total_clists, 1), max(kernel.total_mutexes, 1)
        )

        needs_heap = False

        add_op(InitScriptCreateThreadHandles, _0(kernel.thread_array_base), kernel.thread_array_count)
        # Mark the end of phase one.
        op_list[-1].set_eop()

        ## PHASE TWO ##
        for cell in cells:
            # No need to encode the heap of the first cell.
            if needs_heap:
                add_op(InitScriptCreateHeap, _0(cell.heap_phys_base), cell.heap_size)

            else:
                needs_heap = True

            cell.clist_offset = add_op(InitScriptCreateClist, cell.clist_id, cell.max_caps)

            for space in cell.get_static_spaces():
                utcb_base = 0xDEADBEEF  # something obvious if we ever use it!
                utcb_size = 0x11

                if space.utcb is not None:
                    utcb_base = space.utcb.virt_addr
                    if utcb_base is None:
                        utcb_base = 0
                        utcb_size = 0
                    else:
                        # Encoded as log2 of the area size.
                        utcb_size = int(log(space.utcb.size, 2))

                add_op(
                    InitScriptCreateSpace,
                    space.id,
                    space.space_id_base,
                    _0(space.max_spaces),
                    space.clist_id_base,
                    _0(space.max_clists),
                    space.mutex_id_base,
                    _0(space.max_mutexes),
                    space.max_phys_segs,
                    utcb_base,
                    utcb_size,
                    space.is_privileged,
                    # XXX: A space's max priority is currently hardcoded!
                    # XXX: For now, use the kernel's max priority instead.
                    self.MAX_PRIORITY,
                )
                # space.max_priority)

                # Grant the space access to the platform control
                # system call.
                if space.plat_control:
                    add_op(InitScriptAllowPlatformControl, [])

                # Assign any irqs to the space.
                for irq in space.irqs:
                    add_op(InitScriptAssignIrq, irq)

                for thread in space.get_static_threads():
                    # FIXME: Need to deal with entry and user_start
                    thread.offset = add_op(
                        InitScriptCreateThread,
                        thread.cap_slot,
                        thread.priority,
                        thread.entry,
                        thread.get_sp(),
                        utcb_base,
                        cell.get_mr1(),
                    )

                for mutex in space.get_static_mutexes():
                    mutex.offset = add_op(InitScriptCreateMutex, mutex.id)

                for (num, name, attrs) in space.mappings:
                    map_pg_sz = machine.min_page_size()
                    map_pg_sz_log2 = int(log(map_pg_sz, 2))

                    phys_addr = align_down(_0(attrs.phys_addr), map_pg_sz)
                    virt_addr = align_down(_0(attrs.virt_addr), map_pg_sz)

                    # Calculate the shift cause by aligning the phys_addr
                    alignment_diff = 0

                    if attrs.has_phys_addr():
                        alignment_diff = attrs.phys_addr - phys_addr

                    size = 0
                    num_pages = 0
                    if attrs.size != None:
                        # In certain cases, phys alignments can leave us a
                        # page short. To account for this we add alignment
                        # differences to the size.
                        size = align_up(attrs.size + alignment_diff, map_pg_sz)
                        num_pages = size / map_pg_sz

                    # Attributes are 0xff => All cache policies are valid!
                    if attrs.has_phys_addr():
                        add_op(InitScriptCreateSegment, num, phys_addr, 0xFF, size, attrs.attach)

                    if attrs.need_mapping():
                        add_op(
                            InitScriptMapMemory,
                            num,
                            0,
                            attrs.attach,
                            map_pg_sz_log2,
                            num_pages,
                            attrs.cache_policy,
                            virt_addr,
                        )

        # Dump any caps
        for cell in cells:
            for space in cell.get_static_spaces():
                for cap in space.ipc_caps:
                    add_op(InitScriptCreateIpcCap, _0(cap.clist.clist_offset), _0(cap.cap_slot), cap.obj.offset)

                for cap in space.mutex_caps:
                    add_op(InitScriptCreateMutexCap, _0(cap.clist.clist_offset), _0(cap.cap_slot), cap.obj.offset)

        # Mark the final operation as end-of-phase.
        op_list[-1].set_eop()

        f.write("".join([op.encode() for op in op_list]))
        return f
예제 #28
0
 def encode(self):
     """Encode the create-mutex kernel init script operation."""
     header = self.encode_hdr(KI_OP_CREATE_MUTEX, 1, self.eop)
     return header + self.encode_word(_0(self.id))
예제 #29
0
 def encode(self):
     """Encode the create-space kernel init script operation."""
     # Pack max_priority, the has_kresources flag and max_phys_seg into
     # one word, with the flag at bit position 'halfword'.
     flags_word = ((_0(self.max_priority) << (_0(self.halfword) + 1))
                   | (_0(self.has_kresources) << (_0(self.halfword)))
                   | (_0(self.max_phys_seg)))
     return ''.join((
         self.encode_hdr(KI_OP_CREATE_SPACE, 6, self.eop),
         self.encode_word(_0(self.id)),
         self.encode_bitfield2_halfwords(_0(self.space_base),
                                         _0(self.space_num)),
         self.encode_bitfield2_halfwords(_0(self.clist_base),
                                         _0(self.clist_num)),
         self.encode_bitfield2_halfwords(_0(self.mutex_base),
                                         _0(self.mutex_num)),
         self.encode_base_and_size(_0(self.utcb_base),
                                   _0(self.utcb_log2_size),
                                   SHIFT_1K),
         self.encode_word(flags_word),
         ))
Example #30
0
    def create_ops(self, kernel, elf, machine):
        """Build the nano-kernel's initial data area.

        Lays out, in order: the page table (must come first for its
        alignment), the TCB array, the priority table and the futex
        tables.  Records ("symbol", value) pairs in self.patches for
        later patching into the kernel ELF.  Returns the assembled
        data as a ByteArray.
        """
        self.patches = []
        kernel_data = ByteArray()
        # Virtual address of the start of the kernel heap: the heap's
        # physical offset from the base segment, applied to its vaddr.
        data_vaddr = kernel.base_segment.segment.vaddr + \
                (_0(kernel.heap_attrs.phys_addr) - kernel.base_segment.segment.paddr)
        start_data_vaddr = data_vaddr  # NOTE(review): never read again below
        mappings = self.map.Mappings()

        # Map the whole kernel (image + heap) using the largest page
        # size that covers it.
        total_kernel_size = (_0(kernel.heap_attrs.phys_addr) + \
                             kernel.heap_attrs.size) - \
                             kernel.base_segment.segment.paddr
        page_size = self.largest_page(kernel.base_segment.segment.vaddr,
                                      total_kernel_size, machine.page_sizes)
        mappings.add_mapping(kernel.base_segment.segment.vaddr,
                             kernel.base_segment.segment.paddr, page_size, 1,
                             0x0, weaver.machine.CACHE_POLICIES['default'])

        # hacked for arm at the moment
        if self.arch_lookup[self.cpu] == 'arm':
            # user helpers page: one page mapped just below the kernel,
            # located via the user_atomic_cmp_set symbol when an ELF is
            # available.
            if elf:
                user_helpers_initial_vaddr = elf.find_symbol(
                    "user_atomic_cmp_set").value
            else:
                user_helpers_initial_vaddr = 0
            user_paddr = user_helpers_initial_vaddr - kernel.base_segment.segment.vaddr + \
                kernel.base_segment.segment.paddr
            #print "user_helpers %x -> %x" % (kernel.base_segment.segment.vaddr-4096, user_paddr)
            mappings.add_mapping(kernel.base_segment.segment.vaddr - 4096,
                                 user_paddr, 4096, 1, 0x1,
                                 weaver.machine.CACHE_POLICIES['default'])

        # Format: (name, prio, entry, sp, r0)
        # TODO: Give main thread cell_environment in r0
        threads = []

        # Collect live thread info, and the mappings for each space.
        for (name, cell) in kernel.cells.items():
            for space in cell.spaces:
                for thread in space.threads:
                    threads.append(
                        ("nanotest", thread.priority, thread.entry,
                         thread.get_sp(), cell.get_mr1()))  # cell env pointer
                for num, name, attrs in space.mappings:
                    if not attrs.need_mapping():
                        continue
                    #print "Mapping for (%d) %s:" % (num, name)
                    for m in self.get_mappings(attrs, machine.page_sizes,
                                               machine.min_page_size()):
                        mappings.add_mapping(*m)

        # Add initial (identity) mapping of the kernel's physical base,
        # needed only when the kernel is not already identity-mapped.
        if kernel.base_segment.segment.paddr != kernel.base_segment.segment.vaddr:
            mappings.add_mapping(kernel.base_segment.segment.paddr,
                                 kernel.base_segment.segment.paddr,
                                 1024 * 1024, 1, 0x0,
                                 weaver.machine.CACHE_POLICIES['default'])

        # Kernel driver mappings
        for (name, mem) in self.devices:
            # Patch variables
            if elf:
                # Ensure enough kernel pointers are defined: count the
                # <name>_mem<i> symbols present in the kernel image.
                base_name = "%s_mem" % name
                driver_max_index = len(mem) - 1
                i = 0
                while elf.find_symbol("%s%d" % (base_name, i)):
                    i += 1
                kernel_max_index = i - 1

                if driver_max_index != kernel_max_index:
                    raise MergeError, "%s driver: "\
                            "Kernel expected %d memory range%s, "\
                            "driver supplied %d" % \
                            (name, kernel_max_index+1,
                             ["s", ""][kernel_max_index==0],
                             driver_max_index+1)

            # Add mappings for each device memory range, and record the
            # patch that points the kernel at its virtual address.
            for (i, ms) in enumerate(mem):
                for m in self.get_mappings(ms.attrs, machine.page_sizes,
                                           machine.min_page_size()):
                    mappings.add_mapping(*m)
                self.patches.append(
                    ("%s_mem%d" % (name, i), _0(ms.attrs.virt_addr)))

        # Generate pagetable
        pagetable = mappings.to_data(_0(kernel.heap_attrs.phys_addr))

        # pagetable needs to be 16k aligned, so it must be the first thing in the kernel_data
        kernel_data += pagetable
        data_vaddr += len(pagetable)

        # Number of threads defaults to the defined constant, if less are
        # specified, the difference will be allocated as spare threads
        assert (kernel.total_threads <= self.ABSOLUTE_MAX_THREADS)
        num_spare_threads = self.DEFAULT_KERNEL_MAX_THREADS - len(threads)
        num_threads = self.DEFAULT_KERNEL_MAX_THREADS

        thread_data, priority_table, priority_bitfield, free_thread_addr = \
                self.get_thread_data(data_vaddr, threads, num_spare_threads)
        tcb_data_vaddr = data_vaddr
        kernel_data += thread_data
        data_vaddr += len(thread_data)

        priority_table_addr = data_vaddr
        kernel_data += priority_table
        data_vaddr += len(priority_table)

        # Futexes: hash table sized to the next power of two above
        # 1.5x the thread count, plus one pending-tag word per thread.
        futex_base_addr = data_vaddr
        futex_hash_slots = next_power_of_2((num_threads * 3) / 2)

        futex_hash_addr, futex_base_addr = \
                self.allocate_memory(kernel_data, futex_base_addr,
                                futex_hash_slots * 8)
        futex_pending_tags_addr, futex_base_addr = \
                self.allocate_memory(kernel_data, futex_base_addr, num_threads * 4)

        # Symbols the kernel image needs patched with the final layout.
        self.patches.extend([
            ("tcbs", tcb_data_vaddr),
            ("max_tcbs", num_threads),
            ("tcb_free_head", free_thread_addr),
            ("priority_bitmap", priority_bitfield),
            ("priority_heads", priority_table_addr),
            ("futex_hash_slots", futex_hash_slots),
            ("futex_hash_slots_lg2", int(log(futex_hash_slots, 2))),
            ("futex_hash", futex_hash_addr),
            ("futex_pending_tags", futex_pending_tags_addr),
            ("kpagetable_phys", _0(kernel.heap_attrs.phys_addr)),
        ])

        return kernel_data
Example #31
0
 def write_struct(self, section):
     """Write this cap reference out as a single word (its cap slot).

     NOTE(review): the assert guards self.cap but the value written
     comes from self.dest_cap -- confirm that is intentional.
     """
     assert self.cap is not None
     slot = self.dest_cap.get_cap_slot()
     section.write_word(_0(slot))
Example #32
0
    def write_struct(self, section):
        """Write the binary form of the PD struct.

        On the first call, lazily creates and attaches the UTCB
        memsection; on every call, rebases it (and its list node)
        before writing the struct fields into *section*.
        """

        # Construct utcb memsection
        # (use "is None" -- identity, not equality -- per PEP 8)
        if self.utcb_memsec is None:  # Create and attach the memsection only once at first round
            self.utcb_memsec = CellEnvMemsec(self.machine, self.image,
                                             _0(self.space.utcb.virt_addr),
                                             _0(self.space.utcb.size),
                                             self.page_size,
                                             self.space.utcb.attach,
                                             self.space.utcb.cache_policy)
            self.utcb_memsec.set_callbacks(self.elf,
                                           "_okl4_utcb_memsec_lookup",
                                           "_okl4_utcb_memsec_map",
                                           "_okl4_utcb_memsec_destroy")
            self.attach_memsection(self.utcb_memsec, PF_R | PF_W | PF_X)
        self.utcb_memsec.set_virt_base(self.virt_base +
                                       self.utcb_memsec_offset)
        self.utcb_memsec.list_node.set_virt_base(self.virt_base +
                                                 self.utcb_list_node_offset)

        # Write mem container list.
        section.write_word(self.mem_list.virt_base)
        # Write parent clist (usually copied over from the attr).
        section.write_word(self.kclist)
        # Write kspace pointer.
        section.write_word(self.kspace)
        # Write out the memsec.
        self.utcb_memsec.write_struct(section)
        # Write out dict_next pointer.
        section.write_word(0)
        # Write out the thread pool pointer.
        section.write_word(self.virt_base + self.thread_pool_offset)
        # Write out the thread alloc pointer.
        section.write_word(self.virt_base + self.thread_alloc_offset)
        # Write out the default pager.
        section.write_word(0)
        # Write out the extension pointer.
        section.write_word(0)

        # Initialise the _init struct to 0 because it is not needed when the PD
        # is weaved.
        # Write out the kclistid pool pointer.
        section.write_word(0)
        # Write out the kspaceid pool pointer.
        section.write_word(0)
        # Write out the virtmem pool pointer.
        section.write_word(0)
        # Write out the utcb area virt item.
        for i in range(4):
            section.write_word(0)
        # Write out the kspace id.
        section.write_word(0)
        # Write out the kclist id.
        section.write_word(0)
        # Write out the utcb area pointer.
        section.write_word(0)
        # Write out the kclist pointer.
        section.write_word(0)
        # Write out the okl4 space.
        for i in range(6):
            section.write_word(0)
        # Initialise the thread pool memory to 0.
        for i in range(self.thread_pool_num_words):
            section.write_word(0)
        # Write out the thread bitmap allocator
        self.thread_allocator.write_struct(section)
Example #33
0
 def encode(self):
     """Encode an INIT_IDS kernel init-script operation.

     Two payload words: packed space/clist counts, then mutex count.
     """
     hdr = self.encode_hdr(KI_OP_INIT_IDS, 2, self.eop)
     id_counts = self.encode_bitfield2_halfwords(_0(self.spaces),
                                                 _0(self.clists))
     mutex_count = self.encode_word(_0(self.mutexes))
     return hdr + id_counts + mutex_count
Example #34
0
File: kernel_nano.py  Project: BruceYi/okl4
    def create_ops(self, kernel, elf, machine):
        """Build the nano-kernel's initial data area.

        Lays out, in order: the page table (must come first for its
        alignment), the TCB array, the priority table and the futex
        tables.  Records ("symbol", value) pairs in self.patches for
        later patching into the kernel ELF.  Returns the assembled
        data as a ByteArray.
        """
        self.patches = []
        kernel_data = ByteArray()
        # Virtual address of the start of the kernel heap: the heap's
        # physical offset from the base segment, applied to its vaddr.
        data_vaddr = kernel.base_segment.segment.vaddr + \
                (_0(kernel.heap_attrs.phys_addr) - kernel.base_segment.segment.paddr)
        start_data_vaddr = data_vaddr  # NOTE(review): never read again below
        mappings = self.map.Mappings()

        # Map the whole kernel (image + heap) using the largest page
        # size that covers it.
        total_kernel_size = (_0(kernel.heap_attrs.phys_addr) + \
                             kernel.heap_attrs.size) - \
                             kernel.base_segment.segment.paddr
        page_size = self.largest_page(kernel.base_segment.segment.vaddr,
                                      total_kernel_size,
                                      machine.page_sizes)
        mappings.add_mapping(kernel.base_segment.segment.vaddr,
                kernel.base_segment.segment.paddr,
                page_size, 1, 0x0, weaver.machine.CACHE_POLICIES['default'])

        # hacked for arm at the moment
        if self.arch_lookup[self.cpu] == 'arm':
            # user helpers page: one page mapped just below the kernel,
            # located via the user_atomic_cmp_set symbol when an ELF is
            # available.
            if elf:
                user_helpers_initial_vaddr = elf.find_symbol("user_atomic_cmp_set").value
            else:
                user_helpers_initial_vaddr = 0
            user_paddr = user_helpers_initial_vaddr - kernel.base_segment.segment.vaddr + \
                kernel.base_segment.segment.paddr
            #print "user_helpers %x -> %x" % (kernel.base_segment.segment.vaddr-4096, user_paddr)
            mappings.add_mapping(kernel.base_segment.segment.vaddr - 4096,
                    user_paddr, 4096, 1, 0x1, weaver.machine.CACHE_POLICIES['default'])

        # Format: (name, prio, entry, sp, r0)
        # TODO: Give main thread cell_environment in r0
        threads = []

        # Collect live thread info, and the mappings for each space.
        for (name, cell) in kernel.cells.items():
            for space in cell.spaces:
                for thread in space.threads:
                    threads.append(("nanotest",
                                    thread.priority,
                                    thread.entry,
                                    thread.get_sp(),
                                    cell.get_mr1())) # cell env pointer
                for num, name, attrs in space.mappings:
                    if not attrs.need_mapping():
                        continue
                    #print "Mapping for (%d) %s:" % (num, name)
                    for m in self.get_mappings(attrs, machine.page_sizes,
                                               machine.min_page_size()):
                        mappings.add_mapping(*m)

        # Add initial (identity) mapping of the kernel's physical base,
        # needed only when the kernel is not already identity-mapped.
        if kernel.base_segment.segment.paddr != kernel.base_segment.segment.vaddr:
            mappings.add_mapping(kernel.base_segment.segment.paddr,
                    kernel.base_segment.segment.paddr,
                1024 * 1024, 1, 0x0, weaver.machine.CACHE_POLICIES['default'])

        # Kernel driver mappings
        for (name, mem) in self.devices:
            # Patch variables
            if elf:
                # Ensure enough kernel pointers are defined: count the
                # <name>_mem<i> symbols present in the kernel image.
                base_name = "%s_mem" % name
                driver_max_index = len(mem) - 1
                i = 0
                while elf.find_symbol("%s%d" % (base_name, i)):
                    i += 1
                kernel_max_index = i - 1

                if driver_max_index != kernel_max_index:
                    raise MergeError, "%s driver: "\
                            "Kernel expected %d memory range%s, "\
                            "driver supplied %d" % \
                            (name, kernel_max_index+1,
                             ["s", ""][kernel_max_index==0],
                             driver_max_index+1)

            # Add mappings for each device memory range, and record the
            # patch that points the kernel at its virtual address.
            for (i, ms) in enumerate(mem):
                for m in self.get_mappings(ms.attrs, machine.page_sizes,
                                           machine.min_page_size()):
                    mappings.add_mapping(*m)
                self.patches.append(("%s_mem%d" % (name, i),
                                    _0(ms.attrs.virt_addr)))

        # Generate pagetable
        pagetable = mappings.to_data(_0(kernel.heap_attrs.phys_addr))

        # pagetable needs to be 16k aligned, so it must be the first thing in the kernel_data
        kernel_data += pagetable
        data_vaddr += len(pagetable)

        # Number of threads defaults to the defined constant, if less are
        # specified, the difference will be allocated as spare threads
        assert(kernel.total_threads <= self.ABSOLUTE_MAX_THREADS)
        num_spare_threads = self.DEFAULT_KERNEL_MAX_THREADS - len(threads)
        num_threads = self.DEFAULT_KERNEL_MAX_THREADS

        thread_data, priority_table, priority_bitfield, free_thread_addr = \
                self.get_thread_data(data_vaddr, threads, num_spare_threads)
        tcb_data_vaddr = data_vaddr
        kernel_data += thread_data
        data_vaddr += len(thread_data)

        priority_table_addr = data_vaddr
        kernel_data += priority_table
        data_vaddr += len(priority_table)

        # Futexes: hash table sized to the next power of two above
        # 1.5x the thread count, plus one pending-tag word per thread.
        futex_base_addr = data_vaddr
        futex_hash_slots = next_power_of_2((num_threads * 3) / 2)

        futex_hash_addr, futex_base_addr = \
                self.allocate_memory(kernel_data, futex_base_addr,
                                futex_hash_slots * 8)
        futex_pending_tags_addr, futex_base_addr = \
                self.allocate_memory(kernel_data, futex_base_addr, num_threads * 4)

        # Symbols the kernel image needs patched with the final layout.
        self.patches.extend(
                [("tcbs", tcb_data_vaddr),
                 ("max_tcbs", num_threads),
                 ("tcb_free_head", free_thread_addr),
                 ("priority_bitmap", priority_bitfield),
                 ("priority_heads", priority_table_addr),
                 ("futex_hash_slots", futex_hash_slots),
                 ("futex_hash_slots_lg2", int(log(futex_hash_slots, 2))),
                 ("futex_hash", futex_hash_addr),
                 ("futex_pending_tags", futex_pending_tags_addr),
                 ("kpagetable_phys", _0(kernel.heap_attrs.phys_addr)),
                 ])

        return kernel_data
Example #35
0
 def encode(self):
     """Encode a CREATE_CLIST kernel init-script operation.

     One payload word packing the clist id and its capacity.
     """
     hdr = self.encode_hdr(KI_OP_CREATE_CLIST, 1, self.eop)
     body = self.encode_bitfield2_halfwords(_0(self.id), _0(self.max_caps))
     return hdr + body
Example #36
0
 def write_struct(self, section):
     """Emit this struct as one word: the destination cap's slot.

     NOTE(review): the assert checks self.cap yet self.dest_cap is
     what gets written -- verify this asymmetry is intended.
     """
     assert self.cap is not None
     cap_slot = _0(self.dest_cap.get_cap_slot())
     section.write_word(cap_slot)
Example #37
0
 def encode(self):
     """Encode a CREATE_SPACE kernel init-script operation.

     Six payload words: space id, space/clist/mutex id ranges, the
     UTCB base and log2 size (1K-shifted), and a packed flags word.
     """
     hdr = self.encode_hdr(KI_OP_CREATE_SPACE, 6, self.eop)
     space_id = self.encode_word(_0(self.id))
     space_rng = self.encode_bitfield2_halfwords(_0(self.space_base),
                                                 _0(self.space_num))
     clist_rng = self.encode_bitfield2_halfwords(_0(self.clist_base),
                                                 _0(self.clist_num))
     mutex_rng = self.encode_bitfield2_halfwords(_0(self.mutex_base),
                                                 _0(self.mutex_num))
     utcb = self.encode_base_and_size(_0(self.utcb_base),
                                      _0(self.utcb_log2_size),
                                      SHIFT_1K)
     # max_priority above the has_kresources bit (bit `halfword`),
     # max_phys_seg in the low bits.
     bit = _0(self.halfword)
     flags = self.encode_word((_0(self.max_priority) << (bit + 1))
                              | (_0(self.has_kresources) << bit)
                              | _0(self.max_phys_seg))
     return hdr + space_id + space_rng + clist_rng + mutex_rng + utcb + flags
Example #38
0
 def encode(self):
     """Encode a CREATE_MUTEX kernel init-script operation."""
     parts = [self.encode_hdr(KI_OP_CREATE_MUTEX, 1, self.eop)]
     parts.append(self.encode_word(_0(self.id)))
     return ''.join(parts)