Ejemplo n.º 1
0
def create_in_out_manager(extended_architecture, layers):
    """Build a BufferManager wiring layer output buffers to layer input buffers.

    Layers are processed group by group: each group is the forward closure of
    the first still-unprocessed layer, and its sources/sinks share one
    connection table inside the returned manager.

    @type extended_architecture: dict
    @type layers: dict[unicode, pylstm.wrapper.py_layers.BaseLayer]
    @rtype: buffer_manager.BufferManager
    """
    in_out_manager = BufferManager()

    remaining = list(extended_architecture.keys())
    while remaining:
        # expand the first unhandled layer into its forward closure
        source_set, sink_set = get_forward_closure(remaining[0],
                                                   extended_architecture)
        # every source in the closure is now handled; drop them (order kept)
        remaining = [name for name in remaining if name not in source_set]

        source_list, sink_list, connection_table = set_up_connection_table(
            source_set, sink_set, extended_architecture)
        # apply the same row permutation to the source list and the table so
        # the two stay aligned
        perm = permute_rows(connection_table)
        source_list = [source_list[i] for i in perm]
        connection_table = np.atleast_2d(connection_table[perm])

        # (size getter, view factory) pairs for each source's output buffer
        source_getters = OrderedDict(
            (name, (layers[name].get_output_buffer_size,
                    layers[name].create_output_view))
            for name in source_list)

        # (size getter, view factory) pairs for each sink's input buffer
        sink_getters = OrderedDict(
            (name, (layers[name].get_input_buffer_size,
                    layers[name].create_input_view))
            for name in sink_list)

        in_out_manager.add(source_getters, sink_getters, connection_table)
    return in_out_manager
Ejemplo n.º 2
0
    def setUp(self):
        """Create an index file 'foobar' holding two pre-built leaf nodes.

        Block 0 is zero-filled (header placeholder); block 1 stores `left`
        and block 2 stores `right`.  Keys use the little-endian int
        format '<i'.
        """
        Node = node_factory('<i')
        # leaf node (is_leaf=True): three keys and four children — the last
        # child is presumably the offset of the next leaf (here 2) — TODO confirm
        left = Node(True,
                    [2, 7, 11],
                    [32, 87, 43, 2])

        # last child 0 presumably marks "no next leaf" — TODO confirm
        right = Node(True,
                     [15, 24, 31],
                     [45, 67, 89, 0])

        # truncate/create the backing file before writing blocks into it
        with open('foobar', 'wb') as file:
            pass

        manager = BufferManager()

        # block 0: zero-filled placeholder of one full block
        block = manager.get_file_block('foobar', 0)
        with pin(block):
            block.write(b'\0' * BufferManager.block_size)

        block = manager.get_file_block('foobar', 1)
        with pin(block):
            block.write(bytes(left))

        block = manager.get_file_block('foobar', 2)
        with pin(block):
            block.write(bytes(right))

        self.Node = Node
        self.left = left
Ejemplo n.º 3
0
def create_param_manager(layers):
    """Create a BufferManager holding one parameter buffer per layer.

    The first entry of `layers` is skipped (presumably the input layer,
    which carries no parameters -- TODO confirm).

    @type layers: dict[unicode, pylstm.wrapper.py_layers.BaseLayer]
    @rtype: buffer_manager.BufferManager
    """
    param_manager = BufferManager()
    # dict views are not sliceable in Python 3; materialize before slicing
    for name, l in list(layers.items())[1:]:
        sources = {name: (l.get_parameter_size, l.create_param_view)}
        param_manager.add(sources, {})
    return param_manager
Ejemplo n.º 4
0
    def init_my_bm(self):
        """Start the BufferManager (presumably a multiprocessing.Process --
        it is given a Pipe end and .start() is called) plus a green thread
        that listens on our end of the pipe."""
        # keep one end of the pipe, hand the other to the BufferManager
        self.conn, bm_conn = multiprocessing.Pipe()
        self.bm = BufferManager(name="bm", conn=bm_conn)
        # self.bm = BufferManager(name="bm")

        # spawn via ryu's hub (green thread) instead of a raw OS thread
        # self.thread = threading.Thread(target=self.listen_to_my_conn)
        # self.thread.start()
        hub.spawn(self.listen_to_my_conn)

        self.bm.start()
Ejemplo n.º 5
0
def create_param_manager(layers):
    """Create a BufferManager holding one parameter buffer per layer.

    The first entry of `layers` is skipped (presumably the input layer,
    which carries no parameters -- TODO confirm).

    @type layers: dict[unicode, pylstm.wrapper.py_layers.BaseLayer]
    @rtype: buffer_manager.BufferManager
    """
    param_manager = BufferManager()
    # dict views are not sliceable in Python 3; materialize before slicing
    for name, l in list(layers.items())[1:]:
        sources = {name: (l.get_parameter_size, l.create_param_view)}
        param_manager.add(sources, {})
    return param_manager
Ejemplo n.º 6
0
def create_bwd_state_manager(layers):
    """Create a BufferManager holding one backward-state buffer per layer.

    The first entry of `layers` is skipped (presumably the input layer,
    which needs no backward state -- TODO confirm).

    @type layers: dict[unicode, pylstm.wrapper.py_layers.BaseLayer]
    @rtype: buffer_manager.BufferManager
    """
    bwd_state_manager = BufferManager()
    # dict views are not sliceable in Python 3; materialize before slicing
    for name, l in list(layers.items())[1:]:
        sources = {name: (l.get_bwd_state_size, l.create_bwd_state)}
        bwd_state_manager.add(sources, {})
    return bwd_state_manager
Ejemplo n.º 7
0
    def test_delete_transfer(self):
        """After deleting 24, the tree should rebalance by transferring a key
        between leaf siblings instead of fusing them."""
        self.manager.Node.n = 4  # shrink node capacity so five inserts force a split
        self.manager.insert(2, 32)
        self.manager.insert(24, 67)
        self.manager.insert(7, 87)
        self.manager.insert(15, 45)
        self.manager.insert(11, 43)
        self.manager.delete(24)

        # root lives in block 3 with a single separator key (11,)
        self.assertEqual(self.manager.root, 3)
        Node = self.manager.Node

        root_block = BufferManager().get_file_block(self.manager.index_file_path, self.manager.root)
        root_node = Node.frombytes(root_block.read())
        self.assertEqual(root_node.keys, [(11,)])

        # left leaf: keys 2 and 7; last child 2 points to the next leaf
        left_block = BufferManager().get_file_block(self.manager.index_file_path, 1)
        left_node = Node.frombytes(left_block.read())
        self.assertEqual(left_node.keys, _convert_to_tuple_list([2, 7]))
        self.assertEqual(left_node.children, [32, 87, 2])

        # right leaf: keys 11 and 15; trailing 0 marks the end of the chain
        right_block = BufferManager().get_file_block(self.manager.index_file_path, 2)
        right_node = Node.frombytes(right_block.read())
        self.assertEqual(right_node.keys, _convert_to_tuple_list([11, 15]))
        self.assertEqual(right_node.children, [43, 45, 0])
Ejemplo n.º 8
0
def create_bwd_state_manager(layers):
    """Create a BufferManager holding one backward-state buffer per layer.

    The first entry of `layers` is skipped (presumably the input layer,
    which needs no backward state -- TODO confirm).

    @type layers: dict[unicode, pylstm.wrapper.py_layers.BaseLayer]
    @rtype: buffer_manager.BufferManager
    """
    bwd_state_manager = BufferManager()
    # dict views are not sliceable in Python 3; materialize before slicing
    for name, l in list(layers.items())[1:]:
        sources = {name: (l.get_bwd_state_size,
                          l.create_bwd_state)}
        bwd_state_manager.add(sources, {})
    return bwd_state_manager
Ejemplo n.º 9
0
    def test_initial_insert(self):
        """The very first insert should create a root leaf node in block 1."""
        manager = IndexManager('spam', '<id')
        manager.insert([42, 7.6], 518)

        self.assertEqual(manager.root, 1)
        self.assertEqual(manager.first_deleted_block, 0)  # free list still empty
        self.assertEqual(manager.total_blocks, 2)  # header block + one leaf
        block = BufferManager().get_file_block('spam', 1)
        Node = manager.Node
        node = Node.frombytes(block.read())
        self.assertEqual(node.is_leaf, True)
        self.assertEqual(node.keys, [(42, 7.6)])
        # children: the record value plus the trailing next-leaf pointer (0 = none)
        self.assertEqual(node.children, [518, 0])
Ejemplo n.º 10
0
    def __init__(self):
        """Set up object/texture managers and the queues used to talk to the
        ObjectLoader."""
        self.console = None

        # templates still waiting for vertex / texture data
        # (attribute names keep the original spelling)
        self.__objTemplatesWatingVertices_l = []
        self.__objTemplatesWatingTextrue_l = []

        # name -> object template
        self.__objectTemplates_d = {}

        self.bufferManager = BufferManager()
        self.texMan = TextureManager()

        # queues for two-way communication with the ObjectLoader
        # (presumably a worker process/thread -- TODO confirm)
        self.__toHereQueue = Queue()
        self.__toProcessQueue = Queue()
        self.objectLoader = ObjectLoader(self.__toHereQueue, self.__toProcessQueue)
        # count of results still pending from the loader
        self.__haveThingsToGetFromProcess_i = 0
Ejemplo n.º 11
0
class LeafIterator:
    """Iterator over the (key, value) pairs stored in a chain of leaf nodes.

    Walks the keys of the current leaf, then follows the trailing child
    pointer (0 = end of chain) to the next leaf on disk.
    """

    def __init__(self, Node, index_file_path, node, key_position):
        self.Node = Node
        self.index_file_path = index_file_path
        self.node = node
        self.key_position = key_position
        self.manager = BufferManager()

    def __iter__(self):
        return self

    def __next__(self):
        pos = self.key_position
        leaf = self.node
        value = leaf.children[pos]
        if pos >= len(leaf.keys):
            # past the last key: `value` is the offset of the next leaf
            if not value:
                raise StopIteration
            next_block = self.manager.get_file_block(
                self.index_file_path, value)
            with pin(next_block):
                self.node = self.Node.frombytes(next_block.read())
            # we return the new leaf's first pair, so resume at position 1
            self.key_position = 1
            return self.node.keys[0], self.node.children[0]
        self.key_position = pos + 1
        return leaf.keys[pos], value
Ejemplo n.º 12
0
    def test_multiple_delete(self):
        """delete() should remove every record sharing the key and report the count."""
        manager = IndexManager('spam', '<id')
        manager.insert([42, 7.6], 518)
        manager.insert([42, 7.6], 212)  # same key, second record
        manager.insert([233, 66.6], 7)
        deleted_num = manager.delete([42, 7.6])

        self.assertEqual(deleted_num, 2)  # both (42, 7.6) records gone
        self.assertEqual(manager.root, 1)
        self.assertEqual(manager.first_deleted_block, 0)
        self.assertEqual(manager.total_blocks, 2)
        block = BufferManager().get_file_block('spam', 1)
        Node = manager.Node
        node = Node.frombytes(block.read())
        self.assertEqual(node.is_leaf, True)
        self.assertEqual(node.keys, [(233, 66.6)])
        # children: remaining value plus the trailing next-leaf pointer (0 = none)
        self.assertEqual(node.children, [7, 0])
Ejemplo n.º 13
0
    def test_buffer_manager(self):
        """Exercise cache hits, pinning, the capacity error, LRU swapping and
        flushing of a swapped-out dirty block."""
        manager = BufferManager()
        a = manager.get_file_block('foo', 0)
        a.pin()
        self.assertEqual(a.read(), b'Hello')
        # same file via a different relative path must hit the same cache entry
        b = manager.get_file_block('./foo', 0)
        self.assertTrue(a is b)  # test cache hit
        a.write(b'hello')
        # a is not flushed

        b = manager.get_file_block('foo', 1)
        b.pin()
        time.sleep(0.5)
        self.assertEqual(b.read(), b' Worl')
        # both cached blocks are pinned, so requesting a third must fail
        with self.assertRaises(RuntimeError):
            c = manager.get_file_block('foo',
                                       2)  # test buffer run out of space
        a.unpin()
        b.unpin()
        c = manager.get_file_block('foo', 2)  # test lru swap
        self.assertFalse((os.path.abspath('foo'), 0)
                         in manager._blocks.keys())  # a should be swapped out
        self.assertTrue(
            (os.path.abspath('foo'), 1)
            in manager._blocks.keys())  # b should remain in the buffer
        with open('foo', 'rb') as file:
            self.assertEqual(
                file.read(),
                b'hello World')  # test the swapped out block is flushed
Ejemplo n.º 14
0
    def test_delete_fuse(self):
        """Two deletes should leave too few keys to transfer, so the leaves
        fuse back into a single root leaf."""
        self.manager.Node.n = 4  # shrink node capacity so five inserts force a split
        self.manager.insert(2, 32)
        self.manager.insert(24, 67)
        self.manager.insert(7, 87)
        self.manager.insert(15, 45)
        self.manager.insert(11, 43)
        self.manager.delete(24)
        self.manager.delete(11)

        # after the fuse the tree collapses back to one leaf in block 1
        self.assertEqual(self.manager.root, 1)
        Node = self.manager.Node

        root_block = BufferManager().get_file_block(self.manager.index_file_path, self.manager.root)
        root_node = Node.frombytes(root_block.read())
        self.assertEqual(root_node.is_leaf, True)
        self.assertEqual(root_node.keys, _convert_to_tuple_list([2, 7, 15]))
        # trailing 0 marks the end of the leaf chain
        self.assertEqual(root_node.children, [32, 87, 45, 0])
Ejemplo n.º 15
0
    def test_persistence(self):
        """State must survive a dump_header + flush_all and a fresh IndexManager."""
        manager = IndexManager('spam', '<id')
        manager.insert([42, 7.6], 518)
        manager.insert([233, 66.6], 7)
        manager.delete([42, 7.6])
        # persist the header and force all cached blocks to disk
        manager.dump_header()
        manager._manager.flush_all()
        del manager

        # re-open: header and node content must match the pre-flush state
        manager = IndexManager('spam', '<id')
        self.assertEqual(manager.root, 1)
        self.assertEqual(manager.first_deleted_block, 0)
        self.assertEqual(manager.total_blocks, 2)
        block = BufferManager().get_file_block('spam', 1)
        Node = manager.Node
        node = Node.frombytes(block.read())
        self.assertEqual(node.is_leaf, True)
        self.assertEqual(node.keys, [(233, 66.6)])
        self.assertEqual(node.children, [7, 0])
Ejemplo n.º 16
0
 def __init__(self, index_file_path, fmt):
     """specify the path of the index file and the format of the keys, return a index manager
     if the index file exists, read data from the file
     otherwise create it and initialize its header info
     multiple index manager on the same file MUSTN'T simultaneously exist"""
     self.Node = node_factory(fmt)
     self.index_file_path = index_file_path
     self._manager = BufferManager()
     self.meta_struct = Struct(
         '<4i'
     )  # header: total blocks, first deleted block, root offset, first leaf offset
     try:
         # block 0 of the index file stores the header
         meta_block = self._manager.get_file_block(self.index_file_path, 0)
         with pin(meta_block):
             self.total_blocks, self.first_deleted_block, self.root, self.first_leaf = self.meta_struct.unpack(
                 meta_block.read()[:self.meta_struct.size])
     except FileNotFoundError:  # create and initialize an index file if not exits
         # fresh index: only the header block exists; 0 means "none" for the
         # free list, the root and the first leaf
         self.total_blocks, self.first_deleted_block, self.root, self.first_leaf = 1, 0, 0, 0
         with open(index_file_path, 'wb') as f:
             f.write(
                 self.meta_struct.pack(self.total_blocks,
                                       self.first_deleted_block, self.root,
                                       self.first_leaf).ljust(
                                           BufferManager.block_size, b'\0'))
Ejemplo n.º 17
0
def create_in_out_manager(extended_architecture, layers):
    """Build a BufferManager wiring layer output buffers to layer input buffers.

    @type extended_architecture: dict
    @type layers: dict[unicode, pylstm.wrapper.py_layers.BaseLayer]
    @rtype: buffer_manager.BufferManager
    """
    in_out_manager = BufferManager()

    possible_sources = list(extended_architecture.keys())

    # process layers in groups: each group is the forward closure of the
    # first still-unprocessed layer
    while possible_sources:
        layer = possible_sources[0]

        source_set, sink_set = get_forward_closure(layer,
                                                   extended_architecture)
        # every source in the closure is handled now; don't visit it again
        for s in source_set:
            possible_sources.remove(s)

        source_list, sink_list, connection_table = set_up_connection_table(
            source_set, sink_set, extended_architecture)
        # apply the same row permutation to sources and table so they stay aligned
        perm = permute_rows(connection_table)
        source_list = [source_list[i] for i in perm]
        connection_table = np.atleast_2d(connection_table[perm])

        # (size getter, view factory) pairs for each source's output buffer
        source_getters = OrderedDict()
        for n in source_list:
            source_getters[n] = (layers[n].get_output_buffer_size,
                                 layers[n].create_output_view)

        # (size getter, view factory) pairs for each sink's input buffer
        sink_getters = OrderedDict()
        for n in sink_list:
            sink_getters[n] = (layers[n].get_input_buffer_size,
                               layers[n].create_input_view)

        in_out_manager.add(source_getters, sink_getters, connection_table)
    return in_out_manager
Ejemplo n.º 18
0
 def drop_table(table_name):
     """Drop a table: delete its records, remove its directory, detach its
     files from the buffer cache and erase its metadata entry."""
     metadata = load_metadata()
     buffer_manager = BufferManager()
     MinisqlFacade.delete_record_all(table_name)
     # remove the table directory; ignore_errors=True so a missing dir is fine
     shutil.rmtree('schema/tables/' + table_name + '/', True)
     # detach the table file and every index file from the buffer manager
     # (presumably evicts cached blocks so stale data isn't flushed back -- TODO confirm)
     buffer_manager.detach_from_file('schema/tables/' + table_name + '/' +
                                     table_name + '.table')
     for index_name in metadata.tables[table_name].indexes:
         buffer_manager.detach_from_file('schema/tables/' + table_name +
                                         '/' + index_name + '.index')
     metadata.drop_table(table_name)
     metadata.dump()
Ejemplo n.º 19
0
class IndexManager:
    """B+ tree index stored in a block file accessed through the BufferManager.

    Block 0 of the index file holds the header (see meta_struct); every other
    block holds one serialized node.  Deleted blocks form a free list chained
    through the first int of each deleted block.
    """

    def __init__(self, index_file_path, fmt):
        """specify the path of the index file and the format of the keys, return a index manager
        if the index file exists, read data from the file
        otherwise create it and initialize its header info
        multiple index manager on the same file MUSTN'T simultaneously exist"""
        self.Node = node_factory(fmt)
        self.index_file_path = index_file_path
        self._manager = BufferManager()
        self.meta_struct = Struct(
            '<4i'
        )  # header: total blocks, first deleted block, root offset, first leaf offset
        try:
            # block 0 of the index file stores the header
            meta_block = self._manager.get_file_block(self.index_file_path, 0)
            with pin(meta_block):
                self.total_blocks, self.first_deleted_block, self.root, self.first_leaf = self.meta_struct.unpack(
                    meta_block.read()[:self.meta_struct.size])
        except FileNotFoundError:  # create and initialize an index file if not exits
            # fresh index: only the header block exists; 0 means "none" for
            # the free list, the root and the first leaf
            self.total_blocks, self.first_deleted_block, self.root, self.first_leaf = 1, 0, 0, 0
            with open(index_file_path, 'wb') as f:
                f.write(
                    self.meta_struct.pack(self.total_blocks,
                                          self.first_deleted_block, self.root,
                                          self.first_leaf).ljust(
                                              BufferManager.block_size, b'\0'))

    def dump_header(self):
        """write the header info to the index file
        MUST be called before the program exits,
        otherwise the header info in the file won't be updated"""
        meta_block = self._manager.get_file_block(self.index_file_path, 0)
        with pin(meta_block):
            # header is padded with NULs to a full block
            meta_block.write(
                self.meta_struct.pack(self.total_blocks,
                                      self.first_deleted_block, self.root,
                                      self.first_leaf).ljust(
                                          BufferManager.block_size, b'\0'))

    def _get_free_block(self):
        """return a free block and update header info, assuming this block will be used"""
        if self.first_deleted_block > 0:
            # recycle the head of the deleted-block free list
            block_offset = self.first_deleted_block
            block = self._manager.get_file_block(self.index_file_path,
                                                 block_offset)
            s = Struct('<i')
            # the first int of a deleted block points to the next deleted one
            next_deleted = s.unpack(block.read()[:s.size])[0]
            self.first_deleted_block = next_deleted
            return block
        else:
            # nothing to recycle; append a new block at the end of the file
            block_offset = self.total_blocks
            block = self._manager.get_file_block(self.index_file_path,
                                                 block_offset)
            self.total_blocks += 1
            return block

    def _delete_node(self, node, block):
        """delete node and writes it to block
        just a shortcut to mark a block as deleted"""
        with pin(block):
            # push this block onto the free list
            node.next_deleted = self.first_deleted_block
            block.write(bytes(node))
            self.first_deleted_block = block.block_offset

    def _find_leaf(self, key):
        """find the first leaf node where key may reside
        key may not really reside in this node, in this case, the index file has no such key"""
        key = _convert_to_tuple(key)
        node_block_offset = self.root
        # offsets of the internal nodes visited, root first (used by callers
        # to walk back up on split/fuse)
        path_to_parents = []
        while True:  # find the insert position
            node_block = self._manager.get_file_block(self.index_file_path,
                                                      node_block_offset)
            with pin(node_block):
                node = self.Node.frombytes(node_block.read())
                if node.is_leaf:
                    return node, node_block, path_to_parents
                else:  # continue searching
                    path_to_parents.append(node_block_offset)
                    # descend into the child to the right of the last key <= key
                    child_index = bisect.bisect_right(node.keys, key)
                    node_block_offset = node.children[child_index]

    def _handle_overflow(self, node, block, path_to_parents):
        """split an overflowed node and push the separator key into its parent
        recurses while the parent overflows in turn; grows a new root when
        the root itself splits"""
        if not path_to_parents:  # the root overflowed
            new_block = self._get_free_block()
            new_node, key, value = node.split(new_block.block_offset)
            with pin(block), pin(new_block):
                block.write(bytes(node))
                new_block.write(bytes(new_node))
            # the two halves become children of a brand-new root
            new_root_block = self._get_free_block()
            with pin(new_root_block):
                new_root_node = self.Node(
                    False, [key], [block.block_offset, new_block.block_offset])
                new_root_block.write(bytes(new_root_node))
            self.root = new_root_block.block_offset
            return
        else:
            parent_offset = path_to_parents.pop()
            new_block = self._get_free_block()
            new_node, key, value = node.split(new_block.block_offset)
            with pin(block), pin(new_block):
                block.write(bytes(node))
                new_block.write(bytes(new_node))
            # insert the separator key into the parent; recurse if it overflows
            parent_block = self._manager.get_file_block(
                self.index_file_path, parent_offset)
            parent_node = self.Node.frombytes(parent_block.read())
            parent_node.insert(key, value)
            if len(parent_node.keys) <= self.Node.n:
                with pin(parent_block):
                    parent_block.write(bytes(parent_node))
            else:
                self._handle_overflow(parent_node, parent_block,
                                      path_to_parents)

    def _handle_underflow(self, node, block, path_to_parents):
        """handle underflow after deletion
        will try to transfer from the left sibling first
        then try to transfer from the right sibling
        then try to fuse with the left sibling
        then try to fuse with the right sibling"""
        if block.block_offset == self.root:
            if not node.keys:  # root has no key at all; this node is no longer needed
                if node.is_leaf:
                    # tree is now completely empty
                    self.root = 0
                    self.first_leaf = 0
                else:
                    # shrink the tree height: the sole child becomes the root
                    self.root = node.children[0]
                self._delete_node(node, block)
            else:
                block.write(bytes(node))
            return  # root underflow is not a problem

        parent_offset = path_to_parents.pop()
        parent_block = self._manager.get_file_block(self.index_file_path,
                                                    parent_offset)
        with pin(parent_block):
            parent = self.Node.frombytes(parent_block.read())
            # position of this node among the parent's children
            my_position = bisect.bisect_right(parent.keys, node.keys[0])

        if my_position > 0:  # try find the left sibling
            left_sibling_offset = parent.children[my_position - 1]
            left_sibling_block = self._manager.get_file_block(
                self.index_file_path, left_sibling_offset)
            with pin(left_sibling_block):
                left_sibling = self.Node.frombytes(left_sibling_block.read())
            if len(left_sibling.keys) > ceil(
                    node.n / 2):  # a transfer is possible
                node.transfer_from_left(left_sibling, parent, my_position - 1)
                # NOTE(review): unlike the right-sibling branch below, the two
                # writes after this `with` happen outside the pins — looks like
                # an indentation slip; confirm intended behavior
                with pin(block), pin(left_sibling_block), pin(parent_block):
                    block.write(bytes(node))
                left_sibling_block.write(bytes(left_sibling))
                parent_block.write(bytes(parent))
                return
        else:
            left_sibling = None  # no left sibling

        # NOTE(review): children has len(parent.keys) + 1 entries, so a right
        # sibling exists whenever my_position < len(parent.keys); the stricter
        # bound here may skip a valid sibling — confirm
        if my_position < len(parent.keys) - 1:  # try find the right sibling
            right_sibling_offset = parent.children[my_position + 1]
            right_sibling_block = self._manager.get_file_block(
                self.index_file_path, right_sibling_offset)
            with pin(right_sibling_block):
                right_sibling = self.Node.frombytes(right_sibling_block.read())
            if len(right_sibling.keys) > ceil(
                    node.n / 2):  # a transfer is possible
                node.transfer_from_right(right_sibling, parent, my_position)
                with pin(block), pin(right_sibling_block), pin(parent_block):
                    block.write(bytes(node))
                    right_sibling_block.write(bytes(right_sibling))
                    parent_block.write(bytes(parent))
                return
        else:
            right_sibling = None  # no right sibling

        # no transfer possible: fuse with a sibling and possibly recurse up
        if left_sibling is not None:  # fuse with left sibling
            left_sibling.fuse_with(node, parent, my_position - 1)
            with pin(left_sibling_block):
                left_sibling_block.write(bytes(left_sibling))
            self._delete_node(node, block)
            if len(parent.keys) >= ceil(node.n / 2):
                return
            else:
                self._handle_underflow(parent, parent_block, path_to_parents)
        else:  # fuse with right sibling
            node.fuse_with(right_sibling, parent, my_position)
            with pin(block):
                block.write(bytes(node))
            self._delete_node(right_sibling, right_sibling_block)
            if len(parent.keys) >= ceil(node.n / 2):
                return
            else:
                self._handle_underflow(parent, parent_block, path_to_parents)

    def find(self, key):
        """find the smallest key >= (parameter) key
        return an iterator from this position
        raise RuntimeError if the index is empty"""
        key = _convert_to_tuple(key)
        if self.root == 0:
            raise RuntimeError('cannot find from empty index')
        else:
            node, node_block, path_to_parents = self._find_leaf(key)
            key_position = bisect.bisect_left(node.keys, key)
            return LeafIterator(self.Node, self.index_file_path, node,
                                key_position)

    def insert(self, key, value):
        """insert a key-value pair into the index file
        if key already in this index, raise ValueError"""
        key = _convert_to_tuple(key)
        if self.root == 0:
            # empty index: the new leaf becomes both root and first leaf
            block = self._get_free_block()
            with pin(block):
                self.root = block.block_offset
                self.first_leaf = self.root
                node = self.Node(is_leaf=True, keys=[key], children=[value, 0])
                block.write(bytes(node))
        else:
            node, node_block, path_to_parents = self._find_leaf(key)
            key_position = bisect.bisect_left(node.keys, key)
            if key_position < len(
                    node.keys) and node.keys[key_position] == key:
                raise ValueError('duplicate key {}'.format(key))
            node.insert(key, value)
            if len(node.keys) <= node.n:
                node_block.write(bytes(node))
                return
            else:  # split
                self._handle_overflow(node, node_block, path_to_parents)

    def delete(self, key):
        """delete the key-value pair with key equal the parameter
        if the index file has no such key, raise ValueError"""
        key = _convert_to_tuple(key)
        if self.root == 0:
            raise ValueError('can\'t delete from empty index')
        else:
            node, node_block, path_to_parents = self._find_leaf(key)
            key_position = bisect.bisect_left(node.keys, key)
            if key_position < len(
                    node.keys) and node.keys[key_position] == key:  # key match
                del node.keys[key_position]
                del node.children[key_position]
                # a node must keep at least ceil(n/2) keys; otherwise rebalance
                if len(node.keys) >= ceil(node.n / 2):
                    node_block.write(bytes(node))
                    return
                else:  # underflow
                    self._handle_underflow(node, node_block, path_to_parents)
            else:  # key doesn't match
                raise ValueError('index has no such key {}'.format(key))

    def iter_leaves(self):
        """return an iterator at the beginning of the leaf node chain"""
        if self.first_leaf == 0:
            raise RuntimeError('can\'t iter from empty index')
        first_leaf_block = self._manager.get_file_block(
            self.index_file_path, self.first_leaf)
        first_leaf = self.Node.frombytes(first_leaf_block.read())
        return LeafIterator(self.Node, self.index_file_path, first_leaf, 0)
Ejemplo n.º 20
0
class LocalController(app_manager.RyuApp):
    """Ryu OpenFlow 1.3 controller that diverts packet-ins to a separate
    BufferManager process over a multiprocessing pipe, then replays the
    buffered packets into the switch (via OFPP_TABLE) once the real
    forwarding flows have been committed (signalled by a barrier reply).
    """
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(LocalController, self).__init__(*args, **kwargs)

        self.topo = {}
        self.time = 0
        self.datapaths = {}      # dpid -> datapath handle, filled on features reply
        self.eth_to_port = {}
        self.flows_final = False

        self.buffer = dict()     # in-controller fallback buffer: flow_id -> [pkgs]

        self.bm = None           # BufferManager child process
        self.conn = None         # controller end of the pipe to self.bm
        self.thread = None
        self.init_my_bm()
        self.packts_buffed = 0   # count of packet-ins pushed to the buffer

    def init_my_bm(self):
        """Start the BufferManager process and a green thread that drains
        the packets it sends back through the pipe."""
        self.conn, bm_conn = multiprocessing.Pipe()
        self.bm = BufferManager(name="bm", conn=bm_conn)
        # ryu's hub (eventlet) green thread instead of threading.Thread so
        # the listener cooperates with the ryu event loop.
        hub.spawn(self.listen_to_my_conn)
        self.bm.start()

    def listen_to_my_conn(self):
        """Forever: receive pickled packets from the BufferManager and
        replay each one into the switch it originally came from."""
        while True:
            packet_to_me = None
            try:
                packet_to_me = self.conn.recv()
            except Exception:
                # Nothing readable yet (or a transient pipe error): yield
                # briefly instead of busy-spinning the green thread.
                time.sleep(0.01)
            if packet_to_me:
                packet = pickle.loads(packet_to_me)
                dpid = packet["dpid"]
                pkg = packet["pkg"]
                in_port = packet["in_port"]
                self.send_back(pkg, dpid, in_port)

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def _switch_features_handler(self, ev):
        """Register the datapath, install the table-miss flow (punt all
        unmatched traffic to the controller) and drop DHCP.

        NOTE(review): _features_handler below is registered for the same
        event, so ryu invokes both handlers on every features reply.
        """
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        dpid = datapath.id
        self.datapaths[dpid] = datapath
        print(self.datapaths)
        print(ofproto.OFPP_CONTROLLER)
        actions = [
            parser.OFPActionOutput(port=ofproto.OFPP_CONTROLLER,
                                   max_len=ofproto.OFPCML_NO_BUFFER)
        ]
        inst = [
            parser.OFPInstructionActions(type_=ofproto.OFPIT_APPLY_ACTIONS,
                                         actions=actions)
        ]
        # priority 0 == table-miss entry
        mod = parser.OFPFlowMod(datapath=datapath,
                                priority=0,
                                match=parser.OFPMatch(),
                                instructions=inst)
        datapath.send_msg(mod)
        self.disable_dhcp(datapath)

    @set_ev_cls(ofp_event.EventOFPBarrierReply, MAIN_DISPATCHER)
    def _barrier_reply_handler(self, ev):
        """A barrier reply means the flows sent by install() are committed;
        ask the BufferManager to pop (replay) the buffered
        10.0.0.1 -> 10.0.0.2 packets."""
        print(ev.msg)
        print("---------------------------------------------------------")
        cmd_pop = self.make_buf_message(consts.BUF_POP,
                                        src="10.0.0.1",
                                        dst="10.0.0.2",
                                        dst_port=None,
                                        dpid=None,
                                        pkg=None,
                                        in_port=None)
        try:
            self.conn.send(cmd_pop)
        except Exception as e:
            print("Error in barrier!!!!!!!!!!!")
            print(e)

    def install(self, datapath):
        """Install the real forwarding flows (port 1 <-> port 2) on the
        given switch, then send a barrier request so that
        _barrier_reply_handler knows when they are committed."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        priority = 2
        match = parser.OFPMatch(in_port=1)
        actions = [parser.OFPActionOutput(2)]
        inst = [
            parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)
        ]
        mod = parser.OFPFlowMod(datapath=datapath,
                                priority=priority,
                                match=match,
                                instructions=inst)
        datapath.send_msg(mod)
        match = parser.OFPMatch(in_port=2)
        actions = [parser.OFPActionOutput(1)]
        inst = [
            parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS, actions)
        ]
        mod = parser.OFPFlowMod(datapath=datapath,
                                priority=priority,
                                match=match,
                                instructions=inst)
        datapath.send_msg(mod)
        # The reply to this barrier triggers replay of the buffered packets.
        req = parser.OFPBarrierRequest(datapath, xid=1)
        datapath.send_msg(req)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Push every packet-in to the BufferManager; after 100 packets
        have been buffered, install the real flows on switch 1."""
        pkt_in = ev.msg
        in_port = pkt_in.match["in_port"]
        print(in_port)
        src, dst, dst_port, dpid, pkg = self.identify_pkg(pkt_in)
        self.packts_buffed += 1
        if self.packts_buffed == 100:
            self.install(self.datapaths[1])
        cmd_to_bm = self.make_buf_message(consts.BUF_PUSH,
                                          src,
                                          dst,
                                          dst_port=dst_port,
                                          dpid=dpid,
                                          pkg=pkg,
                                          in_port=in_port)
        try:
            self.conn.send(cmd_to_bm)  # hand off to the buffer process
        except Exception as e:
            print("Error!!!!!!!!!!!!!!!!!!!!!!!!!!")
            print(e)

    def make_buf_message(self, msg_type, src, dst, dst_port, dpid, pkg,
                         in_port):
        """Serialize one buffer command (push/pop plus packet metadata)
        for transport over the multiprocessing pipe."""
        return pickle.dumps({
            "msg_type": msg_type,
            "src": src,
            "dst": dst,
            "dst_port": dst_port,
            "dpid": dpid,
            "pkg": pkg,
            "in_port": in_port
        })

    def cal_update(self, src, dst, old, new):
        """Move the src->dst flow from path `old` to path `new`: buffer the
        flow at the first hop of the new path, remove the flows along the
        old path, then install flows along the new path.

        Fixes over the previous revision: `parser`/`ofproto` are obtained
        from the buffering datapath instead of being undefined names, the
        buffering rule now uses `actions_buf` (previously an undefined
        `actions`), the removal uses `match_remove`, and the
        `ETH_TYPEk_IP` typo is corrected.
        """
        n_buf = new[0]
        dp_buf = self.datapaths[n_buf]
        ofproto = dp_buf.ofproto
        parser = dp_buf.ofproto_parser
        match = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP,
                                ipv4_dst=ipAdd(dst))
        # High-priority rule: divert the flow to the controller (so it gets
        # buffered) at the first node of the new path.
        actions_buf = [
            parser.OFPActionOutput(port=ofproto.OFPP_CONTROLLER,
                                   max_len=ofproto.OFPCML_NO_BUFFER)
        ]
        self.add_flow(datapath=dp_buf,
                      priority=233,
                      match=match,
                      actions=actions_buf)

        match_remove = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP,
                                       ipv4_dst=ipAdd(dst))
        for node in old:
            dp = self.datapaths[node]
            self.remove_flow(datapath=dp, priority=1, match=match_remove)

        for node in new:
            dp = self.datapaths[node]
            actions = [
                parser.OFPActionOutput(port=ofproto.OFPP_CONTROLLER,
                                       max_len=ofproto.OFPCML_NO_BUFFER)
            ]
            self.add_flow(datapath=dp,
                          priority=1,
                          match=match,
                          actions=actions)

    def disable_dhcp(self, datapath):
        """Drop DHCP client->server traffic (UDP 68 -> 67) by installing a
        clear-actions flow."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        match_dhcp = parser.OFPMatch(eth_type=ether_types.ETH_TYPE_IP,
                                     ip_proto=in_proto.IPPROTO_UDP,
                                     udp_src=68,
                                     udp_dst=67)
        instruction = [
            parser.OFPInstructionActions(ofproto.OFPIT_CLEAR_ACTIONS, [])
        ]
        mod = parser.OFPFlowMod(datapath,
                                priority=1,
                                match=match_dhcp,
                                instructions=instruction)
        datapath.send_msg(mod)

    def remove_flow(self, datapath, priority, match):
        """Delete every flow entry on *datapath* matching *match* at the
        given priority, regardless of out port/group."""
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        mod = parser.OFPFlowMod(datapath=datapath,
                                command=ofproto.OFPFC_DELETE,
                                out_port=ofproto.OFPP_ANY,
                                out_group=ofproto.OFPG_ANY,
                                match=match,
                                priority=priority)
        datapath.send_msg(mod)

    def send_back(self, pkg, dpid, in_port):
        """Re-inject a buffered packet into switch *dpid*, sending it back
        through the flow tables (OFPP_TABLE) as if it had just arrived on
        *in_port*."""
        datapath = self.datapaths[dpid]
        parser = datapath.ofproto_parser
        ofproto = datapath.ofproto
        actions = [parser.OFPActionOutput(port=ofproto.OFPP_TABLE)]
        req = parser.OFPPacketOut(datapath,
                                  in_port=in_port,
                                  buffer_id=ofproto.OFP_NO_BUFFER,
                                  actions=actions,
                                  data=pkg)
        datapath.send_msg(req)

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def _features_handler(self, ev):
        """Duplicate features handler.

        NOTE(review): this handler is bound to the same event as
        _switch_features_handler above and repeats its table-miss
        installation; kept as-is because removing it would change the
        app's registered handler set.
        """
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        dpid = datapath.id
        self.datapaths[dpid] = datapath
        print("haha" + str(self.datapaths))
        actions = [
            parser.OFPActionOutput(port=ofproto.OFPP_CONTROLLER,
                                   max_len=ofproto.OFPCML_NO_BUFFER)
        ]
        inst = [
            parser.OFPInstructionActions(type_=ofproto.OFPIT_APPLY_ACTIONS,
                                         actions=actions)
        ]
        mod = parser.OFPFlowMod(datapath=datapath,
                                priority=0,
                                match=parser.OFPMatch(),
                                instructions=inst)
        datapath.send_msg(mod)

    def save_to_buffer(self, flow_id, pkg):
        """Append a packet to the in-controller buffer, keyed by the
        packet's datapath id when available.

        Python 3 fix: dict.has_key() no longer exists; membership is
        tested with the `in` operator instead.
        """
        if pkg.datapath.id:
            flow_id = pkg.datapath.id
        if flow_id not in self.buffer:
            self.buffer[flow_id] = []
        print(flow_id)

        self.buffer[flow_id].append(pkg)
        print(len(self.buffer[flow_id]))

    def identify_pkg(self, pkt_in):
        """Parse a packet-in into (src_ip, dst_ip, dst_port, dpid, packet).

        Non-IP packets yield empty src/dst; dst_port comes from UDP or
        TCP, whichever protocol is present (they are not distinguished).
        """
        (src, dst, dst_port, dpid, pkg) = ("", "", None, None, None)
        dpid = pkt_in.datapath.id
        pkg = packet.Packet(pkt_in.data)
        ipkg = pkg.get_protocol(ipv4.ipv4)
        if ipkg:
            src = ipkg.src
            dst = ipkg.dst
            upkg = pkg.get_protocol(udp.udp) or pkg.get_protocol(tcp.tcp)
            if upkg:
                dst_port = upkg.dst_port
        return src, dst, dst_port, dpid, pkg

    def add_flow(self, datapath, priority, match, actions=None):
        """Flow-install stub.

        `actions` (default None, backward compatible) was added so the
        keyword calls in cal_update match the signature; the body remains
        a placeholder.
        """
        pass

    def start_update(self):
        """Demo driver: reroute flow 0 -> 2 from path [0, 1, 2] to
        path [0, 3, 2]."""
        old = [0, 1, 2]
        new = [0, 3, 2]
        src = 0
        dst = 2
        self.cal_update(src, dst, old, new)
Ejemplo n.º 21
0
 def test_singleton(self):
     """BufferManager must be a singleton: constructing it twice yields
     the very same instance."""
     manager_a = BufferManager()
     manager_b = BufferManager()
     # assertIs is the idiomatic identity assertion and reports both
     # operands on failure, unlike assertTrue(a is b).
     self.assertIs(manager_a, manager_b)
Ejemplo n.º 22
0
 def test_detach(self):
     """Detaching a file must evict every cached block for it."""
     mgr = BufferManager()
     mgr.get_file_block('foo', 0)
     mgr.detach_from_file('foo')
     # After detaching the only attached file, the cache must be empty.
     self.assertFalse(mgr._blocks)
Ejemplo n.º 23
0
 def __init__(self, Node, index_file_path, node, key_position):
     """Hold the cursor state for walking a B+ tree's leaf chain.

     Node: node class used to deserialize on-disk blocks.
     index_file_path: path of the index file the leaves live in.
     node: the leaf node the iterator currently points into.
     key_position: index of the current key within that leaf.
     """
     # BufferManager() yields the shared instance (it is a singleton,
     # per the tests elsewhere in this file).
     self.manager = BufferManager()
     self.Node = Node
     self.index_file_path = index_file_path
     self.node = node
     self.key_position = key_position
Ejemplo n.º 24
0
class ObjectManager:
    """Owns object templates: requests them from a background loader
    process, binds their GL buffers and textures, instantiates objects
    from them, and releases GPU resources when a template's reference
    count drops to zero."""

    def __init__(self):
        self.console = None  # injected later; used for logging

        # NOTE(review): the vertices waiting-list is never used inside this
        # class; kept for compatibility with possible external users.
        self.__objTemplatesWatingVertices_l = []
        self.__objTemplatesWatingTextrue_l = []  # templates awaiting textures

        # template name -> ObjectTemplate (None while still loading)
        self.__objectTemplates_d = {}

        self.bufferManager = BufferManager()
        self.texMan = TextureManager()

        self.__toHereQueue = Queue()      # loader process -> this manager
        self.__toProcessQueue = Queue()   # this manager -> loader process
        self.objectLoader = ObjectLoader(self.__toHereQueue, self.__toProcessQueue)
        self.__haveThingsToGetFromProcess_i = 0  # pending loader results

    def update(self) -> None:
        """Per-frame tick: resolve pending textures and collect any
        blueprints the loader process has finished."""
        self.__fillObjectTemplateWithTexture()

        if self.__haveThingsToGetFromProcess_i:
            self.__popObjectBlueprintFromProcess()

    def terminate(self):
        """Shut down the loader process and the texture manager, tolerating
        either being absent / already terminated."""
        try:
            self.objectLoader.terminate()
        except AttributeError:
            pass

        try:
            self.texMan.terminate()
        except AssertionError:
            # Presumably raised when the texture manager has already been
            # shut down -- TODO confirm against TextureManager.
            pass

    def runProcesses(self):
        """Start the background object-loader process."""
        self.objectLoader.start()

    def requestObject(self, objInitInfo:"ObjectInitInfo") -> Optional[ds.Object]:
        """Instantiate an object from an already-registered template.

        Returns None while the template is still being loaded; raises
        FileNotFoundError if the template was never requested at all.
        """
        self.update()

        if objInitInfo.objTemplateName_s in self.__objectTemplates_d.keys():
            if self.__objectTemplates_d[objInitInfo.objTemplateName_s] is None:
                return None  # still loading
            else:
                objTemplate = self.__objectTemplates_d[objInitInfo.objTemplateName_s]
                obj = objTemplate.makeObject(
                    objInitInfo.name_s, objInitInfo.level, objInitInfo.initPos_t, objInitInfo.static_b,
                    objInitInfo.colliders_l, objInitInfo.colGroupTargets_l
                )
                self.console.appendLogs("Instancing object: {} ({})".format(objTemplate.templateName_s, objTemplate.refCount_i))
                return obj
        else:
            print("Object never created:", objInitInfo.objTemplateName_s)
            raise FileNotFoundError

    def dumpObjects(self, objTempNames_l:list) -> None:
        """Drop one reference per listed template; when a template's count
        reaches zero, free its GL buffers, VAO and texture and delete it."""
        for objTempName_s in objTempNames_l:
            objTemplate = self.__objectTemplates_d[objTempName_s]
            objTemplate.refCount_i -= 1
            tempName_s = objTemplate.templateName_s

            self.console.appendLogs("Deleted an object instance: '{}' ({})".format(objTemplate.templateName_s, objTemplate.refCount_i))

            if objTemplate.refCount_i <= 0:
                trashes_l = objTemplate.terminate()
                # Each trash tuple: (vao, vbo, vbo, vbo, texture) -- release all.
                for trash_t in trashes_l:
                    self.bufferManager.dumpVertexArray(trash_t[0])
                    self.bufferManager.dumpBuffer(trash_t[1])
                    self.bufferManager.dumpBuffer(trash_t[2])
                    self.bufferManager.dumpBuffer(trash_t[3])
                    self.texMan.dump(trash_t[4])

                del self.__objectTemplates_d[objTempName_s], objTemplate
                self.console.appendLogs( "Deleted ObjectTemplate: '{}'".format(tempName_s) )

    def giveObjectDefineBlueprint(self, objBprint:ObjectDefineBlueprint) -> None:
        """Build an ObjectTemplate from a blueprint and queue it until its
        textures are available.  Raises FileExistsError on a name clash
        with an already-completed template."""
        if objBprint.name_s in self.__objectTemplates_d.keys():
            if self.__objectTemplates_d[objBprint.name_s] is not None:
                raise FileExistsError(objBprint.name_s)
        else:
            self.__objectTemplates_d[objBprint.name_s] = None

        objTemp = ObjectTemplate(
            objBprint.name_s, [self.__makeRendererFromBprint(xx) for xx in objBprint.rendererBlueprints_l],
            objBprint.colliders_l, objBprint.boundingBox
        )
        self.console.appendLogs("Created an object template: '{}'".format(objTemp.templateName_s))

        self.__objTemplatesWatingTextrue_l.append(objTemp)

    def giveObjectObjStaticBlueprint(self, objBprint:ObjectObjStaticBlueprint) -> None:
        """Hand an .obj-based blueprint to the loader process (no-op if a
        template with that name is already known)."""
        if objBprint.objFileName_s in self.__objectTemplates_d.keys():
            return

        self.__objectTemplates_d[objBprint.objFileName_s] = None
        a = self.__findObjMtlDir(objBprint.objFileName_s)
        if a is None:
            self.console.appendLogs( "Failed to obj file: '{}'".format(objBprint.objFileName_s) )
        else:
            self.__toProcessQueue.put( (objBprint.objFileName_s, *a) )
            self.__haveThingsToGetFromProcess_i += 1

    def __fillObjectTemplateWithTexture(self):
        """Try to resolve every waiting template's diffuse maps; templates
        whose renderers all have textures are promoted to the registry.
        Iterates backwards so completed entries can be deleted in place."""
        for x in range(len(self.__objTemplatesWatingTextrue_l) - 1, -1, -1):
            failedCount_i = 0

            objTemplate = self.__objTemplatesWatingTextrue_l[x]  # ObjectTemplate
            for y in range(len(objTemplate.renderers_l) - 1, -1, -1):
                aRenderer = objTemplate.renderers_l[y]
                if aRenderer.diffuseMap_i is None:
                    result = self.texMan.request(aRenderer.diffuseMapName_s)
                    if result is None:
                        failedCount_i += 1  # texture not ready yet
                        continue
                    else:
                        aRenderer.diffuseMap_i = result
                        continue
                else:
                    pass

            if failedCount_i <= 0:
                self.__objectTemplates_d[objTemplate.templateName_s] = objTemplate
                del self.__objTemplatesWatingTextrue_l[x]

    def __popObjectBlueprintFromProcess(self):
        """Fetch one finished blueprint from the loader process, if any,
        and register it as a template."""
        try:
            objBprint = self.__toHereQueue.get_nowait()
        except Empty:
            return None
        else:
            self.giveObjectDefineBlueprint(objBprint)

    def __makeRendererFromBprint(self, renBprint:RendererBlueprint) -> ds.Renderer:
        """Build a ds.Renderer from a blueprint: create a VAO plus vertex,
        texture-coordinate and normal VBOs, upload the arrays, and record
        metadata.  (Stray C-style semicolons removed.)"""
        newRenderer = ds.Renderer(renBprint.name_s, renBprint.initPos_t, renBprint.static_b)
        vramUsage_i = 0

        ######## VAO ########

        newRenderer.vao_i = self.bufferManager.requestVertexArray()
        gl.glBindVertexArray(newRenderer.vao_i)

        ######## Vertex buffer ########

        size = renBprint.vertexNdarray.size * renBprint.vertexNdarray.itemsize
        newRenderer.vertexSize_i = renBprint.vertexNdarray.size // 3  # 3 floats per vertex
        vramUsage_i += size

        newRenderer.vertexArrayBuffer_i = self.bufferManager.requestBuffer()
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, newRenderer.vertexArrayBuffer_i)
        gl.glBufferData(gl.GL_ARRAY_BUFFER, size, renBprint.vertexNdarray, gl.GL_STATIC_DRAW)

        # Attribute 0: vec3 position
        gl.glVertexAttribPointer(0, 3, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
        gl.glEnableVertexAttribArray(0)

        ######## Texture coord buffer ########

        size = renBprint.texCoordNdarray.size * renBprint.texCoordNdarray.itemsize
        vramUsage_i += size

        newRenderer.textureArrayBuffer_i = self.bufferManager.requestBuffer()
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, newRenderer.textureArrayBuffer_i)
        gl.glBufferData(gl.GL_ARRAY_BUFFER, size, renBprint.texCoordNdarray, gl.GL_STATIC_DRAW)

        # Attribute 1: vec2 texture coordinate
        gl.glVertexAttribPointer(1, 2, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
        gl.glEnableVertexAttribArray(1)

        ######## Normal buffer ########

        size = renBprint.normalNdarray.size * renBprint.normalNdarray.itemsize
        vramUsage_i += size

        newRenderer.normalArrayBuffe_i = self.bufferManager.requestBuffer()
        gl.glBindBuffer(gl.GL_ARRAY_BUFFER, newRenderer.normalArrayBuffe_i)
        gl.glBufferData(gl.GL_ARRAY_BUFFER, size, renBprint.normalNdarray, gl.GL_STATIC_DRAW)

        # Attribute 2: vec3 normal
        gl.glVertexAttribPointer(2, 3, gl.GL_FLOAT, gl.GL_FALSE, 0, None)
        gl.glEnableVertexAttribArray(2)

        ######## Metadata ########

        newRenderer.diffuseMapName_s = renBprint.textureDir_s

        newRenderer.textureVerNum_f = renBprint.textureVerNum_f
        newRenderer.textureHorNum_f = renBprint.textureHorNum_f
        newRenderer.shininess_f = renBprint.shininess_f
        newRenderer.specularStrength_f = renBprint.specularStrength_f
        newRenderer.vramUsage_i = vramUsage_i

        return newRenderer

    @staticmethod
    def __findObjMtlDir(objFileName_s:str) -> Optional[ Tuple[str, str] ]:
        """Search const.MODEL_DIR_s for '<name>.obj' and '<name>.mtl'.

        Returns (obj_path, mtl_path), or None when either file is missing;
        raises FileExistsError on duplicates.  Portability fix: paths are
        now joined with os.path.join instead of hard-coded backslashes.
        """
        objFileDir_s = ""
        mtlFileDir_s = ""

        for folderDir_s, _, files_l in os.walk(const.MODEL_DIR_s):
            for file_s in files_l:
                if objFileDir_s and mtlFileDir_s:
                    break

                if file_s == objFileName_s + ".obj":
                    if objFileDir_s:
                        raise FileExistsError("There are multiple '{}' files.".format(objFileName_s))
                    else:
                        objFileDir_s = os.path.join(folderDir_s, file_s)
                        continue
                elif file_s == objFileName_s + ".mtl":
                    if mtlFileDir_s:
                        raise FileExistsError("There are multiple '{}' files.".format(mtlFileDir_s))
                    else:
                        mtlFileDir_s = os.path.join(folderDir_s, file_s)
                        continue

        if objFileDir_s and mtlFileDir_s:
            return objFileDir_s, mtlFileDir_s
        else:
            return None
Ejemplo n.º 25
0
 def quit():
     """Flush every buffered block back to disk on shutdown (the
     BufferManager constructor returns the shared instance)."""
     BufferManager().flush_all()
Ejemplo n.º 26
0
class Record:
    """Fixed-size record storage over block-buffered files.

    The file starts with a header (first_free_rec, rec_tail).  Every
    record carries two bookkeeping fields after its payload: a one-byte
    valid flag (b'1' live / b'0' deleted) and the offset of the next
    record in the free list (-1 when none).  Deleted slots form a singly
    linked free list headed by first_free_rec.
    """
    # The format of header should be the same for all records files.
    header_format = '<ii'  # will be confirmed by RecordManager
    header_struct = Struct(header_format)

    def __init__(self, file_path, fmt):
        """Bind to *file_path*; *fmt* is the struct format of one record's
        payload (the 'ci' bookkeeping suffix is appended here)."""
        self.buffer_manager = BufferManager()
        self.filename = file_path
        # Each record in file has 2 extra info: next's record_off and valid bit
        self.record_struct = Struct(fmt + 'ci')
        self.first_free_rec, self.rec_tail = self._parse_header()

    def insert(self, attributes):
        """Insert the given record, reusing a free-list slot when one
        exists, otherwise appending at the tail.  Returns the record's
        offset."""
        record_info = convert_str_to_bytes(attributes) + (
            b'1', -1)  # valid bit, next free space
        self.first_free_rec, self.rec_tail = self._parse_header()
        if self.first_free_rec >= 0:  # There is space in the free list
            first_free_blk, local_offset = self._calc(self.first_free_rec)
            block = self.buffer_manager.get_file_block(self.filename,
                                                       first_free_blk)
            with pin(block):
                data = block.read()
                records = self._parse_block_data(data, first_free_blk)
                # Unlink the slot from the free list before overwriting it.
                next_free_rec = records[self.first_free_rec][-1]
                records[local_offset] = record_info
                new_data = self._generate_new_data(records, first_free_blk)
                block.write(new_data)
            position = self.first_free_rec
            self.first_free_rec = next_free_rec
        else:  # No space in free list, append the new record to the end of file
            self.rec_tail += 1
            block_offset, local_offset = self._calc(self.rec_tail)
            block = self.buffer_manager.get_file_block(self.filename,
                                                       block_offset)
            with pin(block):
                data = block.read()
                records = self._parse_block_data(data, block_offset)
                records.append(record_info)
                new_data = self._generate_new_data(records, block_offset)
                block.write(new_data)
            position = self.rec_tail
        self._update_header()
        return position

    def remove(self, record_offset):
        """Remove the record at the given offset, marking it invalid and
        pushing its slot onto the free list."""
        self.first_free_rec, self.rec_tail = self._parse_header()
        block_offset, local_offset = self._calc(record_offset)
        block = self.buffer_manager.get_file_block(self.filename, block_offset)
        with pin(block):
            data = block.read()
            records = self._parse_block_data(data, block_offset)
            try:
                records[local_offset][-1]
            except IndexError:
                raise IndexError('The offset points to an empty space')
            if records[local_offset][-2] == b'0':
                raise RuntimeError('Cannot remove an empty record')
            records[local_offset][
                -1] = self.first_free_rec  # link the old free-list head behind this slot
            records[local_offset][-2] = b'0'
            self.first_free_rec = record_offset  # update the head of free list
            new_data = self._generate_new_data(records, block_offset)
            block.write(new_data)
        self._update_header()

    def modify(self, attributes, record_offset):
        """Overwrite the record at the given offset (must be live)."""
        block_offset, local_offset = self._calc(record_offset)
        block = self.buffer_manager.get_file_block(self.filename, block_offset)
        record_info = convert_str_to_bytes(attributes) + (
            b'1', -1)  # Updated record must be real
        with pin(block):
            data = block.read()
            records = self._parse_block_data(data, block_offset)
            if records[local_offset][-2] == b'0':
                raise RuntimeError('Cannot update an empty record')
            records[local_offset] = record_info
            new_data = self._generate_new_data(records, block_offset)
            block.write(new_data)

    def read(self, record_offset):
        """Return the (converted) record at the given offset."""
        block_offset, local_offset = self._calc(record_offset)
        block = self.buffer_manager.get_file_block(self.filename, block_offset)
        with pin(block):
            data = block.read()
            records = self._parse_block_data(data, block_offset)
            if records[local_offset][-2] == b'0':
                raise RuntimeError('Cannot read an empty record')
        return convert_bytes_to_str(tuple(records[local_offset][:-2]))

    def scanning_select(self, conditions):
        """Full scan returning every live record matching *conditions*
        (a dict: { attribute offset : { operator : value } })."""
        total_blk = self._calc(self.rec_tail)[0] + 1
        result_set = []
        for block_offset in range(total_blk):
            block = self.buffer_manager.get_file_block(self.filename,
                                                       block_offset)
            records = self._parse_block_data(block.read(), block_offset)
            result_set += tuple([
                convert_bytes_to_str(record[:-2]) for record in records
                if self._check_condition(record, conditions) is True
            ])
        return result_set

    def scanning_delete(self, conditions):
        """Full scan deleting every live record matching *conditions* and
        threading the freed slots onto the free list."""
        total_blk = self._calc(self.rec_tail)[0] + 1
        record_offset = 0
        for block_offset in range(total_blk):
            block = self.buffer_manager.get_file_block(self.filename,
                                                       block_offset)
            records = self._parse_block_data(block.read(), block_offset)
            for i, record in enumerate(records):
                # Fix: pass the raw record -- _check_condition does its own
                # validity check and byte->str conversion (matching
                # scanning_select); converting here broke the valid-bit test.
                if self._check_condition(record, conditions):
                    records[i][-2] = b'0'
                    records[i][-1] = self.first_free_rec
                    self.first_free_rec = record_offset
                record_offset += 1
            block.write(self._generate_new_data(records, block_offset))
        self._update_header()

    def scanning_update(self, conditions, attributes):
        """Full scan replacing every live record matching *conditions* with
        *attributes*.  The file header is unchanged."""
        total_blk = self._calc(self.rec_tail)[0] + 1
        new_record = convert_str_to_bytes(attributes) + (b'1', -1)
        for block_offset in range(total_blk):
            block = self.buffer_manager.get_file_block(self.filename,
                                                       block_offset)
            records = self._parse_block_data(block.read(), block_offset)
            for i, record in enumerate(records):
                # Fix: pass the raw record (see scanning_delete).
                if self._check_condition(record, conditions):
                    records[i] = new_record
            block.write(self._generate_new_data(records, block_offset))

    def _calc(self, record_offset):
        """Map a global record offset to (block offset, offset within the
        block).  The first block holds fewer records because of the
        file header."""
        rec_per_blk = BufferManager.block_size // self.record_struct.size
        rec_first_blk = (BufferManager.block_size -
                         self.header_struct.size) // self.record_struct.size
        if record_offset < rec_first_blk:  # in 1st block
            return 0, record_offset
        else:  # not in 1st block
            block_offset = (record_offset - rec_first_blk) // rec_per_blk + 1
            local_offset = record_offset - rec_first_blk - (block_offset -
                                                            1) * rec_per_blk
            return block_offset, local_offset

    @staticmethod
    def _check_condition(record, conditions):
        """Return True iff *record* (raw bytes form, bookkeeping fields
        included) is live and satisfies every condition."""
        if record[
                -2] == b'0':  # check the valid bit, return false when meet empty record
            return False
        str_record = convert_bytes_to_str(record[:-2])
        for position, condition in conditions.items():
            value = str_record[position]
            for operator_type, value_restriction in condition.items():
                if operator_type == '=':
                    if value != value_restriction:
                        return False
                elif operator_type == '>':
                    if value <= value_restriction:
                        return False
                elif operator_type == '<':
                    if value >= value_restriction:
                        return False
        return True

    def _generate_new_data(self, records, blk_offset):
        """Serialize *records* into block bytes; block 0 gets a zeroed
        header placeholder (the real header is written by _update_header)."""
        if blk_offset == 0:
            data = bytearray(self.header_struct.size)
        else:
            data = bytearray()
        for r in records:
            data += self.record_struct.pack(*r)
        return data

    def _parse_block_data(self, data, blk_offset):
        """Deserialize a block into a list of records (lists), skipping the
        header in block 0 and any trailing partial record."""
        upper_bound = len(data)
        if (upper_bound -
                self.header_struct.size) % self.record_struct.size != 0:
            upper_bound -= self.record_struct.size
        if blk_offset == 0:  # is the first block, need to consider the header
            lower_bound = self.header_struct.size
        else:  # not the first block, all data are records
            lower_bound = 0
        records = [
            list(self.record_struct.unpack_from(data, offset)) for offset in
            range(lower_bound, upper_bound, self.record_struct.size)
        ]
        return records

    def _parse_header(self):
        """Read (first_free_rec, rec_tail) from the first block's header."""
        block = self.buffer_manager.get_file_block(self.filename,
                                                   0)  # Get the first block
        with pin(block):
            data = block.read()
            header_info = self.header_struct.unpack_from(data, 0)
        return header_info

    def _update_header(self):
        """Write the current (first_free_rec, rec_tail) back into the first
        block's header."""
        block = self.buffer_manager.get_file_block(self.filename, 0)
        with pin(block):
            data = block.read()
            header_info = (self.first_free_rec, self.rec_tail)
            data[:self.header_struct.size] = self.header_struct.pack(
                *header_info)
            block.write(data)
Ejemplo n.º 27
0
from buffer_manager import BufferManager

# Module-level shared manager; BufferManager appears to be a singleton
# (see test_singleton elsewhere in this file), so every user gets this
# same instance.
manager = BufferManager()
Ejemplo n.º 28
0
 def __init__(self, file_path, fmt):
     """Bind this record accessor to *file_path*; *fmt* is the struct
     format of one record's payload."""
     self.filename = file_path
     self.buffer_manager = BufferManager()
     # Two bookkeeping fields ride along with every record: a valid
     # flag ('c') and the offset of the next free record ('i').
     self.record_struct = Struct(fmt + 'ci')
     self.first_free_rec, self.rec_tail = self._parse_header()