Example #1
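A retry wrapper: the command is re-issued until the redirect handler returns a definitive response (None signals "retry"), and any status other than '!ok' raises KeyError.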
    def _run_repeated(self, args):
        # Re-issue the command until the redirect handler accepts the
        # response; _handle_redirect returns None whenever the command
        # must be retried.
        response = None
        while response is None:
            response = self.blocks[self._block_id(args)].run_command(args)
            response = self._handle_redirect(args, response)
        self.redo_times = 0  # reset the backoff counter after success
        if response[0] != b('!ok'):
            raise KeyError(bytes_to_str(response[0]))
        return response
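The loop relies on a simple contract: the handler returns None to request a retry and the final response otherwise. A minimal standalone sketch of that contract, with a retry cap added for safety (all names here are illustrative, not part of the client):

    def run_until_handled(run_command, handle, max_tries=100):
        # Retry until the handler accepts a response (returns non-None).
        for _ in range(max_tries):
            response = handle(run_command())
            if response is not None:
                return response
        raise RuntimeError("command not accepted after %d tries" % max_tries)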
Example #2
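Appends a client for a newly allocated replica chain when the command targeted the last block; without auto-scaling, running out of blocks is an error.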
    def add_blocks(self, response, args):
        # Grow the block list when the command targeted the last block.
        if self._block_id(args) >= len(self.blocks) - 1:
            if self.auto_scale:
                # The response carries the '!'-separated ids of the
                # newly allocated replica chain.
                block_ids = [
                    bytes_to_str(j) for j in response[1].split(b('!'))
                ]
                chain = ReplicaChain(block_ids, 0, 0,
                                     rpc_storage_mode.rpc_in_memory)
                self.blocks.append(
                    ReplicaChainClient(self.fs, self.path, self.client_cache,
                                       chain, QueueOps.op_types))
            else:
                raise ValueError("Cannot add blocks: auto_scale is disabled")
Example #3
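The redirect handler used by the retry wrapper: it follows '!exporting' redirects, refreshes stale block mappings on '!block_moved', backs off on '!full', and returns None whenever the caller should retry.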
    def _handle_redirect(self, args, response):
        # Follow '!exporting' redirects: replay the command on the
        # destination chain until it stops asking for a redo.
        while b(response[0]) == b('!exporting'):
            args_copy = copy.deepcopy(args)
            if args[0] == b("update") or args[0] == b("upsert"):
                args_copy += [response[2], response[3]]
            block_ids = [bytes_to_str(x) for x in response[1].split(b('!'))]
            chain = ReplicaChain(block_ids, 0, 0,
                                 rpc_storage_mode.rpc_in_memory)
            while True:
                response = ReplicaChainClient(
                    self.fs, self.path, self.client_cache, chain,
                    HashTableOps.op_types).run_command_redirected(args_copy)
                if b(response[0]) != b("!redo"):
                    break
        if b(response[0]) == b('!block_moved'):
            # Stale block mapping: refresh it and signal a retry.
            self._refresh()
            return None
        if b(response[0]) == b('!full'):
            # Back off exponentially (1 ms doubling per retry), then retry.
            time.sleep(0.001 * math.pow(2, self.redo_times))
            self.redo_times += 1
            return None
        if b(response[0]) == b("!redo"):
            return None
        return response
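The '!full' branch sleeps for 0.001 * 2**redo_times seconds, i.e. the delay starts at 1 ms and doubles on every consecutive retry. A quick check of the schedule:

    import math

    # Delay schedule of the '!full' backoff above.
    for redo_times in range(5):
        print(redo_times, 0.001 * math.pow(2, redo_times))
    # -> 0.001, 0.002, 0.004, 0.008, 0.016 seconds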
Example #4
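A block-structured file write: it first computes how many new replica chains the payload needs, allocates them if auto-scaling allows, and then pipelines one write command per block while tracking the cursor and the file's high-water mark.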
    def write_ls(self, data):
        # Size of the file as currently allocated; partitions are
        # fixed-size blocks.
        file_size = (self.last_partition + 1) * self.block_size
        num_chain_needed = 0
        temp_last_offset = -1
        if self.cur_partition * self.block_size + self.cur_offset > file_size:
            # The write cursor already points past the allocated blocks.
            num_chain_needed = int(self.cur_partition - self.last_partition)
            file_size = (self.cur_partition + 1) * self.block_size
            remain_size = file_size - self.cur_partition * self.block_size - self.cur_offset
            # ceil((len(data) - remain_size) / block_size) extra chains.
            num_chain_needed += int(
                (len(data) - remain_size) / self.block_size +
                ((len(data) - remain_size) % self.block_size != 0))
        else:
            remain_size = file_size - self.cur_partition * self.block_size - self.cur_offset
            if remain_size < len(data):
                num_chain_needed = int(
                    (len(data) - remain_size) / self.block_size +
                    ((len(data) - remain_size) % self.block_size != 0))

        if num_chain_needed and not self.auto_scale:
            return -1
        # First allocate new blocks if needed
        while num_chain_needed != 0:
            _return = self.blocks[self.last_partition].run_command([
                FileOps.add_blocks,
                b(str(self.last_partition)),
                b(str(num_chain_needed))
            ])
            if _return[0] == b("!block_allocated"):
                self.last_partition += num_chain_needed
                for i in range(num_chain_needed):
                    self.cache.append(
                        FileCache(max_length=self.cache_size,
                                  block_size=self.cache_block_size,
                                  prefetch_block_num=self.prefetch_size))
                temp_last_offset = self.last_offset
                self.last_offset = 0
                num_chain_needed = 0
                try:
                    # Each entry after the status code is the
                    # '!'-separated chain for one new block.
                    for x in _return[1:]:
                        block_ids = [bytes_to_str(j) for j in x.split(b('!'))]
                        chain = ReplicaChain(block_ids, 0, 0,
                                             rpc_storage_mode.rpc_in_memory)
                        self.blocks.append(
                            ReplicaChainClient(self.fs, self.path,
                                               self.client_cache, chain,
                                               FileOps.op_types))
                except Exception:
                    return -1
        if self.block_size == self.cur_offset:
            # Current block is exactly full; move to the next one.
            self.cur_offset = 0
            self.cur_partition += 1
        # Parallel write
        remaining_data = len(data)
        start_partition = self._block_id()
        count = 0
        while remaining_data > 0:
            count += 1
            # Chunk size is bounded by the cache block boundary, the
            # remaining data and the space left in the current block.
            data_to_write = data[
                len(data) - remaining_data:len(data) - remaining_data + min(
                    self.cache[self.cur_partition].block_size -
                    (self.cur_offset %
                     self.cache[self.cur_partition].block_size
                     ), remaining_data, self.block_size - self.cur_offset)]
            if temp_last_offset >= 0:
                # New blocks were allocated: pass the pre-allocation
                # last offset instead of the reset one.
                self.blocks[self._block_id()].send_command([
                    FileOps.write, data_to_write,
                    b(str(self.cur_offset)),
                    b(str(self.cache[self.cur_partition].block_size)),
                    b(str(temp_last_offset))
                ])
            else:
                self.blocks[self._block_id()].send_command([
                    FileOps.write, data_to_write,
                    b(str(self.cur_offset)),
                    b(str(self.cache[self.cur_partition].block_size)),
                    b(str(self.last_offset))
                ])
            resp = self.blocks[self.cur_partition].recv_response()
            self.cache[self.cur_partition].miss_handling(
                self.cur_offset, resp[-1])
            remaining_data -= len(data_to_write)
            self.cur_offset += len(data_to_write)
            # Track the file's high-water mark.
            if self.last_offset < self.cur_offset and self.cur_partition == self.last_partition:
                self.last_offset = self.cur_offset
            if self.cur_offset >= self.block_size and self.cur_partition != self.last_partition:
                # Crossed a block boundary; advance the cursor.
                self.cur_offset = 0
                self.cur_partition += 1
                if self.last_partition < self.cur_partition:
                    self.last_partition = self.cur_partition
                    self.last_offset = self.cur_offset

        return len(data)
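The num_chain_needed expression is a ceiling division in disguise: for the non-negative sizes involved, int(n / d + (n % d != 0)) equals math.ceil(n / d). A minimal check (chains_needed is an illustrative helper, not part of the client):

    import math

    def chains_needed(overflow_bytes, block_size):
        # Same expression as in write_ls above.
        return int(overflow_bytes / block_size +
                   (overflow_bytes % block_size != 0))

    assert chains_needed(10, 4) == math.ceil(10 / 4) == 3
    assert chains_needed(8, 4) == math.ceil(8 / 4) == 2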
Example #5
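Reads the queue's egress-rate metric through the retry wrapper from Example #1.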
    def out_rate(self):
        # Run the command with retries and parse the numeric payload
        # from the response.
        return float(bytes_to_str(self._run_repeated([QueueOps.out_rate])[1]))
Example #6
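A shared-log variant of the block-allocating write: the record and its logical stream names are sized together for allocation, then sent in a single pipelined command.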
    def write(self, pos, data_, logical_streams):
        file_size = (self.last_partition + 1) * self.block_size
        num_chain_needed = 0

        # Concatenate the stream names and the record payload only to
        # size the write; the command itself sends them separately.
        data = ""
        for ls in logical_streams:
            data += ls
        data += data_

        if self.cur_partition * self.block_size + self.cur_offset > file_size:
            # The write cursor already points past the allocated blocks.
            num_chain_needed = int(self.cur_partition - self.last_partition)
            file_size = (self.cur_partition + 1) * self.block_size
            remain_size = file_size - self.cur_partition * self.block_size - self.cur_offset
            num_chain_needed += int(
                (len(data) - remain_size) / self.block_size +
                ((len(data) - remain_size) % self.block_size != 0))
        else:
            remain_size = file_size - self.cur_partition * self.block_size - self.cur_offset
            if remain_size < len(data):
                num_chain_needed = int(
                    (len(data) - remain_size) / self.block_size +
                    ((len(data) - remain_size) % self.block_size != 0))

        if num_chain_needed and not self.auto_scale:
            return -1
        # First allocate new blocks if needed
        while num_chain_needed != 0:
            _return = self.blocks[self.last_partition].run_command([
                SharedLogOps.add_blocks,
                b(str(self.last_partition)),
                b(str(num_chain_needed))
            ])
            if _return[0] == b("!block_allocated"):
                self.last_partition += num_chain_needed
                self.last_offset = 0
                num_chain_needed = 0
                try:
                    # Each entry after the status code is the
                    # '!'-separated chain for one new block.
                    for x in _return[1:]:
                        block_ids = [bytes_to_str(j) for j in x.split(b('!'))]
                        chain = ReplicaChain(block_ids, 0, 0,
                                             rpc_storage_mode.rpc_in_memory)
                        self.blocks.append(
                            ReplicaChainClient(self.fs, self.path,
                                               self.client_cache, chain,
                                               SharedLogOps.op_types))
                except Exception:
                    return -1
        if self.block_size == self.cur_offset:
            # Current block is exactly full; move to the next one.
            self.cur_offset = 0
            self.cur_partition += 1
        # Parallel write
        remaining_data = len(data)
        start_partition = self._block_id()
        count = 0
        while remaining_data > 0:
            count += 1
            if len(data) > self.block_size - self.cur_offset:
                # Record does not fit in the current block; advance.
                self.cur_offset = 0
                self.cur_partition += 1
                if self.last_partition < self.cur_partition:
                    self.last_partition = self.cur_partition
                    self.last_offset = self.cur_offset
            arg_list = [SharedLogOps.write, b(str(pos)), data_]
            arg_list += logical_streams

            self.blocks[self._block_id()].send_command(arg_list)
            # The whole record is sent in one command, so this loop
            # body executes at most once.
            remaining_data -= len(data)
            self.cur_offset += len(data)
            if self.last_offset < self.cur_offset and self.cur_partition == self.last_partition:
                self.last_offset = self.cur_offset

        # Collect the pipelined responses.
        for i in range(0, count):
            self.blocks[start_partition + i].recv_response()

        return len(data)
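A hypothetical call site (client stands for a shared-log client object exposing the method above; the names are illustrative):

    # The return value is the combined length of the stream names and
    # the record payload.
    n = client.write(0, "payload", ["stream_a", "stream_b"])
    assert n == len("stream_a") + len("stream_b") + len("payload")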