def run(self):
    """Background thread loop: periodically ask each superior neighbour
    to compare its hash ranges table modification index against ours
    via the CheckHashRangeTable operation, then wait (interruptibly)
    before the next round.
    """
    self.stopped = False
    logger.info('Thread started!')

    while not self.stopped:
        try:
            superiors = self.operator.get_neighbours(NT_SUPERIOR)
            if not superiors:
                # no superior neighbours known yet - back off and retry
                time.sleep(INIT_DHT_WAIT_NEIGHBOUR_TIMEOUT)
                continue

            for node_addr in superiors:
                logger.debug('Checking range table at %s'%node_addr)
                # re-read the mod index per neighbour: it may change mid-round
                req = FabnetPacketRequest(method='CheckHashRangeTable',
                        sender=self.operator.self_address,
                        parameters={'mod_index': self.operator.ranges_table.get_mod_index()})
                rcode, rmsg = self.operator.call_node(node_addr, req)

            # interruptible wait between check rounds
            waited = 0
            while waited < CHECK_HASH_TABLE_TIMEOUT and not self.stopped:
                time.sleep(1)
                waited += 1
        except Exception as err:
            logger.error(str(err))
def process(self, packet):
    """Handle a SplitRangeRequest: split this node's DHT range at
    [start_key, end_key] and report the size of the subrange that will
    be transferred to the requester.

    @param packet - object of FabnetPacketRequest class
    @return object of FabnetPacketResponse
            or None for disabling packet response to sender
    """
    params = packet.parameters

    start_key = params.get('start_key', None)
    if start_key is None:
        raise Exception('start_key is not found in SplitRangeRequest packet')

    end_key = params.get('end_key', None)
    if end_key is None:
        raise Exception('end_key is not found in SplitRangeRequest packet')

    dht_range = self.operator.get_dht_range()
    # refuse concurrent splits: an unfinished split leaves subranges behind
    if dht_range.get_subranges():
        return FabnetPacketResponse(ret_code=RC_ERROR, ret_message='Already splitting')

    ret_range, new_range = dht_range.split_range(start_key, end_key)
    subrange_size = ret_range.get_range_size()
    logger.debug('Range is splitted for %s. Subrange size: %s'%(packet.sender, subrange_size))

    return FabnetPacketResponse(ret_parameters={'range_size': subrange_size})
def process(self, packet):
    """Generate the key set (primary + replicas) for a put request and
    resolve the node responsible for each key from the hash ranges table.

    @param packet - object of FabnetPacketRequest class
    @return object of FabnetPacketResponse
            or None for disabling packet response to sender
    """
    prime_key = packet.parameters.get('key', None)
    if prime_key is not None:
        self._validate_key(prime_key)

    replica_count = int(packet.parameters.get('replica_count', MIN_REPLICA_COUNT))
    if replica_count < MIN_REPLICA_COUNT:
        return FabnetPacketResponse(ret_code=RC_ERROR,
                ret_message='Minimum replica count is equal to %s!'%MIN_REPLICA_COUNT)

    keys = KeyUtils.generate_new_keys(self.operator.node_name, replica_count, prime_key=prime_key)

    ret_keys = []
    # first generated key is the primary; every following one is a replica
    replica_flag = False
    for gen_key in keys:
        range_obj = self.operator.ranges_table.find(long(gen_key, 16))
        if not range_obj:
            logger.debug('[PutKeysInfoOperation] Internal error: No hash range found for key=%s!'%gen_key)
        else:
            ret_keys.append((gen_key, replica_flag, range_obj.node_address))
        replica_flag = True

    return FabnetPacketResponse(ret_parameters={'keys_info': ret_keys})
def process(self, packet):
    """Transfer all data blocks of the split-off subrange to the
    requesting node, then shrink our own range to the kept part.

    On any failure the split is rolled back (join_subranges) and an
    error response is returned; on success the method falls through
    returning None, i.e. no response packet is sent to the sender.

    @param packet - object of FabnetPacketRequest class
    @return object of FabnetPacketResponse
            or None for disabling packet response to sender
    """
    dht_range = self.operator.get_dht_range()
    subranges = dht_range.get_subranges()
    if not subranges:
        return FabnetPacketResponse(ret_code=RC_ERROR, ret_message="Range is not splitted!")

    # ret_range is handed over to the requester; new_range is what we keep
    ret_range, new_range = subranges
    try:
        node_address = packet.sender
        logger.debug("Starting subrange data transfering to %s" % node_address)
        for key, data in ret_range.iter_range():
            checksum = hashlib.sha1(data).hexdigest()
            params = {"key": key, "checksum": checksum}
            # synchronous push; abort the whole transfer on the first failure
            resp = self._init_operation(node_address, "PutDataBlock", params, binary_data=data, sync=True)
            if resp.ret_code:
                raise Exception(
                    "Init PutDataBlock operation on %s error. Details: %s"
                    % (node_address, resp.ret_message)
                )
        # all blocks delivered - commit the shrunken range
        # NOTE(review): placement after the loop assumed from collapsed source - confirm
        self.operator.update_dht_range(new_range)
    except Exception, err:
        logger.error("GetRangeDataRequestOperation error: %s" % err)
        # roll the split back so the local range stays consistent
        dht_range.join_subranges()
        return FabnetPacketResponse(ret_code=RC_ERROR, ret_message="Send range data failed: %s" % err)
def _process_replicas(self):
    """Flush locally buffered replicas: try to push every replica block
    to its responsible node and delete the local copy on success.
    """
    local_range = self.operator.get_dht_range()
    for digest, data, file_path in local_range.iter_replicas():
        logger.info('Processing replica %s'%digest)

        if not self._put_data(digest, data, is_replica=True):
            # keep the local file and retry on a later iteration
            logger.debug('data block with key=%s is send from reservation range'%digest)
            continue

        logger.debug('Removing %s from local replicas'%digest)
        os.remove(file_path)
def process(self, packet, role=None):
    """process request fabnet packet
    @param packet - object of FabnetPacketRequest class
    @param role - requestor role (None for disable auth)

    Dispatches the packet to the registered operation handler:
    deduplicates by message_id, checks the caller's role, updates the
    per-method statistics, optionally resends the packet to neighbours
    (before_resend), and returns the handler's response packet (or None).
    On any failure an error FabnetPacketResponse is returned instead of
    raising.
    """
    try:
        # keep-alive packets bypass the whole dispatch machinery
        if packet.method == KEEP_ALIVE_METHOD:
            return self._process_keep_alive(packet)

        # put_safe is atomic: False means this message_id was seen before
        inserted = self.msg_container.put_safe(packet.message_id,
                        {'operation': packet.method,
                         'sender': packet.sender,
                         'responses_count': 0,
                         'datetime': datetime.now()})
        if not inserted:
            #this message is already processing/processed
            #logger.debug('packet is already processing/processed: %s'%packet)
            return

        operation_obj = self.__operations.get(packet.method, None)
        if operation_obj is None:
            raise OperException('Method "%s" does not implemented!'%packet.method)

        # raises if the requestor role is not allowed for this operation
        operation_obj.check_role(role)

        logger.debug('processing packet %s'%packet)

        if self.__stat is not None:
            # per-method call counter, guarded by the operator lock
            self._lock()
            try:
                self.__stat[packet.method] += 1
            finally:
                self._unlock()

        message_id = packet.message_id
        # the operation may rewrite the packet before it is flooded on
        n_packet = operation_obj.before_resend(packet)
        if n_packet:
            n_packet.message_id = message_id
            n_packet.sync = False
            self._send_to_neighbours(n_packet)

        s_packet = operation_obj.process(packet)
        if s_packet:
            s_packet.message_id = packet.message_id
            s_packet.from_node = self.self_address

        return s_packet
    except Exception, err:
        err_packet = FabnetPacketResponse(from_node=self.self_address,
                        message_id=packet.message_id,
                        ret_code=1, ret_message= '[Operator.process] %s'%err)
        # HACK: give the logger a file-like write() so print_exc can dump
        # the traceback through logger.debug
        logger.write = logger.debug
        traceback.print_exc(file=logger)
        logger.error('[Operator.process] %s'%err)
        return err_packet
def process(self, packet):
    """Store a client data block: pack it, generate the key set
    (primary + replicas) and push a copy to the node responsible for
    each key. The first wait_writes_count writes are performed
    synchronously; the rest are fire-and-forget.

    @param packet - object of FabnetPacketRequest class
    @return object of FabnetPacketResponse
            or None for disabling packet response to sender
    """
    data = packet.binary_data
    key = packet.parameters.get('key', None)
    if key is not None:
        self._validate_key(key)
    checksum = packet.parameters.get('checksum', None)
    replica_count = int(packet.parameters.get('replica_count', MIN_REPLICA_COUNT))
    wait_writes_count = int(packet.parameters.get('wait_writes_count', 1))

    if checksum is None:
        return FabnetPacketResponse(ret_code=RC_ERROR,
                ret_message='Checksum does not found in request packet!')
    if wait_writes_count > replica_count:
        return FabnetPacketResponse(ret_code=RC_ERROR,
                ret_message='Cant waiting more replicas than saving!')
    if replica_count < MIN_REPLICA_COUNT:
        return FabnetPacketResponse(ret_code=RC_ERROR,
                ret_message='Minimum replica count is equal to %s!'%MIN_REPLICA_COUNT)

    data_block = DataBlock(data, checksum)
    data_block.validate()

    succ_count = 0
    is_replica = False
    keys = KeyUtils.generate_new_keys(self.operator.node_name, replica_count, prime_key=key)
    # repack under the primary key; data/checksum are replaced by the packed form
    data, checksum = data_block.pack(keys[0], replica_count)

    for key in keys:
        range_obj = self.operator.ranges_table.find(long(key, 16))
        if not range_obj:
            logger.debug('[ClientPutOperation] Internal error: No hash range found for key=%s!'%key)
        else:
            params = {'key': key, 'checksum': checksum, 'is_replica': is_replica}
            if succ_count >= wait_writes_count:
                # enough confirmed writes already - async fire-and-forget
                self._init_operation(range_obj.node_address, 'PutDataBlock', params, binary_data=data)
            else:
                resp = self._init_operation(range_obj.node_address, 'PutDataBlock', params,
                        sync=True, binary_data=data)
                if resp.ret_code:
                    logger.debug('[ClientPutOperation] PutDataBlock error from %s: %s'%\
                            (range_obj.node_address, resp.ret_message))
                else:
                    succ_count += 1
        # only the first generated key is the primary block
        is_replica = True

    # BUGFIX: original condition was `wait_writes_count < succ_count`, which
    # reported 'Writing data error!' exactly when MORE writes than required
    # succeeded and returned success when too few did. Fail only when fewer
    # than wait_writes_count blocks were confirmed.
    if succ_count < wait_writes_count:
        return FabnetPacketResponse(ret_code=RC_ERROR,
                ret_message='Writing data error!')

    return FabnetPacketResponse(ret_parameters={'key': keys[0]})
def run(self):
    """MonitorDHTRanges thread loop: on every iteration flush the
    reservation range and the locally buffered replicas.
    """
    logger.info('started')
    self.stopped = False
    while not self.stopped:
        try:
            #self._check_range_free_size()
            logger.debug('MonitorDHTRanges iteration...')

            self._process_reservation_range()

            self._process_replicas()
        except Exception as err:
            logger.error('[MonitorDHTRanges] %s'% err)
        finally:
            # BUGFIX: the original `finally:` clause had an empty body
            # (truncated source - a syntax error as written). Sleep between
            # iterations so the monitor does not busy-loop.
            # TODO(review): restore the original wait interval/constant.
            time.sleep(1)
def __spawn_work_threads(self):
    """Spawn one additional FriWorker thread unless the pool is stopped
    or already holds max_count workers.

    BUGFIX: the original checked `if self.stopped: return` after
    acquiring self.__lock but BEFORE the try/finally, so that early
    return left the lock held forever (deadlocking every later caller).
    The check now lives inside the try block.
    """
    logger.debug('starting new work thread')
    self.__lock.acquire()
    try:
        if self.stopped:
            return
        if len(self.__threads) == self.max_count:
            # pool is full - nothing to do
            return
        thread = FriWorker(self.queue, self.operator, self.keystorage, self.sessions)
        thread.setName('%s-FriWorkerThread#%i'%(self.workers_name, self.__threads_idx))
        self.__threads_idx += 1
        thread.start()
        self.__threads.append(thread)
    finally:
        self.__lock.release()
def _put_data(self, key, data, is_replica=False):
    """Send one data block to the node owning *key* via a synchronous
    PutDataBlock call.

    @param key - hex hash key of the block
    @param data - raw block payload
    @param is_replica - True when the block is a replica copy
    @return True on confirmed write, False when no owner range exists
            or the remote write failed
    """
    owner_range = self.operator.ranges_table.find(long(key, 16))
    if not owner_range:
        logger.debug('No range found for reservation key %s'%key)
        return False

    req = FabnetPacketRequest(method='PutDataBlock',
            sender=self.operator.self_address,
            parameters={'key': key,
                        'checksum': hashlib.sha1(data).hexdigest(),
                        'is_replica': is_replica},
            binary_data=data, sync=True)
    resp = self.operator.call_node(owner_range.node_address, req, sync=True)

    if resp.ret_code != RC_OK:
        logger.error('PutDataBlock error on %s: %s'%(owner_range.node_address, resp.ret_message))
        return False
    return True
def process(self, packet):
    """Apply a remote hash-ranges-table update: remove the ranges listed
    in 'remove' and append the ones listed in 'append'. Skipped while
    the local table is not initialized yet.

    @param packet - object of FabnetPacketRequest class
    @return object of FabnetPacketResponse
            or None for disabling packet response to sender
    """
    if self.operator.ranges_table.empty():
        logger.debug('Received update for hash ranges table, but it is not initialized yet. Skip operation...')
        return

    append_lst = packet.parameters.get('append', [])
    rm_lst = packet.parameters.get('remove', [])

    # each entry is a (start, end, node_address) triple
    rm_obj_list = [HashRange(r[0], r[1], r[2]) for r in rm_lst]
    ap_obj_list = [HashRange(a[0], a[1], a[2]) for a in append_lst]

    self._lock()
    try:
        self.operator.ranges_table.apply_changes(rm_obj_list, ap_obj_list)
        logger.debug('RM RANGE: %s'%', '.join([r.to_str() for r in rm_obj_list]))
        logger.debug('APP RANGE: %s'%', '.join([a.to_str() for a in ap_obj_list]))
    except Exception as err:
        logger.error('UpdateHashRangeTable error: %s'%err)
    finally:
        # BUGFIX: the lock taken by self._lock() was never released in the
        # original, leaking it on every call (the _lock/_unlock pairing used
        # elsewhere in this codebase always releases in a finally block).
        self._unlock()
def process(self, packet):
    """Compare the sender's hash-ranges-table modification index with
    ours. Returns an empty response when the tables agree (or the
    sender is ahead), and RC_NEED_UPDATE with our index when the
    sender's table is stale.

    @param packet - object of FabnetPacketRequest class
    @return object of FabnetPacketResponse
            or None for disabling packet response to sender
    """
    remote_idx = packet.parameters.get('mod_index', None)
    if remote_idx is None:
        raise Exception('Mod index parameter is expected for CheckHashRangeTable operation')

    local_idx = self.operator.ranges_table.get_mod_index()
    if local_idx == remote_idx:
        return FabnetPacketResponse()

    logger.debug('f_mod_index=%s c_mod_index=%s'%(remote_idx, local_idx))

    if remote_idx > local_idx:
        # sender is ahead of us - nothing to tell it
        #self._get_ranges_table(packet.sender, c_mod_index)
        return FabnetPacketResponse()

    return FabnetPacketResponse(ret_code=RC_NEED_UPDATE,
            ret_parameters={'mod_index': local_idx})
def __stop_work_thread(self):
    """Reap dead workers and, if more than min_count workers remain,
    ask one idle worker to stop by queueing STOP_THREAD_EVENT.

    BUGFIX: the original checked `if self.stopped: return` after
    acquiring self.__lock but BEFORE the try/finally, so that early
    return left the lock held forever. The check now lives inside the
    try block.
    """
    self.__lock.acquire()
    try:
        if self.stopped:
            return

        # drop threads that already finished
        for_delete = []
        for i, thread in enumerate(self.__threads):
            if not thread.is_alive():
                logger.debug('Worker %s is not alive! delete it...'%thread.getName())
                for_delete.append(thread)
        for thr in for_delete:
            self.__threads.remove(thr)

        if len(self.__threads) <= self.min_count:
            logger.debug('trying stopping worker but min threads count occured')
            return

        # whichever worker dequeues this event will terminate itself
        self.queue.put(STOP_THREAD_EVENT)
    finally:
        self.__lock.release()

    logger.debug('stopped one work thread')