def check_range_table(self):
    """Verify the local hash ranges table against another DHT node.

    The check request is sent to the node owning the next range after our
    own; when that node turns out to be this node itself, a random superior
    neighbour is asked instead.

    @return False when no neighbours are found, True otherwise
    """
    ranges_count = self.ranges_table.count()
    mod_index = self.ranges_table.get_mod_index()
    range_start = self.get_dht_range().get_start()
    range_end = self.get_dht_range().get_end()

    next_range = self.ranges_table.find_next(range_start)
    if not next_range:
        # wrap around to the beginning of the table
        next_range = self.ranges_table.get_first()
    neighbour = next_range.node_address

    if neighbour == self.self_address:
        # our own range follows us - pick a random superior neighbour instead
        neighbours = self.get_neighbours(NT_SUPERIOR, self.OPTYPE)
        if not neighbours:
            return False
        neighbour = random.choice(neighbours)

    logger.debug('Checking range table at %s'%neighbour)
    params = {'mod_index': mod_index, 'ranges_count': ranges_count,
              'range_start': range_start, 'range_end': range_end}
    packet_obj = FabnetPacketRequest(method='CheckHashRangeTable',
                    sender=self.self_address, parameters=params)
    self.call_node(neighbour, packet_obj)
    return True
def run(self):
    # Statistic-collection thread loop: periodically polls every node whose
    # status equals self.check_status via the NodeStatistic operation and
    # stores the returned statistics through the operator.
    logger.info('Thread started!')
    while not self.stopped.is_set():
        dt = 0
        try:
            t0 = datetime.now()
            logger.debug('Collecting %s nodes statistic...'%self.check_status)
            nodeaddrs = self.operator.get_nodes_list(self.check_status)
            for nodeaddr in nodeaddrs:
                logger.debug('Get statistic from %s'%nodeaddr)
                packet_obj = FabnetPacketRequest(method='NodeStatistic', sync=True)
                ret_packet = self.client.call_sync(nodeaddr, packet_obj)
                if self.check_status == UP and ret_packet.ret_code:
                    # an UP node failed to answer -> mark it DOWN
                    logger.warning('Node with address %s does not response... Details: %s'%(nodeaddr, ret_packet))
                    self.operator.change_node_status(nodeaddr, DOWN)
                else:
                    stat = json.dumps(ret_packet.ret_parameters)
                    self.operator.update_node_stat(nodeaddr, stat)
            dt = total_seconds(datetime.now() - t0)
            logger.info('Nodes (with status=%s) stat is collected. Processed secs: %s'%(self.check_status, dt))
        except Exception, err:
            logger.error(str(err))
        finally:
            # NOTE(review): the finally body is truncated in this view of the
            # source; presumably it waits until the next collection cycle.
def callback(self, packet, sender=None):
    """Handle a CheckHashRangeTable response packet.

    @param packet - object of FabnetPacketResponse class
    @param sender - address of sender node; None means this node is the
                    operation initiator
    @return object of FabnetPacketResponse that should be resent to the
            current node requestor, or None to disable packet resending
    """
    logger.debug('CheckHashRangeTable response from %s: %s %s'%(packet.from_node, packet.ret_code, packet.ret_message))
    if self.operator.get_status() == DS_DESTROYING:
        return

    code = packet.ret_code
    if code == RC_DONT_STARTED:
        # remote node is not started yet: drop its range, give the table
        # time to settle, then re-check the near range
        self.operator.remove_node_range(packet.from_node)
        time.sleep(self.operator.get_config_value('WAIT_DHT_TABLE_UPDATE'))
        self.operator.check_near_range()
    elif code == RC_OK:
        self.operator.check_near_range()
    elif code == RC_ERROR:
        logger.error('CheckHashRangeTable failed on %s. Details: %s %s'%(packet.from_node,
                packet.ret_code, packet.ret_message))
    elif code == RC_NEED_UPDATE:
        # local table is stale - pull the ranges table from the peer
        self._get_ranges_table(packet.from_node, packet.ret_parameters['mod_index'],
                packet.ret_parameters['ranges_count'], packet.ret_parameters.get('force', False))
def load(self, ranges_dump):
    """Replace the ranges table with an unpickled dump.

    Logs a +/- diff between the previous table and the new one (when the
    previous table held more than one range) and unblocks the table.

    @param ranges_dump - pickled (ranges, last_dm, mod_index) tuple
    @return number of ranges in the previous table, 0 if it was empty
    """
    self.__lock.acquire()
    try:
        prev_ranges = self.__ranges if self.__ranges else False
        self.__ranges, self.__last_dm, self.__mod_index = pickle.loads(ranges_dump)
        logger.debug('HASH RANGES: %s'%'\n'.join([r.to_str() for r in self.__ranges]))

        if prev_ranges and len(prev_ranges) > 1:
            # log which ranges appeared (+) and disappeared (-)
            new_keys = [r.to_str() for r in self.iter_table()]
            old_keys = [r.to_str() for r in prev_ranges]
            parts = ['OLD(-)/NEW(+) HASHES IN TABLES:\n']
            for key in new_keys:
                if key not in old_keys:
                    parts.append('+ %s\n' % key)
            for key in old_keys:
                if key not in new_keys:
                    parts.append('- %s\n' % key)
            logger.info(''.join(parts))

        self.__blocked.clear()
        return len(prev_ranges) if prev_ranges else 0
    finally:
        self.__lock.release()
def run(self):
    """MonitorDHTRanges worker loop.

    Sleeps MONITOR_DHT_RANGES_TIMEOUT seconds (interruptible, checked every
    second), then runs one maintenance iteration: free-size check,
    reservation range processing and replicas processing. Loop ends when
    the stop flag is set.
    """
    logger.info('started')
    while True:
        # interruptible sleep before the next iteration
        for _ in xrange(Config.MONITOR_DHT_RANGES_TIMEOUT):
            if self.stopped.is_set():
                break
            time.sleep(1)
        if self.stopped.is_set():
            break

        try:
            logger.debug('MonitorDHTRanges iteration...')
            self._check_range_free_size()
            if self.stopped.is_set():
                break
            self._process_reservation_range()
            if self.stopped.is_set():
                break
            self._process_replicas()
            if self.stopped.is_set():
                break
        except Exception as err:
            import traceback
            # shim lets traceback.print_exc() write into the logger
            logger.write = logger.debug
            traceback.print_exc(file=logger)
            logger.error('[MonitorDHTRanges] %s'% err)
def run(self):
    """MonitorDHTRanges worker loop (foreign-blocks variant).

    Sleeps MONITOR_DHT_RANGES_TIMEOUT seconds (interruptible every second,
    also breakable via the self.interrupt event), skips iterations while
    the operator is still initializing, then moves foreign data blocks and
    checks range free size. Loop ends when the stop flag is set.
    """
    logger.info('started')
    while True:
        # interruptible sleep before the next iteration
        for _ in xrange(int(Config.MONITOR_DHT_RANGES_TIMEOUT)):
            if self.stopped.is_set():
                break
            if self.interrupt.is_set():
                # explicit wake-up request: clear the flag and iterate now
                self.interrupt.clear()
                break
            time.sleep(1)
        if self.stopped.is_set():
            break
        if self.operator.status == DS_INITIALIZE:
            continue

        try:
            logger.debug('MonitorDHTRanges iteration...')
            self._process_foreign()
            if self.stopped.is_set():
                break
            self._check_range_free_size()
            if self.stopped.is_set():
                break
        except Exception as err:
            # FIX: log the formatted traceback directly instead of
            # monkey-patching logger.write and redirecting print_exc()
            # through the logger object (fragile file-like hack)
            logger.debug(traceback.format_exc())
            logger.error('[MonitorDHTRanges] %s'% err)
def send_subrange_data(self, node_address):
    """Transfer the retiring subrange's data blocks to another node.

    The local range must already be split into (ret_range, new_range).
    Every block of ret_range is pushed to node_address via a synchronous
    PutDataBlock call; on full success the new (shrunk) range is saved and
    installed. On any failure the subranges are joined back and the error
    is re-raised.

    @param node_address - address of the node receiving the subrange data
    @raise Exception when the range is not split or a PutDataBlock fails
    """
    dht_range = self.get_dht_range()
    subranges = dht_range.get_subranges()
    if not subranges:
        raise Exception('Range is not splitted!')

    ret_range, new_range = subranges
    try:
        logger.debug('Starting subrange data transfering to %s'% node_address)
        for key, data in ret_range.iter_range():
            params = {'key': key, 'carefully_save': True}
            req = FabnetPacketRequest(method='PutDataBlock', \
                    sender=self.self_address, binary_data=data, sync=True, parameters=params)
            resp = self.call_node(node_address, req)
            if resp.ret_code:
                raise Exception('Init PutDataBlock operation on %s error. Details: %s'%(node_address, resp.ret_message))

        new_range.save_range()
        self.update_dht_range(new_range)
    except Exception as err:
        logger.error('send_subrange_data error: %s'%err)
        # roll back the split before propagating the failure
        dht_range.join_subranges()
        # FIX: bare raise preserves the original traceback;
        # `raise err` in Python 2 discarded it
        raise
def _process_replicas(self):
    """Push every locally stored replica block to its responsible node.

    Successfully delivered blocks are removed from the local replicas
    range. Stops early when the thread's stop flag is set.
    """
    self.__full_nodes = []
    local_range = self.operator.get_dht_range()
    for digest, data, file_path in local_range.iter_replicas():
        if self.stopped.is_set():
            break
        logger.info('Processing replica %s'%digest)
        if not self._put_data(digest, data, is_replica=True):
            continue
        logger.debug('data block with key=%s is send from replicas range'%digest)
        os.unlink(file_path)
def process(self, packet):
    """Process a CheckHashRangeTable request packet from a sender node.

    Compares the sender's view of the hash ranges table (mod index, ranges
    count and its own range bounds) with the local table and decides which
    side must update.

    @param packet - object of FabnetPacketRequest class
    @return object of FabnetPacketResponse or None for disabling packet
            response to sender
    @raise Exception when a mandatory request parameter is missing
    """
    if self.operator.get_status() == DS_INITIALIZE:
        return FabnetPacketResponse(ret_code=RC_OK, ret_message='Node is not initialized yet!')

    def required_param(name, label):
        # every request parameter is mandatory for this operation
        value = packet.parameters.get(name, None)
        if value is None:
            raise Exception('%s parameter is expected for CheckHashRangeTable operation' % label)
        return value

    f_mod_index = required_param('mod_index', 'Mod index')
    ranges_count = required_param('ranges_count', 'ranges_count')
    range_start = required_param('range_start', 'range_start')
    range_end = required_param('range_end', 'range_end')

    c_mod_index, c_ranges_count, _ = self.operator.get_ranges_table_status()
    found_range = self._find_range(range_start, range_end, packet.sender)
    if not found_range:
        logger.debug('CheckHashRangeTable: sender range does not found in local hash table...')
        if ranges_count < c_ranges_count:
            # sender's table is smaller - it must update from us
            return FabnetPacketResponse(ret_code=RC_NEED_UPDATE, \
                    ret_parameters={'mod_index': c_mod_index, 'ranges_count': c_ranges_count})
        if ranges_count == c_ranges_count and c_mod_index == f_mod_index:
            # symmetric conflict: address order decides which side updates
            if packet.sender > self.self_address:
                return FabnetPacketResponse(ret_code=RC_NEED_UPDATE, \
                        ret_parameters={'mod_index': c_mod_index, 'ranges_count': c_ranges_count, 'force': True})
            if packet.sender < self.self_address:
                return FabnetPacketResponse(ret_code=RC_JUST_WAIT)

    logger.debug('CheckHashRangeTable: f_mod_index=%s c_mod_index=%s'%(f_mod_index, c_mod_index))
    if f_mod_index >= c_mod_index:
        return FabnetPacketResponse()
    return FabnetPacketResponse(ret_code=RC_NEED_UPDATE, \
            ret_parameters={'mod_index': c_mod_index, 'ranges_count': c_ranges_count})
def _process_foreign(self):
    """Move data blocks belonging to other nodes' ranges off this node.

    Delivered blocks are deleted locally. When no foreign block is found at
    all, the changed-range flag is reset.
    """
    self.__full_nodes = []
    local_range = self.operator.get_dht_range()
    processed = 0
    for digest, dbct, file_path in local_range.iterator(foreign_only=True):
        processed += 1
        if self.stopped.is_set():
            break
        logger.info('Processing foreign data block %s %s'%(digest, dbct))
        if not self._put_data(digest, file_path, dbct):
            continue
        logger.debug('data block with key=%s is send'%digest)
        os.remove(file_path)

    if processed == 0:
        # nothing foreign found - the range content did not change
        self.__changed_range = False
def run(self):
    # Neighbour-checking thread loop: calls operator.check_neighbours()
    # repeatedly until stopped, logging each iteration's processing time.
    logger.info('Check neighbours thread is started!')
    proc_dt = timedelta(0)
    while not self.stopped.is_set():
        try:
            t0 = datetime.now()
            self.operator.check_neighbours()
            proc_dt = datetime.now() - t0
            logger.debug('CheckNeighbours process time: %s'%proc_dt)
        except Exception, err:
            # shim so traceback.print_exc() can write into the logger
            logger.write = logger.debug
            traceback.print_exc(file=logger)
            logger.error('[CheckNeighboursThread] %s'%err)
        finally:
            # NOTE(review): the finally body is truncated in this view of the
            # source; presumably it waits until the next check cycle.
def _put_data(self, key, path, dbct):
    """Send the data block stored at *path* to the node responsible for *key*.

    Directories are first zipped into a temporary file. The block is locked
    for read, then shipped via a synchronous PutDataBlock call. Nodes that
    report RC_NO_FREE_SPACE are remembered and skipped afterwards.

    @param key - hex digest used to locate the target hash range
    @param path - file (or directory) holding the data block
    @param dbct - data block content type, forwarded to PutDataBlock
    @return True when the remote node accepted the block, False otherwise
    """
    k_range = self.operator.ranges_table.find(long(key, 16))
    if not k_range:
        logger.debug('No range found for reservation key %s'%key)
        return False

    tmp = None
    if os.path.isdir(path):
        # directories are shipped as a zip archive built in a temp file.
        # SECURITY(review): os.system with interpolated paths is shell-injection
        # prone; consider subprocess.call with an argument list instead.
        tmp = tempfile.NamedTemporaryFile(suffix='.zip')
        os.system('rm -f %s && cd %s && zip -r %s *'%(tmp.name, path, tmp.name))
        path = tmp.name

    try:
        db = ThreadSafeDataBlock(path)
        if not db.try_block_for_read():
            # block is in use elsewhere - skip it this iteration
            logger.info('DB %s is locked. skip it...'%path)
            return False
        if k_range.node_address in self.__full_nodes:
            logger.info('Node %s does not have free space. Skipping put data block...'%k_range.node_address)
            return False
        if k_range.node_address == self.operator.self_address:
            logger.info('Skip moving to local node')
            return False

        params = {'key': key, 'dbct': dbct, 'init_block': False, 'carefully_save': True}
        req = FabnetPacketRequest(method='PutDataBlock', sender=self.operator.self_address, \
                parameters=params, binary_data=ThreadSafeDataBlock(path), sync=True)
        resp = self.operator.call_node(k_range.node_address, req)
    finally:
        if tmp:
            tmp.close()

    # NOTE(review): the read lock taken by try_block_for_read() is not
    # explicitly released in this method - confirm it is released elsewhere
    # (e.g. by the data block's own lifecycle).
    if resp.ret_code == RC_NO_FREE_SPACE:
        self.__full_nodes.append(k_range.node_address)
        return False
    if resp.ret_code not in (RC_OK, RC_OLD_DATA):
        logger.error('PutDataBlock error on %s: %s'%(k_range.node_address, resp.ret_message))
        return False
    return True
def process(self, packet):
    """Process an UpdateHashRangeTable request packet from a sender node.

    Applies the 'remove' and 'append' range lists from the packet to the
    local hash ranges table. Skipped while the node is being destroyed or
    before the table was ever initialized.

    @param packet - object of FabnetPacketRequest class
    @return None (no response packet is sent back to the sender)
    """
    if self.operator.get_status() == DS_DESTROYING:
        return

    _, icnt = self.operator.get_ranges_table_status()
    if icnt == 0:
        logger.debug('Received update for hash ranges table, but it is not initialized yet. Skip operation...')
        return

    removes = [HashRange(item[0], item[1], item[2])
               for item in packet.parameters.get('remove', [])]
    appends = [HashRange(item[0], item[1], item[2])
               for item in packet.parameters.get('append', [])]

    self._lock()
    try:
        self.operator.apply_ranges_table_changes(removes, appends)
        logger.debug('RM RANGE: %s'%', '.join([r.to_str() for r in removes]))
        logger.debug('APP RANGE: %s'%', '.join([a.to_str() for a in appends]))
    except Exception as err:
        logger.error('UpdateHashRangeTable error: %s'%err)

    if not packet.sender:
        # this node initiated the update - re-check its own DHT range
        self.operator.check_dht_range(False) #reinit=False
def check_session(self, sock_proc, session_id, send_allow=False):
    """Authenticate the remote peer on this connection.

    When no key storage is configured, authentication is disabled and
    (None, None) is returned. Otherwise the session identified by
    *session_id* is looked up; if missing or expired a challenge-response
    handshake is performed: random data is sent, the client answers with
    its certificate and the signed data, and a new session is created on
    successful verification.

    @param sock_proc - socket processor used to exchange packets
    @param session_id - identifier of a possibly existing session
    @param send_allow - when True, send an empty "allowed" response packet
                        before returning for an already-valid session
    @return (cn, role) of the authenticated peer, or (None, None) when
            authentication is disabled
    @raise InvalidCertificate when the client omits the certificate or the
           signed data
    """
    if not self._key_storage:
        if send_allow:
            sock_proc.send_packet(FabnetPacketResponse())
        return None, None

    session = self.oper_manager.get_session(session_id)
    if session_id and session is None:
        logger.debug('Invalid session "%s"'%session_id)
    if session and not session.is_valid():
        logger.debug('Session for "%s" is expired'%session.cn)
        session = None

    if session is None:
        # challenge-response handshake: random data must be signed by the client
        data = str(uuid.uuid4())
        cert_req_packet = FabnetPacketResponse(ret_code=RC_REQ_AUTH, ret_parameters={'data': data})
        sock_proc.send_packet(cert_req_packet)
        cert_packet = sock_proc.recv_packet(allow_socket_close=False)
        certificate = cert_packet.parameters.get('certificate', None)
        if not certificate:
            raise InvalidCertificate('No client certificate found!')
        signed_data = cert_packet.parameters.get('signed_data', None)
        if not signed_data:
            raise InvalidCertificate('No signed data found!')
        cn, role = self._key_storage.verify_cert(certificate, signed_data, data)
        session_id = self.oper_manager.create_session(cn, role)
        req_packet = FabnetPacketResponse(ret_code=RC_OK, ret_parameters={'session_id': session_id})
        sock_proc.send_packet(req_packet)
        return cn, role

    if send_allow:
        sock_proc.send_packet(FabnetPacketResponse())
    return session.cn, session.role
def worker_routine(self, item): if len(item) != 2: raise Exception('Expected (<address>,<packet>), but "%s" occured'%item) address, packet = item rcode, rmsg = self.fri_client.call(address, packet) if rcode == RC_OK: return logger.debug("Can't call async operation %s on %s. Details: %s"%\ (getattr(packet, 'method', 'callback'), address, rmsg)) #logger.debug('Failed packet: %s'%packet) if packet.is_response: return ret_packet = FabnetPacketResponse(message_id=packet.message_id, \ from_node=address, ret_code=RC_DONT_STARTED, ret_message=rmsg) if not self.operator.is_stopped(): rcode, rmsg = self.fri_client.call(self.self_address, ret_packet) if rcode == RC_OK: return logger.debug("Can't send error response to self node")
def run(self):
    """Hash-table checking thread loop.

    Periodically sends a CheckHashRangeTable request: while the table has
    fewer than two ranges it bootstraps from a random superior neighbour
    (waiting for neighbours to appear if needed); otherwise it asks the
    node owning the next range after our own. Loop ends when stopped.
    """
    logger.info('Thread started!')
    while not self.stopped.is_set():
        try:
            ranges_count = self.operator.ranges_table.count()
            mod_index = self.operator.ranges_table.get_mod_index()
            range_start = self.operator.get_dht_range().get_start()
            range_end = self.operator.get_dht_range().get_end()

            if ranges_count < 2:
                # table (almost) empty - bootstrap from a superior neighbour
                neighbours = self.operator.get_neighbours(NT_SUPERIOR, self.operator.OPTYPE)
                if not neighbours:
                    logger.info('Waiting neighbours...')
                    time.sleep(Config.INIT_DHT_WAIT_NEIGHBOUR_TIMEOUT)
                    continue
                neighbour = random.choice(neighbours)
            else:
                next_range = self.operator.ranges_table.find_next(range_start)
                if not next_range:
                    # wrap around to the beginning of the table
                    next_range = self.operator.ranges_table.get_first()
                neighbour = next_range.node_address

            logger.debug('Checking range table at %s'%neighbour)
            params = {'mod_index': mod_index, 'ranges_count': ranges_count,
                      'range_start': range_start, 'range_end': range_end}
            packet_obj = FabnetPacketRequest(method='CheckHashRangeTable',
                            sender=self.operator.self_address, parameters=params)
            self.operator.call_node(neighbour, packet_obj)
        except Exception as err:
            logger.error(str(err))

        # interruptible wait before the next check
        for _ in xrange(Config.CHECK_HASH_TABLE_TIMEOUT):
            if self.stopped.is_set():
                break
            time.sleep(1)
def _put_data(self, key, data, is_replica=False):
    """Send a data block to the node responsible for *key*.

    Nodes that previously reported RC_NO_FREE_SPACE are remembered in
    self.__full_nodes and skipped.

    @param key - hex digest used to locate the target hash range
    @param data - binary data block payload
    @param is_replica - True when the block comes from the replicas range
    @return True when the remote node accepted the block, False otherwise
    """
    k_range = self.operator.ranges_table.find(long(key, 16))
    if not k_range:
        logger.debug('No range found for reservation key %s'%key)
        return False
    if k_range.node_address in self.__full_nodes:
        logger.info('Node %s does not have free space. Skipping put data block...'%k_range.node_address)
        return False

    params = {'key': key, 'is_replica': is_replica, 'carefully_save': True}
    req = FabnetPacketRequest(method='PutDataBlock', sender=self.operator.self_address,\
            parameters=params, binary_data=data, sync=True)
    resp = self.operator.call_node(k_range.node_address, req)

    if resp.ret_code == RC_NO_FREE_SPACE:
        # FIX: return immediately after marking the node full instead of
        # falling through and logging a spurious 'PutDataBlock error'
        # (consistent with the directory-based _put_data implementation)
        self.__full_nodes.append(k_range.node_address)
        return False
    if resp.ret_code not in (RC_OK, RC_OLD_DATA):
        logger.error('PutDataBlock error on %s: %s'%(k_range.node_address, resp.ret_message))
        return False
    return True
def process(self, packet):
    """Process an UpdateHashRangeTable request packet from a sender node.

    Applies the 'remove' and 'append' range lists from the packet to the
    local hash ranges table. Skipped while the node is being destroyed.

    @param packet - object of FabnetPacketRequest class
    @return None (no response packet is sent back to the sender)
    """
    if self.operator.get_status() == DS_DESTROYING:
        return

    removes = [HashRange(item[0], item[1], item[2])
               for item in packet.parameters.get('remove', [])]
    appends = [HashRange(item[0], item[1], item[2])
               for item in packet.parameters.get('append', [])]

    self._lock()
    try:
        self.operator.apply_ranges_table_changes(removes, appends)
        logger.debug('RM RANGE: %s'%', '.join([r.to_str() for r in removes]))
        logger.debug('APP RANGE: %s'%', '.join([a.to_str() for a in appends]))
    except Exception as err:
        logger.debug('UpdateHashRangeTable error: %s STATUS=%s'%(err, self.operator.get_status()))