def start_as_dht_member(self):
    """Join the DHT by claiming a hash range from the network.

    Finds a candidate range, and either takes it directly (when it is
    already owned by this node) or asks the owning node to split it via
    a SplitRangeRequest.  Retries recursively (up to DHT_CYCLE_TRY_COUNT
    times, sleeping WAIT_RANGE_TIMEOUT between attempts) while no range
    is available or the split request fails.
    """
    if self.status == DS_DESTROYING:
        return

    self.status = DS_INITIALIZE
    dht_range = self.get_dht_range()

    nochange = False
    curr_start = dht_range.get_start()
    curr_end = dht_range.get_end()

    # After a failed/split attempt (__split_requests_cache non-empty) or when
    # we still hold the full hash space, look for the biggest free range;
    # otherwise look for a range near our current boundaries.
    if dht_range.is_max_range() or self.__split_requests_cache:
        new_range = self.__get_next_max_range()
    else:
        new_range = self.__get_next_range_near(curr_start, curr_end)

    if new_range:
        # BUG FIX: this flag previously used (start != curr_start or end != curr_end),
        # i.e. it was set when the ranges DIFFERED.  "nochange" must mean the
        # discovered range IS our current range, so no new FSHashRanges is needed.
        if new_range.start == curr_start and new_range.end == curr_end:
            nochange = True

        # The best candidate is already owned by this node - just go to work.
        if new_range.node_address == self.self_address:
            self.set_status_to_normalwork()
            return

    if new_range is None:
        # No ready range found - wait and try again (bounded retries).
        if self.__start_dht_try_count == DHT_CYCLE_TRY_COUNT:
            logger.error('Cant initialize node as a part of DHT')
            self.__start_dht_try_count = 0
            return

        logger.info('No ready range for me on network... So, sleep and try again')
        self.__start_dht_try_count += 1
        self.__split_requests_cache = []
        time.sleep(WAIT_RANGE_TIMEOUT)
        return self.start_as_dht_member()

    if nochange:
        new_dht_range = dht_range
    else:
        # Claim the upper half of the found range (the owner keeps the lower half).
        new_dht_range = FSHashRanges(long(new_range.start + new_range.length()/2 + 1), \
                long(new_range.end), self.save_path)
        self.update_dht_range(new_dht_range)
        new_dht_range.restore_from_trash() #try getting new range data from trash

    self.__split_requests_cache.append(new_range.node_address)

    logger.info('Call SplitRangeRequest to %s'%(new_range.node_address,))
    parameters = { 'start_key': new_dht_range.get_start(), 'end_key': new_dht_range.get_end() }
    req = FabnetPacketRequest(method='SplitRangeRequest', sender=self.self_address, parameters=parameters)
    ret_code, ret_msg = self.call_node(new_range.node_address, req)
    if ret_code != RC_OK:
        logger.error('Cant start SplitRangeRequest operation on node %s. Details: %s'%(new_range.node_address, ret_msg))
        # The owner refused/failed - remember it in the cache and retry with another node.
        return self.start_as_dht_member()
def __init__(self, self_address, home_dir='/tmp/', certfile=None, is_init_node=False, node_name='unknown'):
    """DHT operator initializer.

    Sets up the hash-ranges table, discovers any range data already on
    disk under <home_dir>/dht_range, and starts the background threads
    that maintain the local hash table and monitor DHT ranges.

    @param self_address: network address of this node
    @param home_dir: base directory for the node's on-disk state
    @param certfile: certificate file passed through to Operator
    @param is_init_node: True if this node bootstraps the DHT (owns the
        full hash space and starts in normal-work status)
    @param node_name: human-readable node name used in thread names
    """
    Operator.__init__(self, self_address, home_dir, certfile, is_init_node, node_name)

    self.status = DS_INITIALIZE
    self.ranges_table = HashRangesTable()
    # The bootstrap node initially owns the whole hash space.
    if is_init_node:
        self.ranges_table.append(MIN_HASH, MAX_HASH, self.self_address)

    self.save_path = os.path.join(home_dir, 'dht_range')
    if not os.path.exists(self.save_path):
        os.mkdir(self.save_path)

    # Recover the on-disk hash range left by a previous run (if any).
    self.__dht_range = FSHashRanges.discovery_range(self.save_path)
    # Addresses already asked for a range split; reset on retry cycles.
    self.__split_requests_cache = []
    # Retry counter for start_as_dht_member().
    self.__start_dht_try_count = 0
    self.__init_dht_thread = None
    if is_init_node:
        self.status = DS_NORMALWORK

    # Background maintenance threads (named after the node for log clarity).
    self.__check_hash_table_thread = CheckLocalHashTableThread(self)
    self.__check_hash_table_thread.setName('%s-CheckLocalHashTableThread'%self.node_name)
    self.__check_hash_table_thread.start()

    self.__monitor_dht_ranges = MonitorDHTRanges(self)
    self.__monitor_dht_ranges.setName('%s-MonitorDHTRanges'%self.node_name)
    self.__monitor_dht_ranges.start()
def __init__(self, self_address, home_dir='/tmp/', key_storage=None, \
        is_init_node=False, node_name='unknown', config={}):
    """DHT operator initializer (config-aware revision).

    Merges the caller-supplied config over DEFAULT_DHT_CONFIG, sets up
    the hash-ranges table, discovers any range data already on disk
    under <home_dir>/dht_range, and starts the background threads that
    maintain the local hash table and monitor DHT ranges.

    @param self_address: network address of this node
    @param home_dir: base directory for the node's on-disk state
    @param key_storage: key storage passed through to Operator
    @param is_init_node: True if this node bootstraps the DHT (owns the
        full hash space and starts in normal-work status)
    @param node_name: human-readable node name used in thread names
    @param config: overrides applied on top of DEFAULT_DHT_CONFIG
        (NOTE: mutable default argument - safe here only because it is
        never mutated, just copied into cur_cfg)
    """
    cur_cfg = {}
    cur_cfg.update(DEFAULT_DHT_CONFIG)
    cur_cfg.update(config)

    Operator.__init__(self, self_address, home_dir, key_storage, \
            is_init_node, node_name, cur_cfg)

    self.status = DS_INITIALIZE
    self.ranges_table = HashRangesTable()
    # The bootstrap node initially owns the whole hash space.
    if is_init_node:
        self.ranges_table.append(MIN_HASH, MAX_HASH, self.self_address)

    self.save_path = os.path.join(home_dir, 'dht_range')
    if not os.path.exists(self.save_path):
        os.mkdir(self.save_path)

    # Addresses already asked for a range split; reset on retry cycles.
    self.__split_requests_cache = []
    # Recover the on-disk hash range; the init node gets the full range back.
    self.__dht_range = FSHashRanges.discovery_range(self.save_path, ret_full=is_init_node)
    # Retry counter for start_as_dht_member().
    self.__start_dht_try_count = 0
    self.__init_dht_thread = None
    if is_init_node:
        self.status = DS_NORMALWORK

    # Background maintenance threads (named after the node for log clarity).
    self.__check_hash_table_thread = CheckLocalHashTableThread(self)
    self.__check_hash_table_thread.setName('%s-CheckLocalHashTableThread'%self.node_name)
    self.__check_hash_table_thread.start()

    self.__monitor_dht_ranges = MonitorDHTRanges(self)
    self.__monitor_dht_ranges.setName('%s-MonitorDHTRanges'%self.node_name)
    self.__monitor_dht_ranges.start()
def test01_discovery_ranges(self):
    """Discovery should recover the range boundaries and every stored data block,
    even after a split_range() call left sub-range directories behind."""
    fs_range = FSHashRanges(START_RANGE_HASH, END_RANGE_HASH, TEST_FS_RANGE_DIR)

    # (key, payload) fixtures; keep the tmp handles alive until the test ends.
    fixtures = [(100, 'Test data #1'), (900, 'Test data #2'), (10005000, 'Test data #3')]
    tmp_holders = []
    for key, payload in fixtures:
        fname, tmp = tmpdata(payload)
        tmp_holders.append(tmp)
        fs_range.put(key, fname)

    fs_range.split_range(0, 100500)
    time.sleep(.2)

    discovered_range = FSHashRanges.discovery_range(TEST_FS_RANGE_DIR)
    self.assertEqual(discovered_range.get_start(), long(START_RANGE_HASH, 16))
    self.assertEqual(discovered_range.get_end(), long(END_RANGE_HASH, 16))

    # Every stored block must be present in the discovered range directory.
    range_dir = discovered_range.get_range_dir()
    for key, _ in fixtures:
        self.assertTrue(os.path.exists(os.path.join(range_dir, '%040x'%key)))
def start_as_dht_member(self):
    """Join the DHT by claiming a hash range from the network.

    Selects a candidate range (preferring the previously-held range,
    then the largest free range after a failed attempt, then a range
    near the current boundaries), takes it directly when this node is
    already the owner, or sends a SplitRangeRequest to the owning node.
    Retries recursively, up to Config.DHT_CYCLE_TRY_COUNT times with
    Config.WAIT_RANGE_TIMEOUT sleeps, while no range is available.
    """
    if self.status == DS_DESTROYING:
        return

    self.status = DS_INITIALIZE
    dht_range = self.get_dht_range()
    curr_start = dht_range.get_start()
    curr_end = dht_range.get_end()

    # Prefer re-acquiring the range held before a restart (first attempt only).
    last_range = dht_range.get_last_range()
    if last_range and not self.__split_requests_cache:
        new_range = self.__get_next_range_near(last_range[0], last_range[1])
    elif dht_range.is_max_range() or self.__split_requests_cache:
        # Retrying (cache non-empty) or still holding the whole space:
        # go after the biggest available range.
        new_range = self.__get_next_max_range()
    else:
        new_range = self.__get_next_range_near(curr_start, curr_end)

    if new_range is None:
        #wait and try again
        if self.__start_dht_try_count == Config.DHT_CYCLE_TRY_COUNT:
            logger.error('Cant initialize node as a part of DHT')
            self.__start_dht_try_count = 0
            return

        logger.info('No ready range for me on network... So, sleep and try again')
        self.__start_dht_try_count += 1
        self.__split_requests_cache = []
        time.sleep(Config.WAIT_RANGE_TIMEOUT)
        return self.start_as_dht_member()

    if (new_range.start == curr_start and new_range.end == curr_end):
        # Found range is exactly what we already hold - reuse it.
        new_dht_range = dht_range
    else:
        new_dht_range = FSHashRanges(long(new_range.start), long(new_range.end), self.save_path)
        self.update_dht_range(new_dht_range)
        new_dht_range.restore_from_reservation() #try getting new range data from reservation

    if new_range.node_address == self.self_address:
        # We own the found range ourselves - take it and go to normal work.
        self._take_range(new_range)
        self.set_status_to_normalwork()
        return

    # Remember who we asked so a retry picks a different strategy/node.
    self.__split_requests_cache.append(new_range.node_address)

    logger.info('Call SplitRangeRequest [%040x-%040x] to %s'% \
            (new_dht_range.get_start(), new_dht_range.get_end(), new_range.node_address,))
    parameters = { 'start_key': new_dht_range.get_start(), 'end_key': new_dht_range.get_end() }
    req = FabnetPacketRequest(method='SplitRangeRequest', sender=self.self_address, parameters=parameters)
    self.call_node(new_range.node_address, req)
def test02_main(self):
    """End-to-end exercise of FSHashRanges: concurrent writes, range split/join,
    reservation (move/restore), and extend() with both invalid and valid bounds."""
    fs_ranges = FSHashRanges(START_RANGE_HASH, END_RANGE_HASH, TEST_FS_RANGE_DIR)
    # Constructing the range must create its <start>_<end> directory on disk.
    self.assertTrue(os.path.exists(os.path.join(TEST_FS_RANGE_DIR, '%s_%s'%(START_RANGE_HASH, END_RANGE_HASH))))

    # Three writers cover disjoint key intervals concurrently.
    wt = WriteThread(fs_ranges, 0, 40000)
    wt.start()
    wt1 = WriteThread(fs_ranges, 40000, 70000)
    wt1.start()
    wt2 = WriteThread(fs_ranges, 70000, 100000)
    wt2.start()
    time.sleep(2)

    # Split while writers are still running; iterate the detached sub-range.
    ret_range, new_range = fs_ranges.split_range('%040x'%0, '%040x'%((25000)*100))
    for key, data in ret_range.iter_range():
        data = data.data()
        if data != 'T'*1000:
            # Best-effort diagnostic only; iteration races with the writers.
            print 'FAILED ITER DATA [%s]: %s'%(key, len(data))

    wt.join()
    wt1.join()
    wt2.join()
    fs_ranges.join_subranges()

    # A reader runs concurrently with the second split below.
    rt = ReadThread(fs_ranges)
    rt.start()

    # After join_subranges() no sub-ranges must remain.
    data = fs_ranges.get_subranges()
    self.assertEqual(data, None)

    fs_ranges.split_range('%040x'%0, '%040x'%((45000)*100))
    ret_range, new_range = fs_ranges.get_subranges()
    size = ret_range.get_range_size()
    self.assertTrue(size > 0)

    ret_range.move_to_reservation()
    fs_ranges.move_to_reservation()
    free_size = new_range.get_free_size()
    # NOTE(review): this re-asserts `size > 0` (already checked above);
    # `free_size` is computed but never checked - likely the intent was
    # assertTrue(free_size > 0). Left unchanged pending confirmation.
    self.assertTrue(size > 0)
    new_range.restore_from_reservation()
    self.assertTrue(os.path.exists(os.path.join(TEST_FS_RANGE_DIR, 'reservation_range')))
    rt.join()

    # Basic put/get round-trip on the restored range.
    fname, tmp = tmpdata('final data test')
    new_range.put('%040x'%100500, fname)
    data = new_range.get('%040x'%100500)
    self.assertEqual(data.data(), 'final data test')

    # extend() with bounds that do not adjoin the range must fail...
    try:
        new_range.extend('%040x'%0, '%040x'%100)
    except:
        pass
    else:
        raise Exception('Expected error in this case.')

    # ...and with valid bounds must yield a range that still serves the data.
    extended_range = new_range.extend('%040x'%0, '%040x'%((45000)*100))
    fname, tmp2 = tmpdata('final data test #2')
    extended_range.put('%040x'%100500, fname)
    data = extended_range.get('%040x'%100500)
    self.assertEqual(data.data(), 'final data test #2')