def __init__(self, selfAddress, partnerAddrs, autoUnlockTime):
    """Distributed lock replica.

    :param selfAddress: address of this node.
    :param partnerAddrs: addresses of the other cluster members.
    :param autoUnlockTime: seconds after which an unrefreshed lock is released.
    """
    conf = SyncObjConf(
        dynamicMembershipChange=True,
        raftMaxTimeout=10.0,
        connectionTimeout=12.0,
    )
    super(LockImpl, self).__init__(selfAddress, partnerAddrs, conf)
    self.__autoUnlockTime = autoUnlockTime
    self.__locks = {}
def __init__(self, config):
    """Bootstrap the Raft DCS: start a KVStoreTTL node and block until it is ready.

    :param config: mapping with keys ``ttl``, ``self_addr``, ``partner_addrs``,
        ``data_dir``, ``password``, ``bind_addr`` and ``retry_timeout``.
    """
    super(Raft, self).__init__(config)
    self._ttl = int(config.get('ttl') or 30)
    self_addr = config.get('self_addr')
    # FIX: copy the list so the caller's config structure is never mutated in place.
    partner_addrs = list(config.get('partner_addrs', []))
    if self._ctl:
        # Running as patronictl: join passively, so our own address becomes
        # just another partner and we bind to no address ourselves.
        if self_addr:
            partner_addrs.append(self_addr)
        self_addr = None
    # Create raft data_dir if necessary
    raft_data_dir = config.get('data_dir', '')
    if raft_data_dir != '':
        validate_directory(raft_data_dir)
    ready_event = threading.Event()
    file_template = os.path.join(config.get('data_dir', ''), (self_addr or ''))
    # Journal/dump files only make sense for a node with its own address.
    conf = SyncObjConf(password=config.get('password'), appendEntriesUseBatch=False,
                       bindAddress=config.get('bind_addr'), commandsWaitLeader=False,
                       fullDumpFile=(file_template + '.dump' if self_addr else None),
                       journalFile=(file_template + '.journal' if self_addr else None),
                       onReady=ready_event.set, dynamicMembershipChange=True)
    self._sync_obj = KVStoreTTL(self_addr, partner_addrs, conf, self._on_set, self._on_delete)
    while True:
        ready_event.wait(5)
        # FIX: is_set() replaces the long-deprecated isSet() alias.
        if ready_event.is_set() or self._sync_obj.applied_local_log:
            break
        else:
            logger.info('waiting on raft')
    self._sync_obj.forceLogCompaction()
    self.set_retry_timeout(int(config.get('retry_timeout') or 10))
def setUp(self):
    """Build a single-node KVStoreTTL with aggressive raft timings for fast tests."""
    mock_cb = Mock()
    mock_cb.replicated = False
    self.conf = SyncObjConf(
        appendEntriesUseBatch=False,
        appendEntriesPeriod=0.001,
        raftMinTimeout=0.004,
        raftMaxTimeout=0.005,
        autoTickPeriod=0.001,
    )
    self.so = KVStoreTTL('127.0.0.1:1234', [], self.conf, on_set=mock_cb, on_delete=mock_cb)
    self.so.set_retry_timeout(10)
def __init__(self, logger, host, port):
    """Master node: serves a threaded XML-RPC control API and keeps volume
    metadata replicated through pysyncobj consumers.

    :param logger: logger instance stored for later use.
    :param host: address to bind the XML-RPC server to.
    :param port: port for the XML-RPC server.
    """
    cfg = SyncObjConf()
    cfg.fullDumpFile = 'raft.bin'
    cfg.logCompactionMinTime = 10
    cfg.useFork = True
    self.serv = ThreadXMLRPCServer(
        (host, port), logRequests=True)
    # Expose every method named in _rpc_methods over XML-RPC.
    for name in self._rpc_methods:
        self.serv.register_function(getattr(self, name))
    self.logger = logger
    self.host = host
    self.port = port
    self.lock = RWLock()
    # Mapping of currently active volume servers (populated elsewhere).
    self.act_vol_serv = dict()
    self.writable_vid = ReplList()  # writable volume ids (replicated)
    self.vid = ReplCounter()
    self.fkey = ReplCounter()
    self.db = ReplDict()
    # NOTE(review): 'config' is not a parameter here — presumably a
    # module-level settings object providing addr/clusters; confirm it is
    # imported in this file.
    super(Master, self).__init__(config.addr, config.clusters, cfg,
                                 consumers=[self.vid, self.fkey, self.db, self.writable_vid])
def __init__(self, config):
    """Bootstrap the Raft DCS node and block until the local log is applied.

    :param config: mapping with keys ``ttl``, ``self_addr``, ``partner_addrs``,
        ``data_dir`` and ``retry_timeout``.
    """
    super(Raft, self).__init__(config)
    self._ttl = int(config.get('ttl') or 30)
    self_addr = config.get('self_addr')
    # FIX: copy the list so the caller's config structure is never mutated in place.
    partner_addrs = list(config.get('partner_addrs', []))
    if self._ctl:
        # ctl mode joins passively: our own address is just another partner.
        if self_addr:
            partner_addrs.append(self_addr)
        self_addr = None
    template = os.path.join(config.get('data_dir', ''), self_addr or '')
    # Persistence files only for a node that has its own address.
    files = {
        'journalFile': template + '.journal',
        'fullDumpFile': template + '.dump'
    } if self_addr else {}
    ready_event = threading.Event()
    conf = SyncObjConf(commandsWaitLeader=False, appendEntriesUseBatch=False,
                       onReady=ready_event.set, dynamicMembershipChange=True, **files)
    self._sync_obj = KVStoreTTL(self_addr, partner_addrs, conf, self._on_set, self._on_delete)
    while True:
        ready_event.wait(5)
        # FIX: is_set() replaces the long-deprecated isSet() alias.
        if ready_event.is_set() or self._sync_obj.applied_local_log:
            break
        else:
            logger.info('waiting on raft')
    self._sync_obj.forceLogCompaction()
    self.set_retry_timeout(int(config.get('retry_timeout') or 10))
def setUp(self):
    """Spin up a Manager backed by a fresh temp directory for each test."""
    self.temp_dir = TemporaryDirectory()
    bind_host = '0.0.0.0'
    raft_port = get_free_port()
    sync_conf = SyncObjConf(
        fullDumpFile=self.temp_dir.name + '/supervise.zip',
        logCompactionMinTime=300,
        dynamicMembershipChange=True
    )
    app_logger = getLogger(NAME)
    app_handler = StreamHandler()
    app_logger.setLevel(ERROR)
    app_handler.setLevel(INFO)
    app_handler.setFormatter(Formatter('%(asctime)s - %(levelname)s - %(pathname)s:%(lineno)d - %(message)s'))
    app_logger.addHandler(app_handler)
    req_logger = getLogger(NAME + '_http')
    req_handler = StreamHandler()
    req_logger.setLevel(NOTSET)
    req_handler.setLevel(INFO)
    req_handler.setFormatter(Formatter('%(message)s'))
    req_logger.addHandler(req_handler)
    self.manager = Manager(
        host=bind_host,
        port=raft_port,
        seed_addr=None,
        conf=sync_conf,
        data_dir=self.temp_dir.name + '/supervise',
        grpc_port=get_free_port(),
        grpc_max_workers=10,
        http_port=get_free_port(),
        logger=app_logger,
        http_logger=req_logger,
        metrics_registry=CollectorRegistry()
    )
def measure(argv): """Measure throughput in 15s""" # Parse parameters quorumSize1, quorumSize2, drop_ratio, selfAddr, partners = parseParams( argv) maxCommandsQueueSize = int(0.9 * SyncObjConf().commandsQueueSize / len(partners)) # Init a TestObj counter1 = ReplCounter() obj = SyncObj(selfAddr, partners, quorumSize1, quorumSize2, drop_ratio, consumers=[counter1]) while obj._getLeader() is None: time.sleep(0.5) time.sleep(4.0) count = 0 startTime = time.time() while time.time() - startTime < 10.0: counter1.inc() while time.time() - startTime < 40.0: counter1.inc() count += 1 print(obj.getStatus()['raft_term']) time.sleep(4.0) return count
def __init__(self, selfNodeAddr, otherNodeAddrs, testType=TEST_TYPE.DEFAULT, compactionMinEntries=0,
             dumpFile=None, journalFile=None, password=None, dynamicMembershipChange=False, useFork=True):
    """Test replica whose SyncObjConf is tuned per test scenario.

    :param testType: selects which group of config overrides below applies.
    :param compactionMinEntries: entry threshold for COMPACTION_1.
    :param dumpFile: full-dump file path (used by several scenarios).
    :param journalFile: journal file path (RAND_1 / JOURNAL_1).
    """
    # Baseline: manual ticking, no batching, moderate raft timeouts.
    cfg = SyncObjConf(autoTick=False, appendEntriesUseBatch=False)
    cfg.appendEntriesPeriod = 0.1
    cfg.raftMinTimeout = 0.5
    cfg.raftMaxTimeout = 1.0
    cfg.dynamicMembershipChange = dynamicMembershipChange
    if dumpFile is not None:
        cfg.fullDumpFile = dumpFile
    if password is not None:
        cfg.password = password
    cfg.useFork = useFork
    # Each scenario overrides the baseline settings above.
    if testType == TEST_TYPE.COMPACTION_1:
        # Aggressive compaction so it triggers during the test.
        cfg.logCompactionMinEntries = compactionMinEntries
        cfg.logCompactionMinTime = 0.1
        cfg.appendEntriesUseBatch = True
    if testType == TEST_TYPE.COMPACTION_2:
        # Compaction effectively disabled; dump file still configured.
        cfg.logCompactionMinEntries = 99999
        cfg.logCompactionMinTime = 99999
        cfg.fullDumpFile = dumpFile
    if testType == TEST_TYPE.RAND_1:
        # Fast automatic ticking with compaction effectively disabled.
        cfg.autoTickPeriod = 0.05
        cfg.appendEntriesPeriod = 0.02
        cfg.raftMinTimeout = 0.1
        cfg.raftMaxTimeout = 0.2
        cfg.logCompactionMinTime = 9999999
        cfg.logCompactionMinEntries = 9999999
        cfg.journalFile = journalFile
    if testType == TEST_TYPE.JOURNAL_1:
        # Journal-driven recovery: compaction disabled, journal + dump on.
        cfg.logCompactionMinTime = 999999
        cfg.logCompactionMinEntries = 999999
        cfg.fullDumpFile = dumpFile
        cfg.journalFile = journalFile
    if testType == TEST_TYPE.AUTO_TICK_1:
        cfg.autoTick = True
        cfg.pollerType = 'select'
    super(TestObj, self).__init__(selfNodeAddr, otherNodeAddrs, cfg)
    self.__counter = 0
    self.__data = {}
def __init__(self, selfAddress, partnerAddrs):
    """Replicated key-value storage plus simple queue bookkeeping state."""
    conf = SyncObjConf(dynamicMembershipChange=True)
    super(KVStorage, self).__init__(selfAddress, partnerAddrs, conf)
    self.__data = {}
    self.q_table = {}
    self.q_array = {}
    self.qid = 0
    self.pop_flag = False
    self.pop_variable = 0
def __init__(self, selfAddress, partnerAddresses):
    """Ad exchange node.

    Add connection object here if exchange is physically located in a
    separate server.
    """
    # Effectively disable log compaction by using a huge threshold.
    conf = SyncObjConf(
        logCompactionMinEntries=2147483647,
        logCompactionMinTime=2147483647,
    )
    super(AdExchange, self).__init__(selfAddress, partnerAddresses, conf)
    self.__logger = get_logger("AdExchange")
    self.__logger.info("### Starting Ad Exchange...")
    self.__auction = ADEX['auction']
    self.__reserve = ADEX['reserve']
def __init__(self, selfAddress, operation, operand, partners):
    """Host node that binds to selfAddress with its last digit bumped by one."""
    bumped_addr = selfAddress[:-1] + str(int(selfAddress[-1]) + 1)
    print('Host:{}'.format(bumped_addr))
    conf = SyncObjConf(
        dynamicMembershipChange=True,
        commandsWaitLeader=True,
        raftMaxTimeout=2.0,
        connectionTimeout=3.0,
    )
    super(Host, self).__init__(bumped_addr, partners, conf)
    self.operation = operation
    self.operand = float(operand)
    self.__data = {}
def __init__(self, selfNodeAddr, allNodeAddrs):
    """Join the raft cluster formed by allNodeAddrs (self included).

    FIX over the original: the caller's ``allNodeAddrs`` list was appended to
    in place; we now work on a private copy. The redundant always-true
    membership check before removing self is also gone.

    :param selfNodeAddr: this node's address.
    :param allNodeAddrs: addresses of all cluster members (may omit self).
    """
    # Deduplicate and make sure this node is part of the full membership,
    # without mutating the caller's list.
    peers = set(allNodeAddrs)
    peers.add(selfNodeAddr)
    self.selfNodeAddr = copy.deepcopy(selfNodeAddr)
    # Full membership (including this node).
    self.allNodeAddrs = copy.deepcopy(list(peers))
    self.__task_status = {}
    # SyncObj expects only the *other* nodes.
    partners = [addr for addr in peers if addr != selfNodeAddr]
    super(DCSyncObj, self).__init__(selfNodeAddr, partners,
                                    SyncObjConf(dynamicMembershipChange=True))
def __init__(self, serverport, other_members):
    """Watch a shard's replicated game state via pysyncobj."""
    logger.info("Initializing SyncObj with serverport %s (others: %s)" % (serverport, other_members))
    self.replicated_game_state = ReplDict()
    sync_conf = SyncObjConf(dynamicMembershipChange=False)
    super(ShardSyncWatcher, self).__init__(
        serverport,
        other_members,
        conf=sync_conf,
        consumers=[self.replicated_game_state],
    )
    self.__action_clock = 0
def __init__(self, selfAddress, partnerAddresses):
    """Initialize the seller node's replicated state and local graph."""
    # Effectively disable log compaction with a huge threshold.
    conf = SyncObjConf(
        logCompactionMinEntries=2147483647,
        logCompactionMinTime=2147483647,
    )
    super(Seller, self).__init__(selfAddress, partnerAddresses, conf)
    self.__logger = get_logger("Seller")
    self.__sellerGraph = nx.Graph()
    self.__rsp = TEST_PARAMS['server_path']
    self.__sf = TEST_PARAMS['seller_file_name']
    self.__compressed = CONTEXT['compressed_content']
def __initDistributedDict(self): rr_raft = ReplDict() config = SyncObjConf(appendEntriesUseBatch=True) syncObj = SyncObj(self.__node, self.__other_nodes, consumers=[rr_raft], conf=config) if not self.__silent: print "Initializing Raft..." while not syncObj.isReady(): continue if not self.__silent: print "Raft initialized!" return rr_raft
def __init__(self, config):
    """Raft-backed controller: build the syncobj config and the TTL KV store."""
    super(RaftController, self).__init__(config)
    settings = self.config.get('raft')
    addr = settings['self_addr']
    base_path = os.path.join(settings.get('data_dir', ''), addr)
    self._syncobj_config = SyncObjConf(
        autoTick=False,
        appendEntriesUseBatch=False,
        dynamicMembershipChange=True,
        journalFile=base_path + '.journal',
        fullDumpFile=base_path + '.dump',
    )
    self._raft = KVStoreTTL(addr, settings.get('partner_addrs', []), self._syncobj_config)
def __init__(self, port, members, secret=""):
    """Start a raft cluster member on localhost:port with a replicated dict.

    Fixes over the original:
    - ``secret is not ""`` compared object identity, not equality (a
      SyntaxWarning on Python 3.8+ and implementation-defined); replaced
      with a plain truthiness check covering both "" and None.
    - ``cfg.leaderFallbackTimeout`` was set to 10.0 and then clobbered with
      ``True`` (numerically 1 second, below raftMaxTimeout=1.4); the
      intended 10-second value is kept.
    - dead commented-out code removed; ``== False`` replaced with ``not``.
    """
    JSBASE.__init__(self)
    self._members = members
    self.port = port
    self.dict1 = ReplDict()
    # NOTE(review): reads ``self.members`` while only ``self._members`` is
    # assigned above — presumably a property defined elsewhere; verify.
    remotes = ["%s:%s" % item for item in self.members]
    cfg = SyncObjConf(autoTick=True)
    cfg.onReady = self.onReady
    if secret:
        print("SECRET")
        cfg.password = secret
    cfg.appendEntriesPeriod = 0.01
    cfg.appendEntriesUseBatch = True
    cfg.raftMinTimeout = 0.4
    cfg.raftMaxTimeout = 1.4
    cfg.dynamicMembershipChange = True
    cfg.onStateChanged = None
    cfg.commandsWaitLeader = False
    cfg.connectionRetryTime = 5.0  # connect to other down nodes every so many secs
    cfg.connectionTimeout = 3.5
    cfg.leaderFallbackTimeout = 10.0
    cfg.journalFile = "/tmp/raft/raft_%s" % self.port
    cfg.logCompactionMinEntries = 1000
    cfg.logCompactionMinTime = 60
    self._log_debug("port:%s" % self.port)
    self._log_debug("members:%s" % remotes)
    self.syncobj = SyncObj("localhost:%s" % port, remotes, consumers=[self.dict1], conf=cfg)
    # Block until the raft node reports ready.
    while not self.syncobj.isReady():
        time.sleep(1)
        print("wait sync")
    time.sleep(1)
    self.start()
def cleanup_service_tree(self):
    """Reset raft state between test runs: destroy the old KVStoreTTL,
    restart the controller, and create a fresh observer connection.
    """
    from patroni.dcs.raft import KVStoreTTL
    from pysyncobj import SyncObjConf
    if self._raft:
        self._raft.destroy()
        # NOTE(review): reaches into SyncObj's name-mangled private thread
        # attribute to join it — brittle against pysyncobj internals.
        self._raft._SyncObj__thread.join()
    self.stop()
    # Recreate an empty working directory before restarting.
    os.makedirs(self._work_directory)
    self.start()
    ready_event = threading.Event()
    conf = SyncObjConf(appendEntriesUseBatch=False, dynamicMembershipChange=True, onReady=ready_event.set)
    # Passive node (self_addr=None) observing the controller.
    self._raft = KVStoreTTL(None, [self.CONTROLLER_ADDR], conf)
    ready_event.wait()
def setUp(self):
    """Create an Indexer on free ports plus a gRPC channel pointing at it."""
    self.temp_dir = TemporaryDirectory()
    self.example_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), '../example'))
    bind_host = '0.0.0.0'
    raft_port = get_free_port()
    sync_conf = SyncObjConf(
        fullDumpFile=self.temp_dir.name + '/index.zip',
        logCompactionMinTime=300,
        dynamicMembershipChange=True
    )
    grpc_port = get_free_port()
    app_logger = getLogger(NAME)
    app_handler = StreamHandler()
    app_logger.setLevel(ERROR)
    app_handler.setLevel(INFO)
    app_handler.setFormatter(Formatter(
        '%(asctime)s - %(levelname)s - %(pathname)s:%(lineno)d - %(message)s'
    ))
    app_logger.addHandler(app_handler)
    req_logger = getLogger(NAME + '_http')
    req_handler = StreamHandler()
    req_logger.setLevel(NOTSET)
    req_handler.setLevel(INFO)
    req_handler.setFormatter(Formatter('%(message)s'))
    req_logger.addHandler(req_handler)
    self.indexer = Indexer(
        host=bind_host,
        port=raft_port,
        seed_addr=None,
        conf=sync_conf,
        data_dir=self.temp_dir.name + '/index',
        grpc_port=grpc_port,
        grpc_max_workers=10,
        http_port=get_free_port(),
        logger=app_logger,
        http_logger=req_logger,
        metrics_registry=CollectorRegistry()
    )
    self.channel = grpc.insecure_channel('{0}:{1}'.format(bind_host, grpc_port))
async def setup_raft(raft_addr, cluster):
    """Initialize or join the Raft cluster.

    :param raft_addr: address this node uses for Raft traffic; ``None`` means
        join an existing cluster, with the address supplied interactively by
        this node's first user.
    :param cluster: list of cluster member addresses; ``None`` means join an
        existing cluster, with a member address supplied by the first user.
    :return: this node's Raft communication address (or None on join failure).
    """
    global raft_server
    mode = 'init'
    if not raft_addr:
        # raft_addr is None: join an existing Raft cluster interactively.
        mode = 'join'
        # Derive the current host from the page origin (strip scheme and port).
        currhost = session.get_info().origin.rsplit(":", 1)[0].split("//", 1)[-1]
        data = await input_group("加入Raft集群", [
            input("当前节点的Raft通信端口", name="port"),
            input("当前节点的Host地址", name="host", value=currhost, help_text="其他节点需要可以通过此Host与当前节点通信"),
            input("集群节点地址", name="remote", placeholder='host:ip', help_text="填入集群中任一节点的地址即可")
        ])
        raft_addr = '%s:%s' % (data['host'], data['port'])
        cluster = join_cluster(raft_addr, data['remote'])
        if not cluster:
            put_markdown("### 加入集群失败")
            return
    raft_port = raft_addr.split(":", 1)[-1]
    cfg = SyncObjConf(dynamicMembershipChange=True,
                      fullDumpFile=raft_addr + '.data',
                      onStateChanged=partial(onStateChanged, node=raft_addr),
                      bindAddress="0.0.0.0:%s" % raft_port)
    raft_server = SyncObj(raft_addr, cluster,
                          consumers=[chat_msgs, node_user_cnt, node_webui_addr],
                          conf=cfg)
    if mode == 'join':
        # Notify the admin that a new node joined the cluster.
        send_msg(ADMIN_USER, '节点`%s`加入集群' % raft_addr, instant_output=False)
    return raft_addr
def start(self):
    """Launch the node: state consumers, worker thread, and the SyncObj."""
    logging.info('Start')
    self.nodes = Nodes()
    self.worker_q = queue.Queue()
    worker_thread = threading.Thread(
        target=node_worker,
        args=(self, self._addr, 'slave', self.worker_q),
    )
    worker_thread.daemon = True
    self.worker = worker_thread
    self._syncObjConf = SyncObjConf(
        onReady=lambda: self._onReady(),
        onStateChanged=lambda prev_state, new_state: self._stateChanged(prev_state, new_state),
    )
    peer_addrs = [f'{p}:{self._raft_port}' for p in self._peers]
    self._syncObj = SyncObj(
        f'{self._addr}:{self._raft_port}',
        peer_addrs,
        consumers=[self.logs, self.nodes],
        conf=self._syncObjConf,
    )
def __init__(self, on_ready, on_set, on_delete, **config):
    """Replicated TTL key-value store built on pysyncobj.SyncObj.

    :param on_ready: callback invoked once the local raft node is ready.
    :param on_set: callback invoked when a key is set/replicated.
    :param on_delete: callback invoked when a key is deleted.
    :param config: raft settings (self_addr, partner_addrs, data_dir,
        password, bind_addr, ttl, loop_wait, patronictl, ...).
    """
    self.__thread = None
    self.__on_set = on_set
    self.__on_delete = on_delete
    # Keys awaiting expiration handling -- TODO confirm exact semantics
    # against the methods that consume __limb elsewhere in this class.
    self.__limb = {}
    self.__retry_timeout = None
    self_addr = config.get('self_addr')
    partner_addrs = set(config.get('partner_addrs', []))
    if config.get('patronictl'):
        # patronictl joins passively: our own address becomes just another
        # partner and we run without binding a self address.
        if self_addr:
            partner_addrs.add(self_addr)
        self_addr = None
    # Create raft data_dir if necessary
    raft_data_dir = config.get('data_dir', '')
    if raft_data_dir != '':
        validate_directory(raft_data_dir)
    file_template = (self_addr or '')
    # ':' is not allowed in file names on Windows.
    file_template = file_template.replace(
        ':', '_') if os.name == 'nt' else file_template
    file_template = os.path.join(raft_data_dir, file_template)
    conf = SyncObjConf(
        password=config.get('password'),
        autoTick=False,
        appendEntriesUseBatch=False,
        bindAddress=config.get('bind_addr'),
        dnsFailCacheTime=(config.get('loop_wait') or 10),
        dnsCacheTime=(config.get('ttl') or 30),
        commandsWaitLeader=config.get('commandsWaitLeader'),
        # Persistence files only make sense for a node with its own address.
        fullDumpFile=(file_template + '.dump' if self_addr else None),
        journalFile=(file_template + '.journal' if self_addr else None),
        onReady=on_ready,
        dynamicMembershipChange=True)
    super(KVStoreTTL, self).__init__(self_addr, partner_addrs, conf)
    self.__data = {}
def __init__(self, selfNodeAddr, otherNodeAddrs, compactionTest=0, dumpFile=None, compactionTest2=False):
    """Test replica; the compaction flags select the scenario under test."""
    conf = SyncObjConf(autoTick=False, commandsQueueSize=10000, appendEntriesUseBatch=False)
    if compactionTest:
        # Aggressive compaction so it fires during the test run.
        conf.logCompactionMinEntries = compactionTest
        conf.logCompactionMinTime = 0.1
        conf.appendEntriesUseBatch = True
        conf.fullDumpFile = dumpFile
    if compactionTest2:
        # Compaction effectively disabled; large buffers, small batches.
        conf.logCompactionMinEntries = 99999
        conf.logCompactionMinTime = 99999
        conf.fullDumpFile = dumpFile
        conf.sendBufferSize = 2 ** 21
        conf.recvBufferSize = 2 ** 21
        conf.appendEntriesBatchSize = 10
        conf.maxCommandsPerTick = 5
    super(TestObj, self).__init__(selfNodeAddr, otherNodeAddrs, conf)
    self.__counter = 0
    self.__data = {}
def __init__(self, selfAddress, partnerAddrs):
    """Minimal replicated key-value store supporting dynamic membership."""
    conf = SyncObjConf(dynamicMembershipChange=True)
    super(KVStorage, self).__init__(selfAddress, partnerAddrs, conf)
    self.__data = {}
else: _g_error += 1 _g_errors[err] += 1 def getRandStr(l): f = '%0' + str(l) + 'x' return f % random.randrange(16**l) if __name__ == '__main__': # Parse parameters numCommands, cmdSize, quorumSize1, quorumSize2, drop_ratio, selfAddr, partners = parseParams( sys.argv) maxCommandsQueueSize = int(0.9 * SyncObjConf().commandsQueueSize / len(partners)) # Init a TestObj obj = TestObj(selfAddr, partners, quorumSize1, quorumSize2, drop_ratio) while obj._getLeader() is None: time.sleep(0.5) time.sleep(2.0) # Measure the system during its steady state startTime = time.time() while time.time() - startTime < 10.0: st = time.time() for i in range(0, numCommands):
def __init__(self, selfNodeAddr, otherNodeAddrs):
    """Test replica that tracks how many commands it has applied."""
    conf = SyncObjConf(appendEntriesUseBatch=False)
    super(TestObj, self).__init__(selfNodeAddr, otherNodeAddrs, conf)
    self.__appliedCommands = 0
if (len(self.__list_of_queues) >= 1): return self.__list_of_queues else: return [] def onAdd(res, err, cnt): print("on add called %d" % cnt, res, err) def onCreate(res, err, cnt): print("Queue created for %d" % cnt, res, err) if __name__ == '__main__': SyncObjConf(dynamicMembershipChange=True) if len(sys.argv) < 3: print('Usage: %s self_port partner1_port partner2_port ...' % sys.argv[0]) sys.exit(-1) port = int(sys.argv[1]) partners = ['localhost:%d' % int(p) for p in sys.argv[2:]] print("Before o") o = TestObj('localhost:%d' % port, partners) n = 0 old_value = -1 print("Before while") print("") while True: time.sleep(2.5)
def __init__(self, selfAddress, partnerAddrs, dumpFile):
    """Replicated key-value storage persisted to a full-dump file."""
    storage_conf = SyncObjConf(fullDumpFile=dumpFile)
    super(KVStorage, self).__init__(selfAddress, partnerAddrs, storage_conf)
    self.__data = {}
def __init__(self, selfAddress, partnerAddrs, dumpFile=None):
    """Key-value storage backed by the module-level ldb handle.

    :param dumpFile: optional full-dump file path for persistence.
    """
    self.ldb = ldb
    storage_conf = SyncObjConf(fullDumpFile=dumpFile)
    super(KVStorage, self).__init__(selfAddress, partnerAddrs, storage_conf)
def setUp(self):
    """Two-address DynMemberSyncObj with manual ticking for deterministic tests."""
    self.conf = SyncObjConf(
        appendEntriesUseBatch=False,
        dynamicMembershipChange=True,
        autoTick=False,
    )
    self.so = DynMemberSyncObj('127.0.0.1:1234', ['127.0.0.1:1235'], self.conf)