def replay_patched_node(looper, replaying_node, node_recorder, cr):
    """Drive a patched replaying node through the combined recording ``cr``.

    Feeds recorded node-stack and client-stack messages back into the node's
    stacks in recorded order, and simulates the original node's stop/restart
    cycles (``cr.start_times`` holds one (start, stop) pair per run).

    :param looper: event loop that prods the node
    :param replaying_node: node patched for replay; replaced by a fresh
        instance after each simulated stop, since stopping destroys the node
    :param node_recorder: recorder handed to ``patch_replaying_node`` when a
        new node instance is created
    :param cr: combined recorder being played back
    :return: the (possibly re-created) replaying node
    """
    node_run_no = 0
    looper.add(replaying_node)
    cr.start_playing()
    # First stop falls due after the duration of the first recorded run.
    next_stop_at = time.perf_counter() + (cr.start_times[node_run_no][1] -
                                          cr.start_times[node_run_no][0])
    while cr.is_playing:
        vals = cr.get_next()
        if next_stop_at is not None and time.perf_counter() >= next_stop_at:
            node_run_no += 1
            if node_run_no < len(cr.start_times):
                # The node stopped here; downtime is the gap between the
                # previous run's stop and the next run's start.
                sleep_for = (cr.start_times[node_run_no][0] -
                             cr.start_times[node_run_no - 1][1])
                replaying_node.stop()
                looper.removeProdable(replaying_node)
                # Create new node since node is destroyed on stop
                replaying_node = replaying_node.__class__(
                    replaying_node.name,
                    config_helper=replaying_node.config_helper,
                    ha=replaying_node.nodestack.ha,
                    cliha=replaying_node.clientstack.ha)
                patch_replaying_node(replaying_node, node_recorder,
                                     cr.start_times)
                print('sleeping for {} to simulate node stop'.format(sleep_for))
                time.sleep(sleep_for)
                after = (cr.start_times[node_run_no][1] -
                         cr.start_times[node_run_no][0])
                next_stop_at = time.perf_counter() + after
                print('Next stop after {}'.format(after))
                looper.add(replaying_node)
            else:
                # No more recorded runs; never stop again.
                next_stop_at = None
        if not vals:
            # BUGFIX: previously this `continue`d without prodding, starving
            # the node's event processing while the recording was idle (the
            # later revision of this function does exactly this).
            looper.run(replaying_node.prod())
            continue
        n_msgs, c_msgs = vals
        if n_msgs:
            for inc in n_msgs:
                if Recorder.is_incoming(inc):
                    msg, frm = to_bytes(inc[1]), to_bytes(inc[2])
                    replaying_node.nodestack._verifyAndAppend(msg, frm)
                if Recorder.is_disconn(inc):
                    # Replay a recorded remote-disconnection event.
                    disconnecteds = inc[1:]
                    replaying_node.nodestack._connsChanged(set(), disconnecteds)
        if c_msgs:
            incomings = Recorder.filter_incoming(c_msgs)
            for inc in incomings:
                msg, frm = to_bytes(inc[0]), to_bytes(inc[1])
                replaying_node.clientstack._verifyAndAppend(msg, frm)
        looper.run(replaying_node.prod())
    return replaying_node
def patch_sent_prepreapres(replaying_node, node_recorder):
    """Reconstruct the PRE-PREPAREs the recorded node sent and attach them
    to the replaying node so its replicas can re-issue identical batches.

    Walks the recorder's store (keys are integer timestamps), collects every
    outgoing PREPREPARE (bare, or wrapped inside a BATCH) into ``sent_pps``
    keyed by instance id and (view_no, pp_seq_no), then hands each replica
    its own slice. Also records the total parsed-message count on the node
    and prints the approximate original run time derived from key timestamps.
    """
    sent_pps = {}

    def add_preprepare(msg):
        # Index as instance id -> (view_no, pp_seq_no) -> [time, req ids, discarded].
        inst_id = msg[f.INST_ID.nm]
        key = (msg[f.VIEW_NO.nm], msg[f.PP_SEQ_NO.nm])
        sent_pps.setdefault(inst_id, {})[key] = [
            msg[f.PP_TIME.nm],
            list(msg[f.REQ_IDR.nm]),
            msg[f.DISCARDED.nm],
        ]

    msg_count = 0
    min_msg_time = sys.maxsize
    max_msg_time = -1
    for k, v in node_recorder.store.iterator(include_value=True):
        # Track the timestamp span to estimate original wall-clock run time.
        max_msg_time = max(max_msg_time, int(k))
        min_msg_time = min(min_msg_time, int(k))
        parsed = Recorder.get_parsed(v.decode())
        msg_count += len(parsed)
        outgoings = Recorder.filter_outgoing(parsed)
        if not outgoings:
            continue
        for out in outgoings:
            try:
                msg = json.loads(out[0])
            except json.JSONDecodeError:
                continue
            if not (isinstance(msg, dict) and OP_FIELD_NAME in msg):
                continue
            op_name = msg[OP_FIELD_NAME]
            if op_name == PREPREPARE:
                add_preprepare(msg)
            elif op_name == BATCH:
                # A BATCH wraps serialised messages; each needs its own parse.
                for m in msg['messages']:
                    try:
                        m = json.loads(m)
                        if m[OP_FIELD_NAME] == PREPREPARE:
                            add_preprepare(m)
                    except json.JSONDecodeError:
                        continue
    for r in replaying_node.replicas.values():
        r.sent_pps = sent_pps.pop(r.instId, {})
    # Whatever remains belongs to instances the node does not have (yet).
    replaying_node.sent_pps = sent_pps
    replaying_node.replay_msg_count = msg_count
    run_time = int((max_msg_time - min_msg_time) / Recorder.TIME_FACTOR)
    # Fixed typo in the user-facing message: 'Aprox' -> 'Approx'.
    print("Approx run time: {}".format(datetime.timedelta(seconds=run_time)))
def patch_sent_prepreapres(replaying_node, node_recorder):
    """Rebuild the sent PRE-PREPARE index from a node's recording.

    Scans every stored record, picks outgoing PREPREPARE messages (including
    those nested in BATCH wrappers), and maps them as
    instance id -> (view_no, pp_seq_no) -> [pp_time, req_idr list, discarded].
    Each replica of ``replaying_node`` receives its own slice; leftovers and
    the total message count are stashed on the node. Prints the approximate
    duration of the original run based on the store's timestamp keys.
    """
    sent_pps = {}

    def add_preprepare(msg):
        inst_id = msg[f.INST_ID.nm]
        view_no, pp_seq_no = msg[f.VIEW_NO.nm], msg[f.PP_SEQ_NO.nm]
        sent_pps.setdefault(inst_id, {})[view_no, pp_seq_no] = [
            msg[f.PP_TIME.nm],
            list(msg[f.REQ_IDR.nm]),
            msg[f.DISCARDED.nm],
        ]

    msg_count = 0
    min_msg_time = sys.maxsize
    max_msg_time = -1
    for k, v in node_recorder.store.iterator(include_value=True):
        # Keys are integer timestamps; track the span for the run-time estimate.
        max_msg_time = max(max_msg_time, int(k))
        min_msg_time = min(min_msg_time, int(k))
        parsed = Recorder.get_parsed(v.decode())
        msg_count += len(parsed)
        outgoings = Recorder.filter_outgoing(parsed)
        if not outgoings:
            continue
        for out in outgoings:
            try:
                msg = json.loads(out[0])
            except json.JSONDecodeError:
                continue
            if not (isinstance(msg, dict) and OP_FIELD_NAME in msg):
                continue
            op_name = msg[OP_FIELD_NAME]
            if op_name == PREPREPARE:
                add_preprepare(msg)
            elif op_name == BATCH:
                # Batched payloads are serialised; decode each individually.
                for m in msg['messages']:
                    try:
                        m = json.loads(m)
                        if m[OP_FIELD_NAME] == PREPREPARE:
                            add_preprepare(m)
                    except json.JSONDecodeError:
                        continue
    for r in replaying_node.replicas.values():
        r.sent_pps = sent_pps.pop(r.instId, {})
    replaying_node.sent_pps = sent_pps
    replaying_node.replay_msg_count = msg_count
    run_time = int((max_msg_time - min_msg_time) / Recorder.TIME_FACTOR)
    # Fixed typo in the user-facing message: 'Aprox' -> 'Approx'.
    print("Approx run time: {}".format(datetime.timedelta(seconds=run_time)))
def get_recorders_from_node_data_dir(node_data_dir, node_name) -> Tuple[Recorder, Recorder]:
    """Open the recorders stored under a node's data directory.

    :param node_data_dir: directory containing per-node data folders
    :param node_name: name of the node whose recordings to open
    :return: (node-stack recorder, client-stack recorder), both created with
        ``skip_metadata_write=True``
    """
    rec_path = os.path.join(node_data_dir, node_name, 'recorder')
    node_store = initKeyValueStorageIntKeys(
        KeyValueStorageType.Rocksdb, rec_path, node_name)
    client_store = initKeyValueStorageIntKeys(
        KeyValueStorageType.Rocksdb, rec_path, node_name + CLIENT_STACK_SUFFIX)
    return (Recorder(node_store, skip_metadata_write=True),
            Recorder(client_store, skip_metadata_write=True))
def get_parsed(msg, only_incoming=None, only_outgoing=None):
    """Split a combined record into its node-stack and client-stack parts.

    :param msg: combined string of the form
        ``<n_prefix><node msgs><separator><c_prefix><client msgs>``
    :param only_incoming: must be None; accepted only to conform to the
        ``Recorder.get_parsed`` interface
    :param only_outgoing: must be None (same reason)
    :return: two-element list ``[parsed node msgs, parsed client msgs]``,
        with ``[]`` standing in for an absent side
    """
    # To conform to the Recorder interface
    assert only_incoming is None
    assert only_outgoing is None
    n_msgs, c_msgs = msg.split(CombinedRecorder.separator)
    # BUGFIX: str.lstrip(prefix) strips any run of characters *from the
    # prefix's character set*, which can also eat leading payload characters.
    # Remove the prefix exactly once instead.
    if n_msgs.startswith(CombinedRecorder.n_prefix):
        n_msgs = n_msgs[len(CombinedRecorder.n_prefix):]
    if c_msgs.startswith(CombinedRecorder.c_prefix):
        c_msgs = c_msgs[len(CombinedRecorder.c_prefix):]
    return [Recorder.get_parsed(n_msgs) if n_msgs else [],
            Recorder.get_parsed(c_msgs) if c_msgs else []]
def __init__(self, *args, **kwargs):
    """Create the stack and attach a Recorder backed by a RocksDB store.

    The store lives under ``<basedir parent>/data/<stack dir>/recorder``.
    For KIT (node-to-node) stacks the directory is the stack's own name;
    otherwise the trailing character is dropped (client stacks append a
    suffix char to the node name).
    """
    parent_dir, _ = os.path.split(args[0]['basedirpath'])
    name = args[0]['name']
    from stp_core.network.keep_in_touch import KITNetworkInterface
    stack_dir = name if isinstance(self, KITNetworkInterface) else name[:-1]
    db_path = os.path.join(parent_dir, 'data', stack_dir, 'recorder')
    os.makedirs(db_path, exist_ok=True)
    self.recorder = Recorder(KeyValueStorageRocksdbIntKeys(db_path, name))
    super().__init__(*args, **kwargs)
class SimpleZStackWithRecorder(SimpleZStack):
    """ZMQ stack that transparently records its traffic while the node runs.

    Incoming messages, outgoing transmissions and remote disconnections are
    written to a Recorder backed by a RocksDB store, so the node's behaviour
    can later be replayed.
    """

    def __init__(self, *args, **kwargs):
        parent_dir, _ = os.path.split(args[0]['basedirpath'])
        name = args[0]['name']
        from stp_core.network.keep_in_touch import KITNetworkInterface
        # KIT (node-to-node) stacks use their own name as the data dir;
        # client stacks drop the trailing suffix character.
        stack_dir = name if isinstance(self, KITNetworkInterface) else name[:-1]
        db_path = os.path.join(parent_dir, 'data', stack_dir, 'recorder')
        os.makedirs(db_path, exist_ok=True)
        self.recorder = Recorder(KeyValueStorageRocksdbIntKeys(db_path, name))
        super().__init__(*args, **kwargs)

    def _verifyAndAppend(self, msg, ident):
        # Record only messages the base stack actually accepted.
        accepted = super()._verifyAndAppend(msg, ident)
        if accepted:
            logger.trace('{} recording incoming {} from {}'.format(self, msg, ident))
            self.recorder.add_incoming(msg, ident)

    def transmit(self, msg, uid, timeout=None, serialized=False):
        # Record only transmissions that succeeded.
        status, err = super().transmit(msg, uid, timeout=timeout,
                                       serialized=serialized)
        if status:
            self.recorder.add_outgoing(msg, uid)
        return status, err

    def _connsChanged(self, ins: Set[str], outs: Set[str]) -> None:
        from plenum.common.stacks import KITZStack
        # Disconnections are only meaningful on KIT (node-to-node) stacks.
        if outs and isinstance(self, KITZStack):
            self.recorder.add_disconnecteds(*outs)
        super()._connsChanged(ins, outs)

    def stop(self):
        # Flush/close the recorder before tearing the stack down.
        self.recorder.stop()
        super().stop()
def test_recorder_get_next_incoming_only(recorder):
    """Record only incoming messages, then replay and verify order/content."""
    incoming_count = 100
    pending = [(randomString(100), randomString(6))
               for _ in range(incoming_count)]
    # Record with randomised gaps, popping from the tail as before.
    while pending:
        recorder.add_incoming(*pending.pop())
        time.sleep(random.choice([0, 1]) + random.random())

    stored_keys = []
    recorded_incomings = OrderedDict()
    for key, raw in recorder.store.iterator(include_value=True):
        stored_keys.append(int(key))
        recorded_incomings[int(key)] = Recorder.get_parsed(raw)
    assert len(recorded_incomings) == incoming_count
    # Keys must already come out of the store in ascending order.
    assert stored_keys == sorted(stored_keys)

    deadline = time.perf_counter() + incoming_count * 2 + 10
    recorder.start_playing()
    while recorder.is_playing and time.perf_counter() < deadline:
        vals = recorder.get_next()
        if not vals:
            time.sleep(0.01)
            continue
        _, expected = recorded_incomings.popitem(last=False)
        assert expected == vals
    assert not recorded_incomings
    assert not recorder.is_playing
def test_recorder_get_next(recorder):
    """Record a random interleaving of incoming/outgoing messages, replay,
    and check that the incoming ones come back in recorded order."""
    incoming_count = 100
    outgoing_count = 50
    to_recv = [(randomString(100), randomString(6))
               for _ in range(incoming_count)]
    to_send = [(randomString(100), randomString(6))
               for _ in range(outgoing_count)]
    # Interleave the two streams randomly, with randomised gaps.
    while to_recv or to_send:
        if random.choice([0, 1]) and to_send:
            recorder.add_outgoing(*to_send.pop())
            time.sleep(random.choice([0, 1]) + random.random())
        elif to_recv:
            recorder.add_incoming(*to_recv.pop())
            time.sleep(random.choice([0, 1]) + random.random())

    recorded_incomings = OrderedDict()
    for key, raw in recorder.store.iterator(include_value=True):
        parsed = Recorder.get_parsed(raw, only_incoming=True)
        if parsed:
            recorded_incomings[int(key)] = parsed
    assert len(recorded_incomings) == incoming_count

    deadline = time.perf_counter() + incoming_count * 2 + 10
    recorder.start_playing()
    while recorder.is_playing and time.perf_counter() < deadline:
        vals = recorder.get_next()
        if not vals:
            time.sleep(0.01)
            continue
        inc = Recorder.filter_incoming(vals)
        if inc:
            assert recorded_incomings.popitem(last=False)[1] == inc
    assert not recorded_incomings
    assert not recorder.is_playing
def patch_sent_prepreapres(replaying_node, node_recorder):
    """Rebuild the sent PRE-PREPARE index from a node's recording and attach
    it to the replaying node's replicas.

    Scans every stored record, picks outgoing PREPREPARE messages (including
    those nested inside BATCH wrappers) and maps them as
    instance id -> (view_no, pp_seq_no) -> [pp_time, req id tuples, discarded].
    """
    sent_pps = {}

    def add_preprepare(msg):
        # Index by instance id, then by the (view_no, pp_seq_no) pair.
        inst_id, v, p = msg[f.INST_ID.nm], msg[f.VIEW_NO.nm], msg[
            f.PP_SEQ_NO.nm]
        if inst_id not in sent_pps:
            sent_pps[inst_id] = {}
        sent_pps[inst_id][v, p] = [
            msg[f.PP_TIME.nm],
            # Request identifiers are re-materialised as tuples.
            [tuple(l) for l in msg[f.REQ_IDR.nm]],
            msg[f.DISCARDED.nm],
        ]

    for k, v in node_recorder.store.iterator(include_value=True):
        parsed = Recorder.get_parsed(v.decode())
        outgoings = Recorder.filter_outgoing(parsed)
        if not outgoings:
            continue
        for out in outgoings:
            try:
                msg = json.loads(out[0])
                if isinstance(msg, dict) and OP_FIELD_NAME in msg:
                    op_name = msg[OP_FIELD_NAME]
                    if op_name == PREPREPARE:
                        add_preprepare(msg)
                    elif op_name == BATCH:
                        # BATCH carries serialised messages; parse each one.
                        for m in msg['messages']:
                            try:
                                m = json.loads(m)
                                if m[OP_FIELD_NAME] == PREPREPARE:
                                    add_preprepare(m)
                            except json.JSONDecodeError:
                                continue
                    else:
                        continue
            except json.JSONDecodeError:
                continue
    # NOTE(review): other variants of this function iterate
    # replaying_node.replicas.values(); this one iterates the container
    # directly — confirm that iterating `replicas` yields Replica objects
    # (not instance ids) in this version before relying on it.
    for r in replaying_node.replicas:
        r.sent_pps = sent_pps.pop(r.instId, {})
    # Leftovers belong to instances the node does not (yet) have.
    replaying_node.sent_pps = sent_pps
def test_recorded_parsings(recorder):
    """Check that Recorder.get_parsed splits stored values into incoming and
    outgoing subsets, and rejects requesting both at once."""
    incoming = [[randomString(10), randomString(6)] for _ in range(3)]
    outgoing = [[randomString(10), randomString(6)] for _ in range(5)]
    for msg, frm in incoming:
        recorder.add_incoming(msg, frm)
        time.sleep(0.01)
    for msg, frm in outgoing:
        recorder.add_outgoing(msg, frm)
        time.sleep(0.01)

    # Asking for incoming and outgoing simultaneously is an error.
    with pytest.raises(AssertionError):
        recorder.get_parsed(incoming[0], only_incoming=True,
                            only_outgoing=True)

    combined = incoming + outgoing

    def sublist(lst1, lst2):
        # True when the items common to both lists appear in the same order.
        ls1 = [e for e in lst1 if e in lst2]
        ls2 = [e for e in lst2 if e in lst1]
        return ls1 == ls2

    for _, stored in recorder.store.iterator(include_value=True):
        everything = Recorder.get_parsed(stored)
        assert sublist([entry[1:] for entry in everything], combined)
        inc = Recorder.get_parsed(stored, only_incoming=True)
        if inc:
            assert sublist(inc, incoming)
            for entry in inc:
                incoming.remove(entry)
        out = Recorder.get_parsed(stored, only_outgoing=True)
        if out:
            assert sublist(out, outgoing)
            for entry in out:
                outgoing.remove(entry)
    # Every recorded message must have been accounted for exactly once.
    assert not incoming
    assert not outgoing
def test_recorded_parsings(recorder):
    """Verify parsing of stored records into incoming/outgoing subsets."""
    received = [[randomString(10), randomString(6)] for _ in range(3)]
    sent = [[randomString(10), randomString(6)] for _ in range(5)]
    for message, sender in received:
        recorder.add_incoming(message, sender)
        time.sleep(0.01)
    for message, dest in sent:
        recorder.add_outgoing(message, dest)
        time.sleep(0.01)

    # Requesting both filters at once must be rejected.
    with pytest.raises(AssertionError):
        recorder.get_parsed(received[0], only_incoming=True,
                            only_outgoing=True)

    union = received + sent

    def same_order(a, b):
        # Items present in both sequences must occur in the same order.
        return [x for x in a if x in b] == [y for y in b if y in a]

    for _, value in recorder.store.iterator(include_value=True):
        parsed = Recorder.get_parsed(value)
        assert same_order([rec[1:] for rec in parsed], union)
        parsed_in = Recorder.get_parsed(value, only_incoming=True)
        if parsed_in:
            assert same_order(parsed_in, received)
            for rec in parsed_in:
                received.remove(rec)
        parsed_out = Recorder.get_parsed(value, only_outgoing=True)
        if parsed_out:
            assert same_order(parsed_out, sent)
            for rec in parsed_out:
                sent.remove(rec)
    # All recorded messages accounted for exactly once.
    assert not received
    assert not sent
def __init__(self, kv_store: KeyValueStorageRocksdbIntKeys, skip_metadata_write=True):
    """Initialise the combined recorder on a single int-keyed store.

    :param kv_store: backing store holding the combined record stream
    :param skip_metadata_write: forwarded to the Recorder base initialiser
    """
    # Initialise the Recorder base explicitly (not via super()),
    # matching the original call pattern.
    Recorder.__init__(self, kv_store, skip_metadata_write=skip_metadata_write)
    # Sub-recorders being combined, and their recorded (start, stop) times.
    self.recorders, self.start_times = [], []
def replay_patched_node(looper, replaying_node, node_recorder, cr):
    """Drive a patched replaying node through the combined recording ``cr``.

    Feeds recorded node-stack and client-stack messages back into the node's
    stacks in recorded order, simulating the original node's stop/restart
    cycles (``cr.start_times`` holds one (start, stop) pair per run) and
    printing replay progress along the way.

    :param looper: event loop that prods the node
    :param replaying_node: node patched for replay; replaced by a fresh
        instance after each simulated stop, since stopping destroys the node
    :param node_recorder: recorder handed to ``patch_replaying_node`` when a
        new node instance is created
    :param cr: combined recorder being played back
    :return: the (possibly re-created) replaying node
    """
    node_run_no = 0
    looper.add(replaying_node)
    cr.start_playing()
    # First stop falls due after the duration of the first recorded run.
    next_stop_at = time.perf_counter() + (cr.start_times[node_run_no][1] -
                                          cr.start_times[node_run_no][0])
    progress_data = _create_progress_data(replaying_node.replay_msg_count)
    while cr.is_playing:
        _print_progress(progress_data)
        vals = cr.get_next()
        if next_stop_at is not None and time.perf_counter() >= next_stop_at:
            node_run_no += 1
            if node_run_no < len(cr.start_times):
                # The node stopped here
                sleep_for, after = _cal_run_times(node_run_no, cr.start_times)
                replaying_node.stop()
                looper.removeProdable(replaying_node)
                # Create new node since node is destroyed on stop
                replaying_node = replaying_node.__class__(
                    replaying_node.name,
                    config_helper=replaying_node.config_helper,
                    ha=replaying_node.nodestack.ha,
                    cliha=replaying_node.clientstack.ha)
                patch_replaying_node(replaying_node, node_recorder,
                                     cr.start_times)
                print('Sleeping for {}s to simulate node stop'.format(sleep_for))
                time.sleep(sleep_for)
                # ``after`` is None when there is no further run to time.
                if after is None:
                    next_stop_at = None
                else:
                    next_stop_at = time.perf_counter() + after
                    print('Next stop after {}s'.format(after))
                looper.add(replaying_node)
            else:
                # No more recorded runs; never stop again.
                next_stop_at = None
        if not vals:
            # Nothing to feed yet; still let the node make progress.
            looper.run(replaying_node.prod())
            continue
        n_msgs, c_msgs = vals
        progress_data = _update_progress_msg_count(progress_data, len(n_msgs))
        if n_msgs:
            for inc in n_msgs:
                if Recorder.is_incoming(inc):
                    # Node-stack entries carry (marker, msg, frm) — msg/frm
                    # are taken from indices 1 and 2.
                    msg, frm = to_bytes(inc[1]), to_bytes(inc[2])
                    replaying_node.nodestack._verifyAndAppend(msg, frm)
                if Recorder.is_disconn(inc):
                    # Replay a recorded remote-disconnection event.
                    disconnecteds = inc[1:]
                    replaying_node.nodestack._connsChanged(set(), disconnecteds)
        if c_msgs:
            incomings = Recorder.filter_incoming(c_msgs)
            for inc in incomings:
                # Client-stack incomings carry (msg, frm) at indices 0 and 1.
                msg, frm = to_bytes(inc[0]), to_bytes(inc[1])
                replaying_node.clientstack._verifyAndAppend(msg, frm)
        looper.run(replaying_node.prod())
    return replaying_node
def create_recorder_for_test(tmpdir_factory, name):
    """Build a Recorder over a fresh LevelDB store in a pytest temp dir.

    :param tmpdir_factory: pytest's tmpdir_factory fixture
    :param name: name for the key-value store
    :return: a new Recorder instance
    """
    base_path = tmpdir_factory.mktemp('').strpath
    return Recorder(KeyValueStorageLeveldbIntKeys(base_path, name))