from statistics import mean, pstdev
from time import perf_counter

# NOTE: the remaining helpers used below (TestClient, TestNode,
# buildPoolClientAndWallet, sendReqsToNodesAndVerifySuffReplies,
# sdk_send_random_and_check, get_memory_usage, timeit) are assumed to be
# provided by the surrounding plenum test helper modules and fixtures.


def test_node_load_consistent_time(tconf, change_checkpoint_freq,
                                   disable_node_monitor_config, looper,
                                   txnPoolNodeSet, tdirWithPoolTxns,
                                   allPluginsPath, poolTxnStewardData,
                                   capsys):
    # One of the reasons memory grows is that the spylog grows
    client, wallet = buildPoolClientAndWallet(poolTxnStewardData,
                                              tdirWithPoolTxns,
                                              clientClass=TestClient)
    looper.add(client)
    looper.run(client.ensureConnectedToNodes())

    client_batches = 300
    txns_per_batch = 25
    time_log = []
    warm_up_batches = 10
    tolerance_factor = 2
    # asizeof is only referenced in the commented-out diagnostics below
    from pympler import asizeof
    for i in range(client_batches):
        s = perf_counter()
        sendReqsToNodesAndVerifySuffReplies(looper, wallet, client,
                                            txns_per_batch,
                                            override_timeout_limit=True)
        t = perf_counter() - s
        with capsys.disabled():
            print('{} executed {} client txns in {:.2f} seconds'.format(
                i + 1, txns_per_batch, t))
            print('--------Memory Usage details start')
            for node in txnPoolNodeSet:
                # print(sys.getsizeof(node))
                print('---Node {}-----'.format(node))
                # print('Requests {}'.format(asizeof.asizeof(node.requests, detail=1)))
                print(get_memory_usage(node, True, get_only_non_empty=True))
                for r in node.replicas:
                    print('---Replica {}-----'.format(r))
                    print(get_memory_usage(r, True, get_only_non_empty=True))
            print('--------Memory Usage details end')

        if len(time_log) >= warm_up_batches:
            m = mean(time_log)
            sd = tolerance_factor * pstdev(time_log)
            assert m > t or abs(t - m) <= sd, '{} {}'.format(abs(t - m), sd)
        time_log.append(t)
        # Since the client checks its inbox for sufficient replies, clear the
        # inbox so that checking replies takes constant time for each batch
        client.inBox.clear()
        client.txnLog.reset()
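
# Illustrative sketch only (hypothetical helper, not used by the tests in this
# module): the consistency assertion above accepts a batch time `t` once
# `warm_up_batches` samples have been collected, provided `t` does not exceed
# the running mean by more than `tolerance_factor` population standard
# deviations. Extracted as a standalone predicate it would read roughly:
def _is_batch_time_consistent(time_log, t, warm_up_batches=10,
                              tolerance_factor=2):
    if len(time_log) < warm_up_batches:
        # Still warming up: accept any timing
        return True
    m = mean(time_log)
    sd = tolerance_factor * pstdev(time_log)
    # Faster-than-average batches always pass; slower ones must stay within
    # the tolerance band around the mean
    return m > t or abs(t - m) <= sd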
def test_node_load_consistent_time(tconf, change_checkpoint_freq,
                                   disable_node_monitor_config, looper,
                                   txnPoolNodeSet, capsys, sdk_pool_handle,
                                   sdk_wallet_client):
    # One of the reasons memory grows is that the spylog grows
    client_batches = 300
    txns_per_batch = 25
    time_log = []
    warm_up_batches = 10
    tolerance_factor = 2
    print_detailed_memory_usage = False
    from pympler import tracker
    tr = tracker.SummaryTracker()
    node_methods_to_capture = [TestNode.executeBatch,
                               TestNode.recordAndPropagate,
                               TestNode.domainDynamicValidation,
                               TestNode.domainRequestApplication]
    times = {n.name: {meth.__name__: [] for meth in node_methods_to_capture}
             for n in txnPoolNodeSet}

    # Patch the selected node methods so that every call records its
    # execution time into `times`
    for node in txnPoolNodeSet:
        for meth in node_methods_to_capture:
            meth_name = meth.__name__
            patched = timeit(getattr(node, meth_name),
                             times[node.name][meth_name])
            setattr(node, meth_name, patched)

    for i in range(client_batches):
        s = perf_counter()
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, txns_per_batch)
        t = perf_counter() - s
        with capsys.disabled():
            print('{} executed {} client txns in {:.2f} seconds'.format(
                i + 1, txns_per_batch, t))
            print('--------Memory Usage details start')
            for node in txnPoolNodeSet:
                # print(sys.getsizeof(node))
                print('---Node {}-----'.format(node))
                # print('Requests {}'.format(asizeof.asizeof(node.requests, detail=1)))
                print(get_memory_usage(node, print_detailed_memory_usage,
                                       get_only_non_empty=True))
                for r in node.replicas:
                    print('---Replica {}-----'.format(r))
                    print(get_memory_usage(r, print_detailed_memory_usage,
                                           get_only_non_empty=True))
            # if i % 3 == 0:
            #     tr.print_diff()
            print('--------Memory Usage details end')

            for node in txnPoolNodeSet:
                for meth in node_methods_to_capture:
                    ts = times[node.name][meth.__name__]
                    print('{} {} {} {}'.format(node, meth.__name__,
                                               mean(ts), ts))

        if len(time_log) >= warm_up_batches:
            m = mean(time_log)
            sd = tolerance_factor * pstdev(time_log)
            assert m > t or abs(t - m) <= sd, '{} {}'.format(abs(t - m), sd)
        time_log.append(t)
def test_node_load_consistent_time(tconf, change_checkpoint_freq,
                                   disable_node_monitor_config, looper,
                                   txnPoolNodeSet, tdirWithPoolTxns,
                                   allPluginsPath, poolTxnStewardData,
                                   capsys):
    # One of the reasons memory grows is that the spylog grows
    client, wallet = buildPoolClientAndWallet(poolTxnStewardData,
                                              tdirWithPoolTxns,
                                              clientClass=TestClient)
    looper.add(client)
    looper.run(client.ensureConnectedToNodes())

    client_batches = 300
    txns_per_batch = 25
    time_log = []
    warm_up_batches = 10
    tolerance_factor = 2
    print_detailed_memory_usage = False
    from pympler import tracker
    tr = tracker.SummaryTracker()
    node_methods_to_capture = [
        TestNode.executeBatch,
        TestNode.recordAndPropagate,
        TestNode.domainDynamicValidation,
        TestNode.domainRequestApplication
    ]
    times = {
        n.name: {meth.__name__: [] for meth in node_methods_to_capture}
        for n in txnPoolNodeSet
    }

    # Patch the selected node methods so that every call records its
    # execution time into `times`
    for node in txnPoolNodeSet:
        for meth in node_methods_to_capture:
            meth_name = meth.__name__
            patched = timeit(getattr(node, meth_name),
                             times[node.name][meth_name])
            setattr(node, meth_name, patched)

    for i in range(client_batches):
        s = perf_counter()
        sendReqsToNodesAndVerifySuffReplies(looper, wallet, client,
                                            txns_per_batch,
                                            override_timeout_limit=True)
        t = perf_counter() - s
        with capsys.disabled():
            print('{} executed {} client txns in {:.2f} seconds'.format(
                i + 1, txns_per_batch, t))
            print('--------Memory Usage details start')
            for node in txnPoolNodeSet:
                # print(sys.getsizeof(node))
                print('---Node {}-----'.format(node))
                # print('Requests {}'.format(asizeof.asizeof(node.requests, detail=1)))
                print(get_memory_usage(node, print_detailed_memory_usage,
                                       get_only_non_empty=True))
                for r in node.replicas:
                    print('---Replica {}-----'.format(r))
                    print(get_memory_usage(r, print_detailed_memory_usage,
                                           get_only_non_empty=True))
            # if i % 3 == 0:
            #     tr.print_diff()
            print('--------Memory Usage details end')

            for node in txnPoolNodeSet:
                for meth in node_methods_to_capture:
                    ts = times[node.name][meth.__name__]
                    print('{} {} {} {}'.format(node, meth.__name__,
                                               mean(ts), ts))

        if len(time_log) >= warm_up_batches:
            m = mean(time_log)
            sd = tolerance_factor * pstdev(time_log)
            assert m > t or abs(t - m) <= sd, '{} {}'.format(abs(t - m), sd)
        time_log.append(t)
        # Since the client checks its inbox for sufficient replies, clear the
        # inbox so that checking replies takes constant time for each batch
        client.inBox.clear()
        client.txnLog.reset()
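
# Illustrative sketch only: `timeit` is supplied by the plenum test utilities
# and is not defined in this module. Based on how it is called above
# (wrapping a bound node method together with a list from `times`), it is
# assumed to time every invocation and append the elapsed seconds to that
# list while returning the wrapped method's result. A minimal version under
# that assumption could look like this:
def _timeit_sketch(func, acc):
    def wrapper(*args, **kwargs):
        start = perf_counter()
        result = func(*args, **kwargs)
        # Record how long this call took so the test can report per-method
        # mean execution times
        acc.append(perf_counter() - start)
        return result
    return wrapper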