def create_balance(parameters, test_instance):
    """Transfer `amount` tokens from the benefactor to each listed node's entity.

    For every node index in parameters["nodes"], builds a transfer TX from
    the benefactor entity to the node's own entity and polls until the
    balance is observed on chain.

    :param parameters: dict with "nodes" (list of node indices) and "amount"
    :param test_instance: running test harness holding the node objects
    :raises Exception: when a node's balance never reaches `amount`
    """
    nodes = parameters["nodes"]
    amount = parameters["amount"]

    for node_index in nodes:
        node_host = "localhost"
        node_port = test_instance._nodes[node_index]._port_start
        api = LedgerApi(node_host, node_port)

        # create the entity from the node's private key
        entity = Entity(get_nodes_private_key(test_instance, node_index))

        tx = api.tokens.transfer(test_instance._benefactor_entity, entity,
                                 amount, BASE_TX_FEE)

        # BUGFIX: the original returned from the whole function as soon as the
        # FIRST node was funded, silently skipping every remaining node. Now
        # success only ends the polling for the current node.
        if not _poll_until_funded(api, entity, tx, amount):
            raise Exception("Failed to send funds to node!")


def _poll_until_funded(api, entity, tx, amount):
    """Sync `tx` and poll `entity`'s balance until it reaches `amount`.

    Returns True on success, False when all retries are exhausted.
    """
    for _ in range(10):
        output('Create balance of: ', amount)
        api.sync(tx, timeout=120, hold_state_sec=20)

        for _ in range(5):
            balance = api.tokens.balance(entity)
            output('Current balance: ', balance)
            if balance >= amount:
                return True
            time.sleep(5)
        time.sleep(5)
    return False
def clean_shutdown():
    """Stop the test instance cleanly (preserving logs) and hard-exit.

    Relies on `test_yaml` and `test_instance` from the enclosing scope.
    """
    banner = "***** Shutting down test due to failure!. Debug YAML: {} *****\n"
    output(banner.format(test_yaml))
    test_instance.stop()
    # test_instance.dump_debug()
    os._exit(1)
def destake(parameters, test_instance):
    """De-stake, then collect stake, for each listed node's entity.

    Logs the stake and balance before and after the operation.

    :param parameters: dict with "nodes" (list of node indices)
    :param test_instance: running test harness holding the node objects
    """
    nodes = parameters["nodes"]

    for node_index in nodes:
        node_host = "localhost"
        node_port = test_instance._nodes[node_index]._port_start

        # create the API objects we use to interface with the nodes
        api = LedgerApi(node_host, node_port)

        # create the entity from the node's private key
        entity = Entity(get_nodes_private_key(test_instance, node_index))

        current_stake = api.tokens.stake(entity)
        output(f'Destaking node {node_index}. Current stake: ', current_stake)
        output(f'Destaking node {node_index}. Current balance: ',
               api.tokens.balance(entity))

        # NOTE(review): the add_stake(1) before de_stake looks like a
        # deliberate workaround to force a stake state update - confirm
        # before removing.
        api.sync(api.tokens.add_stake(entity, 1, 500))
        api.sync(api.tokens.de_stake(entity, current_stake, 500))
        api.sync(api.tokens.collect_stake(entity, 500))

        # BUGFIX: re-query the stake here - the original printed the stale
        # pre-destake value as the "destaked" stake.
        output(f'Destaked node {node_index}. Current stake: ',
               api.tokens.stake(entity))
        output(f'Destaked node {node_index}. Current balance: ',
               api.tokens.balance(entity))
        output(f'Destaked node {node_index}. Current cooldown stake: ',
               api.tokens.stake_cooldown(entity))
def verify_chain_sync(self, node_index, max_trials=20):
    """
    Verify if a node has synced it's chain with the rest of the network

    :param node_index: which node we want to verify
    :param max_trials: maximum of how many times we try the sync test
    :return: True when the node reports synced, False after max_trials
    """
    # One {host, port} entry per node in the network
    config = [{"host": "localhost", "port": node._port_start}
              for node in self._nodes]
    sync_test = ChainSyncTesting(config)

    target_host = "localhost"
    target_port = self._nodes[node_index]._port_start
    sleep_time = self._nodes[node_index].block_interval * 1.2 / 1000.

    for _ in range(max_trials):
        try:
            if sync_test.node_synced(target_host, target_port):
                output(f"Node {node_index} chain synced with the network!")
                return True
        except Exception as e:
            output(f"verify_chain_sync exception: {e}")
        time.sleep(sleep_time)
    return False
def verify_chain_sync(parameters, test_instance):
    """Test step: fail unless the given node's chain is synced.

    :param parameters: dict with "node" (index) and optional "max_trials"
    :raises RuntimeError: when the node does not sync in time
    """
    node_idx = parameters["node"]
    max_trials = parameters.get("max_trials", 20)

    output(f"verify_chain_sync: node={node_idx}")
    synced = test_instance.verify_chain_sync(node_idx, max_trials)
    if not synced:
        raise RuntimeError(
            f"Node {node_idx} chain not synced with the network!")
def send_txs(parameters, test_instance):
    """Create `amount` wealth TXs against a single node.

    Identities are either freshly generated (and pickled into the
    workspace) or, with load_from_file, loaded from a previous run's
    pickle. The (tx, identity, index) triples are stored on the test
    instance and pickled for later verification.
    """
    name = parameters["name"]
    amount = parameters["amount"]
    nodes = parameters["nodes"]

    if len(nodes) != 1:
        output("Only one node supported for sending TXs to at this time!")
        sys.exit(1)

    # Create or load the identities up front
    if parameters.get("load_from_file") == True:
        filename = "{}/identities_pickled/{}.pickle".format(
            test_instance._test_files_dir, name)
        verify_file(filename)
        with open(filename, 'rb') as handle:
            identities = pickle.load(handle)
    else:
        identities = [Entity() for _ in range(amount)]
        # If pickling, save this to the workspace
        with open('{}/{}.pickle'.format(test_instance._workspace, name),
                  'wb') as handle:
            pickle.dump(identities, handle)

    for node_index in nodes:
        # create the API objects we use to interface with the nodes
        api = LedgerApi("localhost",
                        test_instance._nodes[node_index]._port_start)

        tx_and_identity = []
        for index in range(amount):
            identity = identities[index]
            # create and send the transaction to the ledger, capturing the tx
            # hash
            tx = api.tokens.wealth(identity, index)
            tx_and_identity.append((tx, identity, index))
            output("Created wealth with balance: ", index)

    # Attach this to the test instance so it can be used for verification
    test_instance._metadata = tx_and_identity

    # Save the metatada too
    with open('{}/{}_meta.pickle'.format(test_instance._workspace, name),
              'wb') as handle:
        pickle.dump(test_instance._metadata, handle)
def _sleep(self):
    """Block until the stop event is set or the timeout elapses.

    If the timeout wins, the watchdog fires its trigger.
    """
    # Event.wait returns True only when the stop event was set in time
    stopped_in_time = self._stop_event.wait(self._time)
    if stopped_in_time:
        output("Watchdog safely stopped")
    else:
        output(
            "Watchdog '{}' awoke before being stopped! Awoke after: {}s . Watchdog will now: {}"
            .format(self._name, self._time, self._task))
        self.trigger()
def wait_for_blocks(self, node_index, number_of_blocks):
    """
    Wait for a specific number of blocks in the selected node

    :param node_index: which node we are interested in
    :param number_of_blocks: for how many new block to wait
    :return:
    """
    port = self._nodes[node_index]._port_start
    output(f"Waiting for {number_of_blocks} blocks on node {port}")
    LedgerApi("localhost", port).wait_for_blocks(number_of_blocks)
def get_nodes_private_key(test_instance, index):
    """Return the first 32 bytes of node `index`'s key file, or None if absent.

    The key files are expected to live in an input_files/ directory next to
    the test YAML file.

    :param test_instance: harness carrying `_yaml_file`
    :param index: node index, used as the key file's basename
    """
    # Path to config files (should already be generated)
    expected_output_dir = os.path.abspath(
        os.path.dirname(test_instance._yaml_file) + "/input_files")
    key_path = expected_output_dir + "/{}.key".format(index)

    if not os.path.isfile(key_path):
        output("Couldn't find expected file: {}".format(key_path))
        return None

    # BUGFIX: use a context manager - the original leaked the file handle
    with open(key_path, "rb") as key_file:
        return key_file.read(32)
def run_contract(parameters, test_instance):
    """Submit random data to a synergetic contract on each node and validate.

    Reuses the contract helper stashed on the node by
    create_synergetic_contract, or loads it from the workspace when absent.
    Raises when the contract execution cannot be validated.
    """
    nodes = parameters["nodes"]
    contract_name = parameters["contract_name"]
    wait_for_blocks_num = parameters["wait_for_blocks"]

    for node_index in nodes:
        node = test_instance._nodes[node_index]
        api = LedgerApi("localhost", node._port_start)

        # create the entity from the node's private key
        entity = Entity(get_nodes_private_key(test_instance, node_index))

        if hasattr(node, "_contract"):
            contract_helper = node._contract
        else:
            output(
                f"No contract stored in test_instance (node_index={node_index})! Loading from file...")
            contract_helper = SynergeticContractTestHelper(
                contract_name, api, entity, test_instance._workspace)
            contract_helper.load()

        output('Submit data, available balance: ', api.tokens.balance(entity))
        contract_helper.submit_random_data(10, (0, 200))
        api.wait_for_blocks(wait_for_blocks_num)

        if contract_helper.validate_execution():
            output(
                f"Synergetic contract ({contract_name}) executed on node {node_index} ")
        else:
            output(
                f"Synergetic contract ({contract_name}) execution failed on node {node_index}!")
            raise Exception(
                f"Synergetic contract ({contract_name}) execution failed on node {node_index}!")
def node_ready(self, index):
    """Query node `index`'s health/ready endpoint.

    Returns True when the call succeeds and every reported flag is truthy,
    False when a flag is unset or the call raises. (Falls through with no
    explicit return when the endpoint call itself reports failure, matching
    the original behavior.)
    """
    try:
        api = ApiEndpoint("localhost", self._nodes[index]._port_start)
        status, response = api._get_json("health/ready")
        if status:
            for key, value in response.items():
                if value:
                    continue
                output(
                    f"Node {index} not ready, because {key} is False!")
                return False
            return True
    except Exception as e:
        output(f"Failed to call node {index}: {str(e)}")
        return False
def setup_test(test_yaml, test_instance):
    """Read setup conditions from the YAML doc, arm the watchdog, start nodes.

    :param test_yaml: parsed setup_conditions document
    :param test_instance: test harness to configure and run
    """
    output("Setting up test: {}".format(test_yaml))

    test_name = yaml_extract(test_yaml, 'test_name',
                             expected=True, expect_type=str)
    max_test_time = yaml_extract(test_yaml, 'max_test_time', expected=False,
                                 expect_type=int, default=10)

    # Copy the remaining settings straight onto the test instance
    test_instance._number_of_nodes = yaml_extract(
        test_yaml, 'number_of_nodes', expected=True, expect_type=int)
    test_instance._node_load_directory = yaml_extract(
        test_yaml, 'node_load_directory', expected=False, expect_type=dict)
    test_instance._node_connections = yaml_extract(
        test_yaml, 'node_connections', expected=False, expect_type=list)
    test_instance._nodes_are_mining = yaml_extract(
        test_yaml, 'mining_nodes', expected=False, expect_type=list,
        default=[])
    test_instance._max_test_time = max_test_time
    test_instance._pos_mode = yaml_extract(
        test_yaml, 'pos_mode', expected=False, expect_type=bool,
        default=False)
    test_instance._lanes = yaml_extract(
        test_yaml, 'lanes', expected=False, expect_type=int, default=1)

    # Watchdog will trigger this if the tests exceeds allowed bounds. Note stopping the test cleanly is
    # necessary to preserve output logs etc.
    def clean_shutdown():
        output(
            "***** Shutting down test due to failure!. Debug YAML: {} *****\n".format(test_yaml))
        test_instance.stop()
        test_instance.dump_debug()
        os._exit(1)

    watchdog = TimerWatchdog(
        time=max_test_time,
        name=test_name,
        task="End test and cleanup",
        callback=clean_shutdown)
    watchdog.start()
    test_instance._watchdog = watchdog

    # This shouldn't take a long time since nodes are started asynchronously
    test_instance.run()
def run_test(build_directory, yaml_file, node_exe, name_filter=None):
    """Load a multi-document test YAML and run each test sequentially.

    :param build_directory: build tree containing the node binaries
    :param yaml_file: path to the YAML file of test documents
    :param node_exe: node executable to launch
    :param name_filter: optional collection of test names to run; others skip
    """
    # BUGFIX: test_instance was referenced in the except handler before ever
    # being assigned - an early failure raised NameError and masked the real
    # error. Initialize it and guard the cleanup calls.
    test_instance = None

    # Read YAML file
    with open(yaml_file, 'r') as stream:
        try:
            all_yaml = yaml.safe_load_all(stream)

            # Parse yaml documents as tests (sequentially)
            for test in all_yaml:
                # Get test setup conditions
                setup_conditions = yaml_extract(test, 'setup_conditions')

                # Check if name is not filtered out
                if name_filter is not None:
                    name = yaml_extract(setup_conditions, 'test_name')
                    if name not in name_filter:
                        continue

                # Create a new test instance
                description = yaml_extract(test, 'test_description')
                output("\n=================================================")
                output("Test: {}".format(description))
                output("=================================================\n")

                if "DISABLED" in description:
                    output("Skipping disabled test")
                    continue

                # Create a test instance
                test_instance = create_test(setup_conditions, build_directory,
                                            node_exe, yaml_file)

                # Configure the test - this will start the nodes
                # asynchronously
                setup_test(setup_conditions, test_instance)

                # Run the steps in the test
                run_steps(yaml_extract(test, 'steps'), test_instance)

                test_instance.stop()
        except Exception as e:
            print('Failed to parse yaml or to run test! Error: "{}"'.format(e))
            traceback.print_exc()
            if test_instance is not None:
                test_instance.stop()
                test_instance.dump_debug()
            sys.exit(1)

    output("\nAll end to end tests have passed :)")
def execute_expression(parameters, test_instance):
    """Evaluate a comparison expression over per-node saved variables.

    The expression has the form "<lhs> <op> <rhs>" where lhs/rhs are names
    previously stored in a node's `_variables` dict (see query_balance) and
    op is one of ==, <=, >=, >, <.

    :raises RuntimeError: when the expression cannot be parsed, a node lacks
        the required variables, or the comparison evaluates false
    """
    nodes = parameters["nodes"]
    expression = parameters["expression"]

    # NOTE: two-character operators must come before their one-character
    # prefixes ("<=" before "<") since we probe in insertion order.
    ops = {
        "==": operator.eq,
        "<=": operator.le,
        ">=": operator.ge,
        ">": operator.gt,
        "<": operator.lt,
    }

    op = None
    lhs_name = None
    rhs_name = None
    for key in ops:
        if expression.find(key) != -1:
            lhs_name, rhs_name = expression.split(key)
            lhs_name = lhs_name.replace(" ", "")
            rhs_name = rhs_name.replace(" ", "")
            op = key
            break

    print("ls='", lhs_name, "'")
    print("op='", op, "'")
    print("rs='", rhs_name, "'")

    if op is None:
        raise RuntimeError(
            f"Expression '{expression}' not supported! Available ops: {ops.keys()}"
        )

    for node_index in nodes:
        node_instance = test_instance._nodes[node_index]
        if not hasattr(node_instance, "_variables"):
            raise RuntimeError(
                f"Expression '{expression}' can't be evaluated because node {node_instance} doesn't have the required variables!"
            )

        # BUGFIX: look the values up into fresh locals - the original
        # overwrote the variable *names* with their values, which broke
        # evaluation for every node after the first.
        lhs = node_instance._variables.get(lhs_name, None)
        rhs = node_instance._variables.get(rhs_name, None)
        if lhs is None or rhs is None:
            raise RuntimeError(
                f"Expression '{expression}' can't be evaluated because node {node_instance} doesn't have one of the required variables!"
            )

        result = ops[op](lhs, rhs)
        if not result:
            raise RuntimeError(
                f"Evaluation of '{expression}' failed or false!")
        output(f"Result of execution of '{expression}' is '{result}'")
def create_synergetic_contract(parameters, test_instance):
    """Create a synergetic contract from each listed node and stash the helper.

    The helper is stored on the node object so later steps (run_contract)
    can reuse it.
    """
    name = parameters["name"]
    fee_limit = parameters["fee_limit"]

    for node_index in parameters["nodes"]:
        node = test_instance._nodes[node_index]
        api = LedgerApi("localhost", node._port_start)

        # create the entity from the node's private key
        entity = Entity(get_nodes_private_key(test_instance, node_index))
        output('Create contract, available balance: ',
               api.tokens.balance(entity))

        helper = SynergeticContractTestHelper(
            name, api, entity, test_instance._workspace)
        helper.create_new(fee_limit)
        node._contract = helper
def query_balance(parameters, test_instance):
    """Query each node entity's token balance and save it on the node.

    The balance is stored under parameters["save_as"] in the node's
    `_variables` dict so later steps (e.g. execute_expression) can use it.
    """
    variable = parameters["save_as"]

    for node_index in parameters["nodes"]:
        node_instance = test_instance._nodes[node_index]
        api = LedgerApi("localhost", node_instance._port_start)

        # create the entity from the node's private key
        entity = Entity(get_nodes_private_key(test_instance, node_index))
        b = api.tokens.balance(entity)
        address = Address(entity)
        output(
            f"Requested balance ({b}) {address.to_hex()}, saved as {variable}")

        if not hasattr(node_instance, "_variables"):
            setattr(node_instance, "_variables", {})
        node_instance._variables[variable] = b
def dump_debug(self, only_node=None):
    """Dump each node's raw log file to stdout for post-mortem debugging.

    :param only_node: if given, dump only that node's log
    """
    if not self._nodes:
        return

    for n, node in enumerate(self._nodes):
        # BUGFIX: use != instead of `is not` - identity comparison on ints
        # only works by accident for small interned values.
        if only_node is not None and n != only_node:
            continue

        print('\nNode debug. Node:{}'.format(n))

        node_log_path = node.log_path
        if not os.path.isfile(node_log_path):
            output("Couldn't find supposed node log file: {}".format(
                node_log_path))
        else:
            # Send raw bytes directly to stdout since it contains
            # non-ascii
            data = Path(node_log_path).read_bytes()
            sys.stdout.buffer.write(data)
            sys.stdout.flush()
def run(self):
    """Instantiate, connect, configure and launch every node for the test."""
    # build up all the node instances
    for index in range(self._number_of_nodes):
        self.append_node(index, self._node_load_directory)

    # Now connect the nodes as specified
    if self._node_connections:
        self.connect_nodes(self._node_connections)

    # Enable mining node(s)
    for miner_index in self._nodes_are_mining:
        self._nodes[miner_index].mining = True

    single_miner = (len(self._nodes) == 1
                    and len(self._nodes_are_mining) > 0)
    if single_miner:
        # In the case only one miner node, it runs in standalone mode
        self._nodes[0].standalone = True
    else:
        for node in self._nodes:
            node.private_network = True

    # Temporary special case for POS mode
    if self._pos_mode:
        self.setup_pos_for_nodes()

    # start all the nodes
    needs_private_flag = self._number_of_nodes > 1 and not self._pos_mode
    for index in range(self._number_of_nodes):
        if needs_private_flag:
            self._nodes[index].append_to_cmd(["-private-network", ])
        self.start_node(index)

    sleep_time = 5 + (3 * self._lanes)
    if self._pos_mode:
        output("POS mode. sleep extra time.")
        sleep_time *= 2
    time.sleep(sleep_time)
def run_steps(test_yaml, test_instance):
    """Run each step of the YAML steps list via the COMMAND_MAP dispatch.

    A step is either a one-key mapping {command: parameters} or a bare
    command string (parameters default to "").
    """
    output("Running steps: {}".format(test_yaml))

    for step in test_yaml:
        output("Running step: {}".format(step))

        if isinstance(step, dict):
            command = next(iter(step.keys()))
            parameters = step[command]
        elif isinstance(step, str):
            command = step
            parameters = ""
        else:
            raise RuntimeError(
                "Failed to parse command from step: {}".format(step))

        func = COMMAND_MAP.get(command, None)
        if func is None:
            output("Found unknown command when running steps: '{}'".format(
                command))
            sys.exit(1)
        func(parameters, test_instance)
def send_txs(parameters, test_instance):
    """Send incremental token transfers from the benefactor to new identities.

    Identity i receives a balance of i + 1. The (tx, identity, amount)
    triples are stored on the test instance for later verification.

    :param parameters: dict with "name", "amount" and "nodes" (must be 1 node)
    """
    name = parameters["name"]
    amount = parameters["amount"]
    nodes = parameters["nodes"]

    if len(nodes) != 1:
        output("Only one node supported for sending TXs to at this time!")
        sys.exit(1)

    # Create or load the identities up front
    identities = [Entity() for i in range(amount)]

    for node_index in nodes:
        node_host = "localhost"
        node_port = test_instance._nodes[node_index]._port_start

        # create the API objects we use to interface with the nodes
        api = LedgerApi(node_host, node_port)

        tx_and_identity = []

        for index in range(amount):
            # get next identity
            identity = identities[index]

            # BUGFIX: use a distinct name - the original shadowed `amount`
            # inside the loop, clobbering the identity count.
            transfer_amount = index + 1

            # create and send the transaction to the ledger, capturing the tx
            # hash
            tx = api.tokens.transfer(test_instance._benefactor_entity,
                                     identity, transfer_amount, BASE_TX_FEE)

            tx_and_identity.append((tx, identity, transfer_amount))

            output(f"Sent balance {transfer_amount} to node")

        # Attach this to the test instance so it can be used for verification
        test_instance._metadata = tx_and_identity
def run_steps(test_yaml, test_instance):
    """Run each YAML step by dispatching on the command name.

    A step is either a one-key mapping {command: parameters} or a bare
    command string (parameters default to "").
    """
    output("Running steps: {}".format(test_yaml))

    # Map each command name onto a (parameters, test_instance) handler
    handlers = {
        'send_txs': send_txs,
        'verify_txs': verify_txs,
        'add_node': add_node,
        'sleep': lambda p, _: time.sleep(p),
        'print_time_elapsed': lambda _, t: t.print_time_elapsed(),
        'run_python_test': run_python_test,
        'restart_nodes': restart_nodes,
        'stop_nodes': stop_nodes,
        'start_nodes': start_nodes,
        'destake': destake,
        'run_dmlf_etch_client': run_dmlf_etch_client,
        'create_wealth': create_wealth,
        'create_synergetic_contract': create_synergetic_contract,
        'run_contract': run_contract,
        'wait_for_blocks': wait_for_blocks,
        'verify_chain_sync': verify_chain_sync,
        'wait_network_ready': wait_network_ready,
    }

    for step in test_yaml:
        output("Running step: {}".format(step))

        if isinstance(step, dict):
            command = list(step.keys())[0]
            parameters = step[command]
        elif isinstance(step, str):
            command = step
            parameters = ""
        else:
            raise RuntimeError(
                "Failed to parse command from step: {}".format(step))

        handler = handlers.get(command)
        if handler is None:
            output("Found unknown command when running steps: '{}'".format(
                command))
            sys.exit(1)
        handler(parameters, test_instance)
def fail(parameters, test_instance):
    """Run each listed command expecting it to FAIL; abort the test otherwise.

    BUGFIX: previously the success-path sys.exit(1) sat inside the same try
    whose `except BaseException: pass` swallowed it (SystemExit derives from
    BaseException), so a command that should have failed but succeeded never
    aborted the test. Only the command invocation is guarded now.
    """
    for key in parameters:
        output(f"Running {key} command in fail mode")

        func = COMMAND_MAP.get(key, None)
        if func is None:
            output("Found unknown command when running steps: '{}'".format(
                key))
            sys.exit(1)

        try:
            func(parameters[key], test_instance)
        except BaseException:
            # Expected: the command failed, move on to the next one.
            continue

        output(f"Running command {key} not failed!")
        sys.exit(1)
def print_time_elapsed(self):
    """Log the wall-clock time since this test instance was created."""
    elapsed = time.perf_counter() - self._creation_time
    output("Elapsed time: {}".format(elapsed))
def verify_txs(parameters, test_instance):
    """Verify that previously sent TXs executed and balances match.

    Metadata (tx, identity, expected balance) comes from the test instance,
    or from a pickled file when load_from_file is set. Blocks, with retries,
    until every TX is executed and every balance is seen; a mismatched
    balance triggers the watchdog.
    """
    name = parameters["name"]
    nodes = parameters["nodes"]

    # BUGFIX: was a bare `except: pass` around a dict lookup
    expect_mined = parameters.get("expect_mined", False)

    # Currently assume there only one set of TXs
    tx_and_identity = test_instance._metadata

    # Load these from file if specified
    if "load_from_file" in parameters and parameters["load_from_file"] == True:
        filename = "{}/identities_pickled/{}_meta.pickle".format(
            test_instance._test_files_dir, name)
        verify_file(filename)
        with open(filename, 'rb') as handle:
            tx_and_identity = pickle.load(handle)

    for node_index in nodes:
        node_host = "localhost"
        node_port = test_instance._nodes[node_index]._port_start
        api = LedgerApi(node_host, node_port)

        # Verify TXs - will block until they have executed
        for tx, identity, balance in tx_and_identity:
            error_message = ""

            # Check TX has executed, unless we expect it should already have
            # been mined
            while True:
                status = api.tx.status(tx).status
                if status == "Executed" or expect_mined:
                    output("found executed TX")
                    error_message = ""
                    break

                tx_b64 = codecs.encode(
                    codecs.decode(tx, 'hex'), 'base64').decode()
                next_error_message = "Waiting for TX to get executed (node {}). Found: {} Tx: {}".format(
                    node_index, status, tx_b64)
                time.sleep(0.5)

                # only log when the message changes, to avoid spam
                if next_error_message != error_message:
                    output(next_error_message)
                    error_message = next_error_message

            failed_to_find = 0
            while True:
                seen_balance = api.tokens.balance(identity)

                # There is an unavoidable race that can cause you to see a balance of 0
                # since the TX can be lost even after supposedly being executed.
                # BUGFIX: compare with != rather than `is not` (int identity).
                if seen_balance == 0 and balance != 0:
                    output(
                        f"Note: found a balance of 0 when expecting {balance}. Retrying."
                    )
                    time.sleep(1)
                    failed_to_find = failed_to_find + 1

                    if failed_to_find > 5:
                        # Forces the resubmission of wealth TX to the chain (TX most likely was lost)
                        api.tokens.wealth(identity, balance)
                        failed_to_find = 0
                else:
                    # Non-zero balance at this point. Stop waiting.
                    if balance != seen_balance:
                        output(
                            "Balance mismatch found after sending to node. Found {} expected {}"
                            .format(seen_balance, balance))
                        test_instance._watchdog.trigger()
                    break

            output("Verified a wealth of {}".format(seen_balance))

        output("Verified balances for node: {}".format(node_index))
def connect_nodes(self, node_connections):
    """Add peer links between nodes given as (from, to) index pairs."""
    for connect_from, connect_to in node_connections:
        peer = self._nodes[connect_to]
        self._nodes[connect_from].add_peer(peer)
        output("Connect node {} to {}".format(connect_from, connect_to))
def stop_node(self, index, remove_db=False):
    """Stub: stopping an individual node is not implemented.

    :param index: index of the node to stop (currently ignored)
    :param remove_db: whether to wipe the node's database (currently ignored)
    """
    output("Not implemented")