def send_txs(parameters, test_instance):
    """Create a batch of wealth TXs and submit them to a single node.

    Expected keys in *parameters*:
        name:           identifier used for the pickled identity files
        amount:         number of identities / TXs to create
        nodes:          list containing exactly one node index
                        (only one node is supported at this time)
        load_from_file: optional; when truthy, load pre-generated identities
                        from <test_files_dir>/identities_pickled/<name>.pickle
                        instead of creating fresh ones

    Side effects: attaches a list of (tx_hash, identity, balance) tuples to
    test_instance._metadata and pickles identities and metadata into the
    test workspace so later steps (e.g. verify_txs) can reuse them.
    """
    name = parameters["name"]
    amount = parameters["amount"]
    nodes = parameters["nodes"]

    if len(nodes) != 1:
        output("Only one node supported for sending TXs to at this time!")
        sys.exit(1)

    # Create or load the identities up front
    if parameters.get("load_from_file"):
        filename = "{}/identities_pickled/{}.pickle".format(
            test_instance._test_files_dir, name)

        verify_file(filename)

        with open(filename, 'rb') as handle:
            identities = pickle.load(handle)
    else:
        identities = [Entity() for _ in range(amount)]

        # If pickling, save this to the workspace
        with open('{}/{}.pickle'.format(test_instance._workspace, name),
                  'wb') as handle:
            pickle.dump(identities, handle)

    for node_index in nodes:
        node_host = "localhost"
        node_port = test_instance._nodes[node_index]._port_start

        # create the API objects we use to interface with the nodes
        api = LedgerApi(node_host, node_port)

        tx_and_identity = []

        for index in range(amount):
            # get next identity
            identity = identities[index]

            # create and send the transaction to the ledger, capturing the tx
            # hash
            tx = api.tokens.wealth(identity, index)
            tx_and_identity.append((tx, identity, index))

            output("Created wealth with balance: ", index)

        # Attach this to the test instance so it can be used for verification
        test_instance._metadata = tx_and_identity

        # Save the metadata too
        with open('{}/{}_meta.pickle'.format(test_instance._workspace, name),
                  'wb') as handle:
            pickle.dump(test_instance._metadata, handle)
def get_nodes_private_key(test_instance, index):
    """Return the first 32 bytes of node *index*'s on-disk private key.

    The key file is expected to already exist at
    <dir-of-yaml-file>/input_files/<index>.key; verify_file aborts otherwise.
    """
    # Path to config files (should already be generated)
    expected_output_dir = os.path.abspath(
        os.path.dirname(test_instance._yaml_file) + "/input_files")
    key_path = expected_output_dir + "/{}.key".format(index)
    verify_file(key_path)

    # Context manager closes the handle promptly — the original version
    # leaked the open file until garbage collection.
    with open(key_path, "rb") as key_file:
        private_key = key_file.read(32)

    return private_key
def __init__(self, build_directory, constellation_exe, yaml_file):
    """Prepare an end-to-end test run for the given constellation binary.

    Removes stale end_to_end_test_* workspaces from *build_directory*,
    verifies the executable and YAML file exist, and creates a fresh
    workspace directory for this run's output.
    """
    # Scalar test configuration (overridable by later setup steps)
    self._number_of_nodes = 0
    self._node_load_directory = []
    self._node_connections = None
    self._nodes_are_mining = []
    self._port_start_range = 8000
    self._port_range = 20
    self._workspace = ""
    self._lanes = 1
    self._slices = 16
    self._max_test_time = 1000
    self._nodes = []
    self._metadata = None
    self._watchdog = None
    self._creation_time = time.perf_counter()
    self._block_interval = 1000
    self._genesis_file_location = ""

    # In order for the tests to have tokens, allocate
    # a benefactor address enough at genesis
    self._benefactor_entity = Entity()
    self._benefactor_address = Address(self._benefactor_entity)

    # Variables related to temporary pos mode
    self._pos_mode = False
    self._nodes_pubkeys = []
    self._nodes_keys = []

    # Default to removing old tests
    for f in glob.glob(build_directory + "/end_to_end_test_*"):
        shutil.rmtree(f)

    # NOTE(review): a date-based identifier ('{0:%Y_%m_%d_%H_%M_%S}' of now)
    # used to be computed here but was immediately overwritten, so the dead
    # assignment has been removed. Collision avoidance between concurrent
    # runs is therefore disabled — every run shares one workspace name.
    self._random_identifier = "default"

    self._workspace = os.path.join(
        build_directory,
        'end_to_end_test_{}'.format(self._random_identifier))
    self._build_directory = build_directory
    self._constellation_exe = os.path.abspath(constellation_exe)
    self._yaml_file = os.path.abspath(yaml_file)
    self._test_files_dir = os.path.dirname(self._yaml_file)

    verify_file(constellation_exe)
    verify_file(self._yaml_file)

    # Ensure that build/end_to_end_output_XXX/ exists for the test output
    os.makedirs(self._workspace, exist_ok=True)
def run_dmlf_etch_client(parameters, test_instance):
    """Run the dmlf etch client binary against the selected dmlf nodes.

    Expected keys in *parameters*:
        nodes: indexes into test_instance._nodes to target
        exe:   client executable path, relative to the build directory
        etch:  etch source file path, relative to the build directory

    Writes a generated JSON config and the client's combined stdout/stderr
    log under <workspace>/steps/. Raises CalledProcessError on a non-zero
    client exit code.
    """
    indexes = parameters["nodes"]
    client_exe = parameters["exe"]
    etch_file = parameters["etch"]

    dmlf_etch_nodes = [test_instance._nodes[index] for index in indexes]

    build_directory = os.path.abspath(test_instance._build_directory)

    # get dmlf etch client executable
    client_exe = os.path.abspath(os.path.join(build_directory, client_exe))
    verify_file(client_exe)

    # get etch file
    etch_file = os.path.abspath(os.path.join(build_directory, etch_file))
    verify_file(etch_file)

    client_output_dir = os.path.abspath(
        os.path.join(test_instance._workspace, "steps/"))
    os.makedirs(client_output_dir, exist_ok=True)

    # generate config file
    config_path = os.path.join(client_output_dir, "e2e_config_client.json")
    nodes = [
        {
            "uri": node.uri,
            "pub": node.public_key
        }
        for node in dmlf_etch_nodes
    ]
    key = Entity()
    config = {"client": {"key": key.private_key}, "nodes": nodes}
    with open(config_path, 'w') as f:
        json.dump(config, f)

    # generate client command
    cmd = [client_exe, config_path, etch_file]

    # run client — context manager closes the log file (the original
    # leaked the handle); stderr is folded into the same log
    logfile_path = os.path.join(client_output_dir, "dmlf_etch_client.log")
    with open(logfile_path, 'w') as logfile:
        subprocess.check_call(cmd, cwd=build_directory,
                              stdout=logfile, stderr=subprocess.STDOUT)
def __init__(self, build_directory, node_exe, yaml_file):
    """Prepare an end-to-end test run for the given node executable.

    Removes stale end_to_end_test_* workspaces from *build_directory*,
    verifies the executable and YAML file exist, and creates a fresh
    workspace directory for this run's output.
    """
    # Scalar test configuration (overridable by later setup steps)
    self._number_of_nodes = 0
    self._node_connections = None
    self._port_start_range = 8000
    self._port_range = 20
    self._workspace = ""
    self._max_test_time = 1000
    self._nodes = []
    self._watchdog = None
    self._creation_time = time.perf_counter()
    self._nodes_pubkeys = []
    self._nodes_keys = []

    # Default to removing old tests
    for f in glob.glob(build_directory + "/end_to_end_test_*"):
        shutil.rmtree(f)

    # NOTE(review): a date-based identifier ('{0:%Y_%m_%d_%H_%M_%S}' of now)
    # used to be computed here but was immediately overwritten, so the dead
    # assignment has been removed. Collision avoidance between concurrent
    # runs is therefore disabled. The attribute name keeps its historical
    # misspelling ("identifer") — it is part of the instance interface.
    self._random_identifer = "default"

    self._workspace = os.path.join(
        build_directory,
        'end_to_end_test_{}'.format(self._random_identifer))
    self._build_directory = build_directory
    self._node_exe = os.path.abspath(node_exe)
    self._yaml_file = os.path.abspath(yaml_file)
    self._test_files_dir = os.path.dirname(self._yaml_file)

    verify_file(node_exe)
    verify_file(self._yaml_file)

    # Ensure that build/end_to_end_output_XXX/ exists for the test output
    os.makedirs(self._workspace, exist_ok=True)
def verify_txs(parameters, test_instance):
    """Verify submitted wealth TXs executed and balances match expectations.

    Expected keys in *parameters*:
        name:           identifier matching the pickled metadata files
        nodes:          node indexes to verify against
        expect_mined:   optional; when truthy, skip waiting for "Executed"
        load_from_file: optional; when truthy, load the (tx, identity,
                        balance) metadata from the test files dir instead of
                        test_instance._metadata

    Blocks until every TX reports executed (unless expect_mined), then polls
    each identity's balance, re-submitting the wealth TX if the balance
    stays at 0 (the TX can be lost even after reporting executed). Triggers
    the watchdog on a balance mismatch.
    """
    name = parameters["name"]
    nodes = parameters["nodes"]

    # Optional flag; .get replaces a bare try/except that swallowed
    # every error, not just a missing key.
    expect_mined = parameters.get("expect_mined", False)

    # Currently assume there only one set of TXs
    tx_and_identity = test_instance._metadata

    # Load these from file if specified
    if parameters.get("load_from_file"):
        filename = "{}/identities_pickled/{}_meta.pickle".format(
            test_instance._test_files_dir, name)

        verify_file(filename)

        with open(filename, 'rb') as handle:
            tx_and_identity = pickle.load(handle)

    for node_index in nodes:
        node_host = "localhost"
        node_port = test_instance._nodes[node_index]._port_start

        api = LedgerApi(node_host, node_port)

        # Verify TXs - will block until they have executed
        for tx, identity, balance in tx_and_identity:
            error_message = ""

            # Check TX has executed, unless we expect it should already
            # have been mined
            while True:
                status = api.tx.status(tx).status

                if status == "Executed" or expect_mined:
                    output("found executed TX")
                    error_message = ""
                    break

                tx_b64 = codecs.encode(
                    codecs.decode(tx, 'hex'), 'base64').decode()

                next_error_message = (
                    "Waiting for TX to get executed (node {}). "
                    "Found: {} Tx: {}".format(node_index, status, tx_b64))

                time.sleep(0.5)

                # Only log when the message changes, to avoid spamming
                if next_error_message != error_message:
                    output(next_error_message)
                    error_message = next_error_message

            failed_to_find = 0

            while True:
                seen_balance = api.tokens.balance(identity)

                # There is an unavoidable race that can cause you to see a
                # balance of 0 since the TX can be lost even after
                # supposedly being executed.
                # (was: `balance is not 0` — identity comparison against an
                # int literal; replaced with a value comparison)
                if seen_balance == 0 and balance != 0:
                    output(
                        f"Note: found a balance of 0 when expecting {balance}. Retrying."
                    )
                    time.sleep(1)
                    failed_to_find += 1

                    if failed_to_find > 5:
                        # Forces the resubmission of wealth TX to the chain
                        # (TX most likely was lost)
                        api.tokens.wealth(identity, balance)
                        failed_to_find = 0
                else:
                    # Non-zero balance at this point. Stop waiting.
                    if balance != seen_balance:
                        output(
                            "Balance mismatch found after sending to node. "
                            "Found {} expected {}".format(
                                seen_balance, balance))
                        test_instance._watchdog.trigger()
                    break

            output("Verified a wealth of {}".format(seen_balance))

        output("Verified balances for node: {}".format(node_index))