def waitTillReady(self):
    # Poll v1/chain/get_info over HTTP until the node starts responding.
    Utils.waitForTruth(
        lambda: Utils.runCmdArrReturnStr(
            ['curl', '-H', 'Accept: application/json', self.endpoint + 'v1/chain/get_info'],
            silentErrors=True) != "",
        timeout=30)
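# The wait helpers in this file rely on Utils.waitForTruth, whose implementation
# is not shown here. The sketch below is an illustrative assumption of such a
# polling helper (the name _waitForTruthSketch is hypothetical and not part of
# the harness), not the actual Utils code.
import time

def _waitForTruthSketch(lam, timeout=60, sleepTime=1.0):
    # Poll lam() until it returns a truthy value; return that value, or None on timeout.
    deadline = time.time() + timeout
    while time.time() < deadline:
        ret = lam()
        if ret:
            return ret
        time.sleep(sleepTime)
    return None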
def waitUntilBeginningOfProdTurn(node, producerName, timeout=30, sleepTime=0.4):
    # True only on the first block of producerName's turn: the current head is
    # signed by producerName but the previous block is not.
    def isDesiredProdTurn():
        beginningOfProdTurnHead = node.getHeadBlockNum()
        res = node.getBlock(beginningOfProdTurnHead)["producer"] == producerName and \
              node.getBlock(beginningOfProdTurnHead - 1)["producer"] != producerName
        return res

    ret = Utils.waitForTruth(isDesiredProdTurn, timeout, sleepTime)
    assert ret is not None, "Expected producer to arrive within 19 seconds (with 3 other producers)"
def waitUntilBlockBecomeIrr(node, blockNum, timeout=60):
    def hasBlockBecomeIrr():
        return node.getIrreversibleBlockNum() >= blockNum
    return Utils.waitForTruth(hasBlockBecomeIrr, timeout)
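# Illustrative usage of the two wait helpers above (node0 and the producer
# name are placeholders from the surrounding test harness, not part of this
# file):
#
#   waitUntilBeginningOfProdTurn(node0, "defproducera")
#   headBlockNum = node0.getHeadBlockNum()
#   assert waitUntilBlockBecomeIrr(node0, headBlockNum) is not None, \
#       "head block %d never became irreversible" % headBlockNum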
newProducerAcc.activePublicKey = "EOS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV"
producerNode.createAccount(newProducerAcc, cluster.eosioAccount)

# Promote the new account into the producer schedule via eosio::setprods.
setProdsStr = '{"schedule": ['
setProdsStr += '{"producer_name":' + newProducerAcc.name + ',"block_signing_key":' + newProducerAcc.activePublicKey + '}'
setProdsStr += ']}'
cmd = "push action -j eosio setprods '{}' -p eosio".format(setProdsStr)
trans = producerNode.processCleosCmd(cmd, cmd, silentErrors=False)
assert trans
setProdsBlockNum = int(trans["processed"]["block_num"])

# Wait until the block where setprods was executed becomes irreversible so the
# new producer schedule cannot be rolled back.
def isSetProdsBlockNumIrr():
    return producerNode.getIrreversibleBlockNum() >= setProdsBlockNum
Utils.waitForTruth(isSetProdsBlockNumIrr, timeout=30, sleepTime=0.1)

# Once it is irreversible, immediately pause the producer so the promoted
# producer schedule is not cleared.
producerNode.processCurlCmd("producer", "pause", "")
producerNode.kill(signal.SIGTERM)

# Create the snapshot and rename it to avoid a name conflict later on.
res = irrNode.createSnapshot()
beforeShutdownSnapshotPath = res["snapshot_name"]
snapshotPathWithoutExt, snapshotExt = os.path.splitext(beforeShutdownSnapshotPath)
os.rename(beforeShutdownSnapshotPath, snapshotPathWithoutExt + "_before_shutdown" + snapshotExt)

# Restart the irreversible-mode node and ensure the snapshot is still identical.
irrNode.kill(signal.SIGTERM)
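# A sketch of how the restart-and-compare step described above could continue.
# This is an assumption about the rest of the test, not verbatim harness code:
# it presumes Node.relaunch() restarts the killed node and that snapshots are
# deterministic, so byte-identical files imply identical state.
import filecmp

renamedSnapshotPath = snapshotPathWithoutExt + "_before_shutdown" + snapshotExt
assert irrNode.relaunch(), "Failed to relaunch the irreversible node"
res = irrNode.createSnapshot()
assert filecmp.cmp(renamedSnapshotPath, res["snapshot_name"], shallow=False), \
    "Snapshot content changed across restart"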
Print("Wait until block 1 is no longer retrievable, this ensures newly joined nodeos cannot sync from net plugin") while os.path.exists('var/lib/node_00/archive/blocks-1-20.log'): time.sleep(2) Print("#################################################################################") Print("# Scenario 1: Test node 1 failover without snapshot in the block vault #") Print("#################################################################################") testFailOver(cluster, nodeToKill=node1) Print("#################################################################################") Print("# Scenario 2: Test node 1 failover from the snapshot in the block vault #") Print("#################################################################################") Print("Create a snapshot") node1.createSnapshot() Print("Wait until the snapshot appears in the database") Utils.waitForTruth(lambda: num_rows_in_table('SnapshotData') != 0) testFailOver(cluster, nodeToKill=node1) Print("#################################################################################") Print("# Scenario 3: Test two identical producer nodes connecting to the block vault #") Print("#################################################################################") node2 = cluster.getNode(2) time.sleep(10) testFailOver(cluster, nodeToKill=node2, addSwapFlags={ "--plugin": "eosio::blockvault_client_plugin", "--block-vault-backend": "postgresql://*****:*****@localhost", "--producer-name": "vltproducera", "--signature-provider": "{}=KEY:{}".format(vltproducerAccount.ownerPublicKey, vltproducerAccount.ownerPrivateKey) }) assert node2.waitForLibToAdvance(timeout=60)