def test_state_regenerated_from_ledger(looper,
                                       nodeSet, tconf, tdir,
                                       sdk_pool_handle,
                                       sdk_wallet_trustee,
                                       allPluginsPath):
    """
    Node loses its state database but recreates it from ledger after start.
    Checking ATTRIB txns too since they store some data off ledger too
    """
    endorsers = []
    for i in range(5):
        endorsers.append(sdk_add_new_nym(looper, sdk_pool_handle,
                                         sdk_wallet_trustee,
                                         'TA' + str(i),
                                         ENDORSER_STRING))
        sdk_add_raw_attribute(looper, sdk_pool_handle,
                              endorsers[-1],
                              randomString(6),
                              randomString(10))

    for wh in endorsers:
        for i in range(3):
            sdk_add_new_nym(looper, sdk_pool_handle,
                            wh, 'NP1' + str(i))

    ensure_all_nodes_have_same_data(looper, nodeSet)

    node_to_stop = nodeSet[-1]
    node_state = node_to_stop.states[DOMAIN_LEDGER_ID]
    assert not node_state.isEmpty
    state_db_path = node_state._kv.db_path
    node_to_stop.cleanupOnStopping = False
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop, nodeSet[:-1])

    shutil.rmtree(state_db_path)

    config_helper = NodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(
        node_to_stop.name,
        config_helper=config_helper,
        config=tconf,
        pluginPaths=allPluginsPath,
        ha=node_to_stop.nodestack.ha,
        cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    nodeSet[-1] = restarted_node

    looper.run(checkNodesConnected(nodeSet))
    # Need some time as `last_ordered_3PC` is compared too and that is
    # communicated through catchup
    waitNodeDataEquality(looper, restarted_node, *nodeSet[:-1])

    # Pool is still functional
    for wh in endorsers:
        sdk_add_new_nym(looper, sdk_pool_handle,
                        wh, 'NP--' + randomString(5))

    ensure_all_nodes_have_same_data(looper, nodeSet)
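
The restart pattern above recurs throughout this page: wipe a storage directory, rebuild the node with a fresh NodeConfigHelper, and let catchup restore the data from the ledger. A condensed sketch of just that step, assuming the same fixtures (looper, tconf, tdir, allPluginsPath) and helpers (TestNode) used above:

def restart_with_wiped_state(node, looper, tconf, tdir, allPluginsPath):
    # Keep the node's files on disk, stop it and drop it from the looper
    node.cleanupOnStopping = False
    node.stop()
    looper.removeProdable(node)
    # Remove the domain state DB so it has to be rebuilt from the ledger
    shutil.rmtree(node.states[DOMAIN_LEDGER_ID]._kv.db_path)
    # Recreate the node with the same name, ports and config
    config_helper = NodeConfigHelper(node.name, tconf, chroot=tdir)
    restarted = TestNode(node.name,
                         config_helper=config_helper,
                         config=tconf,
                         pluginPaths=allPluginsPath,
                         ha=node.nodestack.ha,
                         cliha=node.clientstack.ha)
    looper.add(restarted)
    return restarted
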
Code example #2
def run_node(config, name, node_ip, node_port, client_ip, client_port):
    node_ha = HA(node_ip, node_port)
    client_ha = HA(client_ip, client_port)

    node_config_helper = NodeConfigHelper(name, config)

    logFileName = os.path.join(node_config_helper.log_dir, name + ".log")

    logger = getlogger()
    Logger().apply_config(config)
    Logger().enableFileLogging(logFileName)

    logger.setLevel(config.logLevel)
    logger.debug("You can find logs in {}".format(logFileName))

    vars = [var for var in os.environ.keys() if var.startswith("INDY")]
    logger.debug("Indy related env vars: {}".format(vars))

    with Looper(debug=config.LOOPER_DEBUG) as looper:
        node = Node(name,
                    config_helper=node_config_helper,
                    ha=node_ha, cliha=client_ha)
        node = integrate(node_config_helper, node, logger)
        looper.add(node)
        looper.run()
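
A hedged sketch of invoking run_node above from a start script; getConfig is assumed to come from plenum.common.config_util, and the addresses and ports are placeholders:

from plenum.common.config_util import getConfig

if __name__ == "__main__":
    # Placeholder HAs; real deployments read these from an env file
    run_node(getConfig(), "Node1", "0.0.0.0", 9701, "0.0.0.0", 9702)
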
Code example #3
def testNodeSchedulesUpgradeAfterRestart(upgradeScheduled, looper, nodeSet,
                                         validUpgrade, testNodeClass, tdir,
                                         tconf, allPluginsPath):
    names = []
    while nodeSet:
        node = nodeSet.pop()
        names.append(node.name)
        node.cleanupOnStopping = False
        looper.removeProdable(node)
        node.stop()
        del node

    for nm in names:
        config_helper = NodeConfigHelper(nm, tconf, chroot=tdir)
        node = testNodeClass(nm,
                             config_helper=config_helper,
                             config=tconf,
                             pluginPaths=allPluginsPath)
        looper.add(node)
        nodeSet.append(node)

    looper.run(checkNodesConnected(nodeSet))
    ensureElectionsDone(looper=looper, nodes=nodeSet, retryWait=1)
    looper.run(
        eventually(checkUpgradeScheduled,
                   nodeSet,
                   validUpgrade[VERSION],
                   retryWait=1,
                   timeout=waits.expectedUpgradeScheduled()))
Code example #4
def migrate_all():

    node_name = get_node_name()
    if node_name is None:
        return False

    config = getConfig()
    config_helper = NodeConfigHelper(node_name, config)

    if BUILDER_NET_NETWORK_NAME != config.NETWORK_NAME:
        logger.info("This script can be used only for {} network".format(BUILDER_NET_NETWORK_NAME))
        return False

    path_to_config_state = os.path.join(config_helper.ledger_dir, config.configStateDbName)
    path_to_config_ts_db = os.path.join(config_helper.ledger_dir, config.configStateTsDbName)

    if not os.path.exists(path_to_config_ts_db):
        logger.error("Path {} to config's timestamp storage does not exist".format(path_to_config_ts_db))
        return False

    if not os.path.exists(path_to_config_state):
        logger.error("Path {} to config_state storage does not exist".format(path_to_config_state))
        return False

    if not remove_dir(path_to_config_ts_db):
        logger.error("Failed to remove {}".format(path_to_config_ts_db))
        return False

    if not remove_dir(path_to_config_state):
        logger.error("Failed to remove {}".format(path_to_config_state))
        return False

    logger.info("Config state storage was successfully removed. Path was {}".format(path_to_config_state))

    return True
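
Migration scripts like migrate_all above are typically run standalone; a minimal entry point (the exit-code convention is an assumption):

import sys

if __name__ == "__main__":
    # Non-zero exit signals the packaging hooks that the migration failed
    sys.exit(0 if migrate_all() else 1)
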
Code example #5
    def __init__(self,
                 name,
                 nodeRegistry=None,
                 clientAuthNr=None,
                 ha=None,
                 cliname=None,
                 cliha=None,
                 config_helper=None,
                 ledger_dir: str = None,
                 keys_dir: str = None,
                 genesis_dir: str = None,
                 plugins_dir: str = None,
                 node_info_dir: str = None,
                 primaryDecider=None,
                 pluginPaths: Iterable[str] = None,
                 storage=None,
                 config=None):
        config = config or getConfig()

        config_helper = config_helper or NodeConfigHelper(name, config)

        ledger_dir = ledger_dir or config_helper.ledger_dir
        keys_dir = keys_dir or config_helper.keys_dir
        genesis_dir = genesis_dir or config_helper.genesis_dir
        plugins_dir = plugins_dir or config_helper.plugins_dir
        node_info_dir = node_info_dir or config_helper.node_info_dir

        # TODO: 5 ugly lines ahead, don't know how to avoid
        self.idrCache = None
        self.attributeStore = None
        self.stateTsDbStorage = None
        self.upgrader = None
        self.poolCfg = None

        super().__init__(name=name,
                         nodeRegistry=nodeRegistry,
                         clientAuthNr=clientAuthNr,
                         ha=ha,
                         cliname=cliname,
                         cliha=cliha,
                         config_helper=config_helper,
                         ledger_dir=ledger_dir,
                         keys_dir=keys_dir,
                         genesis_dir=genesis_dir,
                         plugins_dir=plugins_dir,
                         node_info_dir=node_info_dir,
                         primaryDecider=primaryDecider,
                         pluginPaths=pluginPaths,
                         storage=storage,
                         config=config)

        # TODO: ugly line ahead, don't know how to avoid
        self.clientAuthNr = clientAuthNr or self.defaultAuthNr()

        self.nodeMsgRouter.routes[Request] = self.processNodeRequest
        self.nodeAuthNr = self.defaultNodeAuthNr()
Code example #6
    def __init__(self,
                 name,
                 clientAuthNr=None,
                 ha=None,
                 cliname=None,
                 cliha=None,
                 config_helper=None,
                 ledger_dir: str = None,
                 keys_dir: str = None,
                 genesis_dir: str = None,
                 plugins_dir: str = None,
                 node_info_dir: str = None,
                 primaryDecider=None,
                 pluginPaths: Iterable[str] = None,
                 storage=None,
                 config=None,
                 bootstrap_cls=NodeBootstrap):
        config = config or getConfig()

        config_helper = config_helper or NodeConfigHelper(name, config)

        ledger_dir = ledger_dir or config_helper.ledger_dir
        keys_dir = keys_dir or config_helper.keys_dir
        genesis_dir = genesis_dir or config_helper.genesis_dir
        plugins_dir = plugins_dir or config_helper.plugins_dir
        node_info_dir = node_info_dir or config_helper.node_info_dir

        # TODO: 3 ugly lines ahead, don't know how to avoid
        self.upgrader = None
        self.restarter = None
        self.poolCfg = None

        super().__init__(name=name,
                         clientAuthNr=clientAuthNr,
                         ha=ha,
                         cliname=cliname,
                         cliha=cliha,
                         config_helper=config_helper,
                         ledger_dir=ledger_dir,
                         keys_dir=keys_dir,
                         genesis_dir=genesis_dir,
                         plugins_dir=plugins_dir,
                         node_info_dir=node_info_dir,
                         pluginPaths=pluginPaths,
                         storage=storage,
                         config=config,
                         bootstrap_cls=bootstrap_cls)

        # TODO: ugly line ahead, don't know how to avoid
        self.clientAuthNr = clientAuthNr or self.defaultAuthNr()

        self.nodeMsgRouter.routes[Request] = self.processNodeRequest
        self.nodeAuthNr = self.defaultNodeAuthNr()
        self.db_manager.set_txn_version_controller(TxnVersionController())
Code example #7
File: helper.py Project: phillipgibb/blockchain
def populate_log_with_upgrade_events(
        pool_txn_node_names, tdir, tconf, version: Tuple[str, str, str]):
    for nm in pool_txn_node_names:
        config_helper = NodeConfigHelper(nm, tconf, chroot=tdir)
        ledger_dir = config_helper.ledger_dir
        os.makedirs(ledger_dir)
        log = UpgradeLog(os.path.join(ledger_dir, tconf.upgradeLogFile))
        when = datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
        log.appendScheduled(when, version, randomString(10))
        log.appendStarted(when, version, randomString(10))
Code example #8
def migrate_all():
    node_name = get_node_name()
    if node_name is None:
        logger.error("Could not get node name")
        return False

    config = getConfig()
    config_helper = NodeConfigHelper(node_name, config)

    ledger_dir = config_helper.ledger_dir

    # 1. Archiving old ledger
    try:
        archive_old_ledger(node_name, ledger_dir)
    except Exception:
        logger.warning("Could not create an archive of old transactions ledger, proceed anyway")

    # 2. migrate txn log
    if migrate_txn_logs(ledger_dir):
        logger.info("All txn logs migrated successfully from old to new transaction format")
    else:
        logger.error("Txn log migration from old to new format failed!")
        return False

    # Rename new seq_no_db into old
    rename_seq_no_db(ledger_dir)

    # 3. migrate hash store
    if migrate_hash_stores(ledger_dir):
        logger.info("All hash stores migrated successfully from old to new transaction format")
    else:
        logger.error("Hash store migration from old to new format failed!")
        return False

    # 4. migrate states
    if migrate_states(ledger_dir):
        logger.info("All states migrated successfully from old to new transaction format")
    else:
        logger.error("State migration from old to new format failed!")
        return False

    # 5. migrate ts store
    if migrate_ts_store(ledger_dir):
        logger.info("Timestamp store migrated successfully from old to new transaction format")
    else:
        logger.error("Timestamp store migration from old to new format failed!")
        return False

    # 6. migrate bls signature store
    if migrate_bls_signature_store(ledger_dir):
        logger.info("BLS signature store migrated successfully from old to new transaction format")
    else:
        logger.error("BLS signature store migration from old to new format failed!")
        return False

    return True
Code example #9
def multiPoolNodesCreated(request,
                          tconf,
                          looper,
                          tdir,
                          cliTempLogger,
                          namesOfPools=("pool1", "pool2")):

    multiNodes = []
    for poolName in namesOfPools:
        newPoolTxnNodeNames = [
            poolName + n for n in ("Alpha", "Beta", "Gamma", "Delta")
        ]
        config_helper = ConfigHelper(tconf, chroot=tdir)
        ledger_dir = os.path.join(config_helper.ledger_base_dir, poolName)
        newPoolTxnData = getPoolTxnData(poolName, newPoolTxnNodeNames)
        newTdirWithPoolTxns = custom_tdir_with_pool_txns(
            newPoolTxnData, ledger_dir, tconf.poolTransactionsFile)
        newTdirWithDomainTxns = custom_tdir_with_domain_txns(
            newPoolTxnData, ledger_dir, domainTxnOrderedFields(),
            tconf.domainTransactionsFile)
        testPoolNode = TestMultiNode(poolName, newPoolTxnNodeNames, tdir,
                                     tconf, newPoolTxnData,
                                     newTdirWithPoolTxns,
                                     newTdirWithDomainTxns, None)

        poolCLIBabyGen = CliBuilder(tdir, newTdirWithPoolTxns,
                                    newTdirWithDomainTxns, looper, tconf,
                                    cliTempLogger)
        poolCLIBaby = next(poolCLIBabyGen(poolName))

        # Ugly hack to build several networks
        network_bak = tconf.NETWORK_NAME
        tconf.NETWORK_NAME = poolName
        tdirWithNodeKeepInited(tdir, tconf, NodeConfigHelper, newPoolTxnData,
                               newPoolTxnNodeNames)

        nodes = []
        for nm in newPoolTxnNodeNames:
            config_helper = NodeConfigHelper(nm, tconf, chroot=tdir)
            node = TestNode(nm,
                            config_helper=config_helper,
                            config=tconf,
                            pluginPaths=None)
            looper.add(node)
            nodes.append(node)
        looper.run(checkNodesConnected(nodes))
        ensureElectionsDone(looper=looper, nodes=nodes)

        poolCli = poolCLI(tdir, tconf, poolCLIBaby, newPoolTxnData,
                          newPoolTxnNodeNames, nodes)
        testPoolNode.poolCli = poolCli
        multiNodes.append(testPoolNode)
        tconf.NETWORK_NAME = network_bak

    return multiNodes
Code example #10
File: helper.py Project: zukobronja/indy-node
def start_stopped_node(stopped_node, looper, tconf, tdir, allPluginsPath):
    nodeHa, nodeCHa = HA(*stopped_node.nodestack.ha), HA(
        *stopped_node.clientstack.ha)
    config_helper = NodeConfigHelper(stopped_node.name, tconf, chroot=tdir)
    restarted_node = TestNode(stopped_node.name,
                              config_helper=config_helper,
                              config=tconf,
                              ha=nodeHa,
                              cliha=nodeCHa,
                              pluginPaths=allPluginsPath)
    looper.add(restarted_node)
    return restarted_node
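
A short usage sketch for start_stopped_node above, mirroring how other tests on this page reconnect a restarted node (checkNodesConnected and waitNodeDataEquality are the plenum test helpers already used here):

restarted_node = start_stopped_node(stopped_node, looper, tconf, tdir,
                                    allPluginsPath)
txnPoolNodeSet[-1] = restarted_node
looper.run(checkNodesConnected(txnPoolNodeSet))
waitNodeDataEquality(looper, restarted_node, *txnPoolNodeSet[:-1])
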
Code example #11
def get_pool_ledger(node_name):
    config = getConfig()
    config_helper = NodeConfigHelper(node_name, config)

    genesis_txn_initiator = GenesisTxnInitiatorFromFile(config_helper.genesis_dir,
                                                        config.poolTransactionsFile)
    hash_store = initHashStore(config_helper.ledger_dir, "pool", config)
    return Ledger(CompactMerkleTree(hashStore=hash_store),
                  dataDir=config_helper.ledger_dir,
                  fileName=config.poolTransactionsFile,
                  ensureDurability=config.EnsureLedgerDurability,
                  genesis_txn_initiator=genesis_txn_initiator)
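
A sketch of consuming the Ledger returned by get_pool_ledger above; getAllTxn() is plenum's iterator over (seq_no, txn) pairs:

ledger = get_pool_ledger("Node1")
# Walk every pool txn in sequence-number order
for seq_no, txn in ledger.getAllTxn():
    print(seq_no, txn)
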
Code example #12
def poolCLI(tdir, tconf, poolCLI_baby, poolTxnData, poolTxnNodeNames,
            txnPoolNodeSet):
    seeds = poolTxnData["seeds"]
    for nName in poolTxnNodeNames:
        seed = seeds[nName]
        use_bls = nName in poolTxnData['nodesWithBls']
        config_helper = NodeConfigHelper(nName, tconf, chroot=tdir)
        initNodeKeysForBothStacks(nName,
                                  config_helper.keys_dir,
                                  seed,
                                  override=True,
                                  use_bls=use_bls)
    for node in txnPoolNodeSet:
        poolCLI_baby.nodes[node.name] = node
    return poolCLI_baby
Code example #13
def create_local_pool(node_base_dir, cli_base_dir, config=None, node_size=4):
    conf = config or getConfig()

    stewards = []
    node_conf = []
    nodes = []
    genesis_txns = []
    for i in range(node_size):
        w = Wallet("steward")
        s = Steward(wallet=w)
        s.wallet.addIdentifier()

        stewards.append(s)

        node_config_helper = NodeConfigHelper(conf.name,
                                              conf,
                                              chroot=node_base_dir)
        n_config = adict(name='Node' + str(i + 1),
                         basedirpath=node_config_helper.ledger_dir,
                         ha=('127.0.0.1', 9700 + (i * 2)),
                         cliha=('127.0.0.1', 9700 + (i * 2) + 1))

        n_verkey, n_bls_key, n_bls_key_proof = \
            initialize_node_environment(name=n_config.name,
                                        node_config_helper=node_config_helper,
                                        override_keep=True,
                                        sigseed=randomSeed())

        s.set_node(n_config,
                   verkey=n_verkey,
                   blskey=n_bls_key,
                   blskey_proof=n_bls_key_proof)

        node_conf.append(n_config)

        genesis_txns += s.generate_genesis_txns()

    # Assumption: LocalPool takes the genesis txns, a base directory and a
    # steward; the original snippet left this constructor commented out
    pool = LocalPool(genesis_txns, cli_base_dir, steward=stewards[0])

    for c in node_conf:
        n = Node(**c)
        pool.add(n)
        nodes.append(n)

    pool.runFor(5)

    return pool
Code example #14
def test_node_does_not_reschedule_cancelled_upgrade_after_restart(
        upgradeScheduled, looper, nodeSet, validUpgrade,
        testNodeClass, tdir, tconf, allPluginsPath,
        trustee, trusteeWallet):

    # Cancel the scheduled upgrade
    valid_upgrade_cancel = deepcopy(validUpgrade)
    valid_upgrade_cancel[ACTION] = CANCEL
    del valid_upgrade_cancel[SCHEDULE]

    ensureUpgradeSent(looper, trustee, trusteeWallet, valid_upgrade_cancel)

    # Verify that no upgrade is scheduled now
    looper.run(
        eventually(
            checkNoUpgradeScheduled,
            nodeSet,
            retryWait=1,
            timeout=waits.expectedNoUpgradeScheduled()))

    # Restart all the nodes
    names = []
    while nodeSet:
        node = nodeSet.pop()
        names.append(node.name)
        node.cleanupOnStopping = False
        looper.removeProdable(node)
        node.stop()
        del node

    for nm in names:
        config_helper = NodeConfigHelper(nm, tconf, chroot=tdir)
        node = testNodeClass(nm, config_helper=config_helper,
                             config=tconf, pluginPaths=allPluginsPath)
        looper.add(node)
        nodeSet.append(node)

    looper.run(checkNodesConnected(nodeSet))
    ensureElectionsDone(looper=looper, nodes=nodeSet, retryWait=1)

    # Verify that still no upgrade is scheduled
    looper.run(
        eventually(
            checkNoUpgradeScheduled,
            nodeSet,
            retryWait=1,
            timeout=waits.expectedNoUpgradeScheduled()))
Code example #15
def test_valid_txn_with_fees(helpers, mint_tokens, fees_set,
                             nodeSetWithIntegratedTokenPlugin, looper,
                             address_main, addresses, tdir, tconf):
    seq_no = get_seq_no(mint_tokens)
    remaining = 1000
    last_node = nodeSetWithIntegratedTokenPlugin[-1]
    last_node.cleanupOnStopping = False
    last_node.stop()
    looper.removeProdable(last_node)

    nodeSetWithIntegratedTokenPlugin = nodeSetWithIntegratedTokenPlugin[:-1]

    for address in addresses:
        inputs = [{
            "source": utxo_from_addr_and_seq_no(address_main, seq_no)
        }]
        outputs = [
            {ADDRESS: address, AMOUNT: 1},
            {ADDRESS: address_main, AMOUNT: remaining - 2}, # XFER fee is 1
        ]
        request = helpers.request.transfer(inputs, outputs)
        response = helpers.sdk.send_and_check_request_objects([request])
        result = helpers.sdk.get_first_result(response)
        seq_no = get_seq_no(result)
        remaining -= 2

    for _ in range(5):
        pay_fees(helpers, fees_set, address_main)

    config_helper = NodeConfigHelper(last_node.name, tconf, chroot=tdir)
    restarted_node = TestNode(last_node.name,
                              config_helper=config_helper,
                              config=tconf, ha=last_node.nodestack.ha,
                              cliha=last_node.clientstack.ha)

    integrate_token_plugin_in_node(restarted_node)
    integrate_fees_plugin_in_node(restarted_node)

    tl = restarted_node.getLedger(TOKEN_LEDGER_ID)
    for node in nodeSetWithIntegratedTokenPlugin:
        token_ledger = node.getLedger(TOKEN_LEDGER_ID)
        assert token_ledger.size > tl.size

    looper.add(restarted_node)
    nodeSetWithIntegratedTokenPlugin.append(restarted_node)

    ensure_all_nodes_have_same_data(looper, nodeSetWithIntegratedTokenPlugin)
Code example #16
    # NOTE: the snippet was truncated here; the parser setup below is
    # reconstructed from the argument usage that follows, and the help
    # strings are assumptions
    parser = argparse.ArgumentParser()
    parser.add_argument('--infpath', required=True,
                        help='path to the input txns file')
    parser.add_argument('--env_file',
                        help='path to the indy env file',
                        default='/etc/indy/indy.env')
    args = parser.parse_args()
    path_to_txns = os.path.realpath(args.infpath)
    path_to_env = os.path.realpath(args.env_file)

    if not os.path.exists(path_to_txns):
        print("Path to txns file does not exist")
        sys.exit(1)

    if not os.path.exists(path_to_env):
        print("Path to env file does not exist")
        sys.exit(1)

    nname, nport, ncliport = get_ha_cliha_node_name(path_to_env)
    ha = HA("0.0.0.0", nport)
    cliha = HA("0.0.0.0", ncliport)
    config_helper = NodeConfigHelper(nname, config)

    node = Node(nname,
                ha=ha,
                cliha=cliha,
                config_helper=config_helper,
                config=config)
    i = 0
    with open(path_to_txns) as txns:
        for txn in txns:
            node.domainLedger.add(json.loads(txn))
            i += 1
            if not i % 1000:
                print("added {} txns".format(i))
Code example #17
File: node.py Project: mgbailey/indy-node
    def __init__(self,
                 name,
                 clientAuthNr=None,
                 ha=None,
                 cliname=None,
                 cliha=None,
                 config_helper=None,
                 ledger_dir: str = None,
                 keys_dir: str = None,
                 genesis_dir: str = None,
                 plugins_dir: str = None,
                 node_info_dir: str = None,
                 primaryDecider=None,
                 pluginPaths: Iterable[str] = None,
                 storage=None,
                 config=None):
        config = config or getConfig()

        config_helper = config_helper or NodeConfigHelper(name, config)

        ledger_dir = ledger_dir or config_helper.ledger_dir
        keys_dir = keys_dir or config_helper.keys_dir
        genesis_dir = genesis_dir or config_helper.genesis_dir
        plugins_dir = plugins_dir or config_helper.plugins_dir
        node_info_dir = node_info_dir or config_helper.node_info_dir

        # TODO: 5 ugly lines ahead, don't know how to avoid
        self.idrCache = None
        self.attributeStore = None
        self.upgrader = None
        self.restarter = None
        self.poolCfg = None

        super().__init__(name=name,
                         clientAuthNr=clientAuthNr,
                         ha=ha,
                         cliname=cliname,
                         cliha=cliha,
                         config_helper=config_helper,
                         ledger_dir=ledger_dir,
                         keys_dir=keys_dir,
                         genesis_dir=genesis_dir,
                         plugins_dir=plugins_dir,
                         node_info_dir=node_info_dir,
                         primaryDecider=primaryDecider,
                         pluginPaths=pluginPaths,
                         storage=storage,
                         config=config)

        self.upgrader = self.init_upgrader()
        self.restarter = self.init_restarter()
        self.poolCfg = self.init_pool_config()

        # TODO: ugly line ahead, don't know how to avoid
        self.clientAuthNr = clientAuthNr or self.defaultAuthNr()

        self.nodeMsgRouter.routes[Request] = self.processNodeRequest
        self.nodeAuthNr = self.defaultNodeAuthNr()

        # Will be refactored soon
        self.get_req_handler(CONFIG_LEDGER_ID).upgrader = self.upgrader
        self.get_req_handler(CONFIG_LEDGER_ID).poolCfg = self.poolCfg
        self.actionReqHandler.poolCfg = self.poolCfg
        self.actionReqHandler.restarter = self.restarter
Code example #18
File: node.py Project: wittrock/indy-node
    def __init__(self,
                 name,
                 nodeRegistry=None,
                 clientAuthNr=None,
                 ha=None,
                 cliname=None,
                 cliha=None,
                 config_helper=None,
                 ledger_dir: str = None,
                 keys_dir: str = None,
                 genesis_dir: str = None,
                 plugins_dir: str = None,
                 node_info_dir: str = None,
                 primaryDecider=None,
                 pluginPaths: Iterable[str] = None,
                 storage=None,
                 config=None):
        config = config or getConfig()

        config_helper = config_helper or NodeConfigHelper(name, config)

        ledger_dir = ledger_dir or config_helper.ledger_dir
        keys_dir = keys_dir or config_helper.keys_dir
        genesis_dir = genesis_dir or config_helper.genesis_dir
        plugins_dir = plugins_dir or config_helper.plugins_dir
        node_info_dir = node_info_dir or config_helper.node_info_dir

        # TODO: 2 ugly lines ahead, don't know how to avoid
        self.idrCache = None
        self.attributeStore = None

        super().__init__(name=name,
                         nodeRegistry=nodeRegistry,
                         clientAuthNr=clientAuthNr,
                         ha=ha,
                         cliname=cliname,
                         cliha=cliha,
                         config_helper=config_helper,
                         ledger_dir=ledger_dir,
                         keys_dir=keys_dir,
                         genesis_dir=genesis_dir,
                         plugins_dir=plugins_dir,
                         node_info_dir=node_info_dir,
                         primaryDecider=primaryDecider,
                         pluginPaths=pluginPaths,
                         storage=storage,
                         config=config)

        # TODO: ugly line ahead, don't know how to avoid
        self.clientAuthNr = clientAuthNr or self.defaultAuthNr()

        self.configLedger = self.getConfigLedger()
        self.ledgerManager.addLedger(
            CONFIG_LEDGER_ID,
            self.configLedger,
            postCatchupCompleteClbk=self.postConfigLedgerCaughtUp,
            postTxnAddedToLedgerClbk=self.postTxnFromCatchupAddedToLedger)
        self.on_new_ledger_added(CONFIG_LEDGER_ID)
        self.states[CONFIG_LEDGER_ID] = self.loadConfigState()
        self.upgrader = self.getUpgrader()
        self.poolCfg = self.getPoolConfig()
        self.configReqHandler = self.getConfigReqHandler()
        self.initConfigState()
        self.register_req_handler(CONFIG_LEDGER_ID, self.configReqHandler)
        self.requestExecuter[CONFIG_LEDGER_ID] = self.executeConfigTxns

        self.nodeMsgRouter.routes[Request] = self.processNodeRequest
        self.nodeAuthNr = self.defaultNodeAuthNr()
Code example #19
def test_state_recovery_with_xfer(looper, tconf, tdir,
                                  sdk_pool_handle,
                                  sdk_wallet_trustee,
                                  allPluginsPath,
                                  do_post_node_creation,
                                  nodeSetWithIntegratedTokenPlugin,
                                  helpers,
                                  valid_upgrade,
                                  mint_tokens,
                                  addresses,
                                  fees_set, fees,
                                  monkeypatch):
    version1 = "1.1.50"
    version2 = "1.1.88"
    current_amount = get_amount_from_token_txn(mint_tokens)
    seq_no = 1
    node_set = nodeSetWithIntegratedTokenPlugin

    current_amount, seq_no, _ = send_and_check_nym_with_fees(helpers, fees_set, seq_no, looper, addresses,
                                                             current_amount)
    # send POOL_UPGRADE to write in a ledger
    last_ordered = node_set[0].master_last_ordered_3PC[1]
    sdk_ensure_upgrade_sent(looper, sdk_pool_handle, sdk_wallet_trustee,
                            valid_upgrade)
    looper.run(eventually(lambda: assertEquality(node_set[0].master_last_ordered_3PC[1],
                                                 last_ordered + 1)))

    send_node_upgrades(node_set, version1, looper)
    for n in node_set:
        handler = n.write_manager.request_handlers.get(XFER_PUBLIC)[0]
        handler_for_1_0_0 = n.write_manager._request_handlers_with_version.get((XFER_PUBLIC, "1.0.0"))[0]
        monkeypatch.setattr(handler, 'update_state',
                            handler_for_1_0_0.update_state)

    current_amount, seq_no, _ = send_and_check_transfer(helpers, [addresses[0], addresses[1]], fees_set, looper,
                                                        current_amount, seq_no,
                                                        transfer_summ=current_amount)
    send_node_upgrades(node_set, version2, looper)
    monkeypatch.undo()
    current_amount, seq_no, _ = send_and_check_transfer(helpers, [addresses[1], addresses[0]], fees_set, looper,
                                                        current_amount, seq_no,
                                                        transfer_summ=current_amount)

    node_to_stop = node_set[-1]
    state_db_paths = [state._kv.db_path
                      for state in node_to_stop.states.values()]
    node_to_stop.cleanupOnStopping = False
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop, node_set[:-1])

    for path in state_db_paths:
        shutil.rmtree(path)
    config_helper = NodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(
        node_to_stop.name,
        config_helper=config_helper,
        config=tconf,
        pluginPaths=allPluginsPath,
        ha=node_to_stop.nodestack.ha,
        cliha=node_to_stop.clientstack.ha)
    do_post_node_creation(restarted_node)

    looper.add(restarted_node)
    node_set[-1] = restarted_node

    looper.run(checkNodesConnected(node_set))
    waitNodeDataEquality(looper, restarted_node, *node_set[:-1], exclude_from_check=['check_last_ordered_3pc_backup'])
    current_amount, seq_no, _ = send_and_check_transfer(helpers, [addresses[0], addresses[1]], {}, looper,
                                                        current_amount, seq_no,
                                                        transfer_summ=1)
    waitNodeDataEquality(looper, restarted_node, *node_set[:-1], exclude_from_check=['check_last_ordered_3pc_backup'])
Code example #20
def test_state_regenerated_from_ledger(looper, tdirWithClientPoolTxns,
                                       tdirWithDomainTxnsUpdated, nodeSet,
                                       tconf, tdir, trustee, trusteeWallet,
                                       allPluginsPath):
    """
    Node loses its state database but recreates it from ledger after start.
    Checking ATTRIB txns too since they store some data off ledger too
    """
    trust_anchors = []
    for i in range(5):
        trust_anchors.append(
            getClientAddedWithRole(nodeSet,
                                   tdirWithClientPoolTxns,
                                   looper,
                                   trustee,
                                   trusteeWallet,
                                   'TA' + str(i),
                                   role=TRUST_ANCHOR))
        addRawAttribute(looper,
                        *trust_anchors[-1],
                        randomString(6),
                        randomString(10),
                        dest=trust_anchors[-1][1].defaultId)

    for tc, tw in trust_anchors:
        for i in range(3):
            getClientAddedWithRole(nodeSet, tdirWithClientPoolTxns, looper, tc,
                                   tw, 'NP1' + str(i))

    ensure_all_nodes_have_same_data(looper, nodeSet)

    node_to_stop = nodeSet[-1]
    node_state = node_to_stop.states[DOMAIN_LEDGER_ID]
    assert not node_state.isEmpty
    state_db_path = node_state._kv.db_path
    node_to_stop.cleanupOnStopping = False
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop.name, nodeSet[:-1])

    shutil.rmtree(state_db_path)

    config_helper = NodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(node_to_stop.name,
                              config_helper=config_helper,
                              config=tconf,
                              pluginPaths=allPluginsPath,
                              ha=node_to_stop.nodestack.ha,
                              cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    nodeSet[-1] = restarted_node

    looper.run(checkNodesConnected(nodeSet))
    # Need some time as `last_ordered_3PC` is compared too and that is
    # communicated through catchup
    waitNodeDataEquality(looper, restarted_node, *nodeSet[:-1])

    # Pool is still functional
    for tc, tw in trust_anchors:
        getClientAddedWithRole(nodeSet, tdirWithClientPoolTxns, looper, tc, tw,
                               'NP--{}'.format(tc.name))

    ensure_all_nodes_have_same_data(looper, nodeSet)
Code example #21
def test_new_node_catchup_update_projection(looper,
                                            nodeSet, tconf, tdir,
                                            sdk_pool_handle,
                                            sdk_wallet_trustee,
                                            allPluginsPath,
                                            some_transactions_done):
    """
    A node which receives txns from catchup updates both ledger and projection
    4 nodes start up and some txns happen, after txns are done, new node joins
    and starts catching up, the node should not process requests while catchup
    is in progress. Make sure the new requests are coming from the new NYMs
    added while the node was offline or catching up.
    """
    # Create a new node and stop it.

    new_steward_wallet, new_node = sdk_node_theta_added(looper,
                                                        nodeSet,
                                                        tdir,
                                                        tconf,
                                                        sdk_pool_handle,
                                                        sdk_wallet_trustee,
                                                        allPluginsPath,
                                                        node_config_helper_class=NodeConfigHelper,
                                                        testNodeClass=TestNode)

    waitNodeDataEquality(looper, new_node, *nodeSet[:-1])
    ta_count = 2
    np_count = 2
    new_txn_count = 2 * ta_count + np_count  # Since ATTRIB txn is done for TA
    old_ledger_sizes = {}
    new_ledger_sizes = {}
    old_projection_sizes = {}
    new_projection_sizes = {}
    old_seq_no_map_sizes = {}
    new_seq_no_map_sizes = {}

    def get_ledger_size(node):
        return len(node.domainLedger)

    def get_projection_size(node):
        domain_state = node.getState(DOMAIN_LEDGER_ID)
        return len(domain_state.as_dict)

    def get_seq_no_map_size(node):
        return node.seqNoDB.size

    def fill_counters(ls, ps, ss, nodes):
        for n in nodes:
            ls[n.name] = get_ledger_size(n)
            ps[n.name] = get_projection_size(n)
            ss[n.name] = get_seq_no_map_size(n)

    def check_sizes(nodes):
        for node in nodes:
            assert new_ledger_sizes[node.name] - \
                   old_ledger_sizes[node.name] == new_txn_count
            assert new_projection_sizes[node.name] - \
                   old_projection_sizes[node.name] == new_txn_count
            assert new_seq_no_map_sizes[node.name] - \
                   old_seq_no_map_sizes[node.name] == new_txn_count

    # Stop a node and note down the sizes of ledger and projection (state)
    other_nodes = nodeSet[:-1]
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  other_nodes)
    new_node.cleanupOnStopping = False
    new_node.stop()
    looper.removeProdable(new_node)
    ensure_node_disconnected(looper, new_node, other_nodes)

    trust_anchors = []
    attributes = []
    for i in range(ta_count):
        trust_anchors.append(
            sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee,
                            role='TRUST_ANCHOR', alias='TA' + str(i)))
        attributes.append((randomString(6), randomString(10)))
        sdk_add_raw_attribute(looper, sdk_pool_handle, trust_anchors[-1],
                              *attributes[-1])
    non_privileged = []
    for i in range(np_count):
        non_privileged.append(
            sdk_add_new_nym(looper, sdk_pool_handle, sdk_wallet_trustee,
                            alias='NP' + str(i)))

    checkNodeDataForEquality(nodeSet[0], *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  other_nodes)
    # The size difference should be same as number of new NYM txns
    check_sizes(other_nodes)

    config_helper = NodeConfigHelper(new_node.name, tconf, chroot=tdir)
    new_node = TestNode(
        new_node.name,
        config_helper=config_helper,
        config=tconf,
        pluginPaths=allPluginsPath,
        ha=new_node.nodestack.ha,
        cliha=new_node.clientstack.ha)
    looper.add(new_node)
    nodeSet[-1] = new_node
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  [new_node])
    looper.run(checkNodesConnected(nodeSet))
    waitNodeDataEquality(looper, new_node, *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  [new_node])
    check_sizes([new_node])

    # Set the old counters to be current ledger and projection size
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  nodeSet)

    more_nyms_count = 2
    for wh in trust_anchors:
        for i in range(more_nyms_count):
            non_privileged.append(sdk_add_new_nym(looper, sdk_pool_handle, wh,
                                                  alias='NP1' + str(i)))

    # The new node should process transactions done by Nyms added to its
    # ledger while catchup
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  nodeSet)
    new_txn_count = more_nyms_count * len(trust_anchors)
    check_sizes(nodeSet)
Code example #22
def test_new_node_catchup_update_projection(looper, tdirWithClientPoolTxns,
                                            tdirWithDomainTxnsUpdated, nodeSet,
                                            tconf, tdir, trustee,
                                            trusteeWallet, allPluginsPath,
                                            some_transactions_done):
    """
    A node which receives txns from catchup updates both ledger and projection
    4 nodes start up and some txns happen, after txns are done, new node joins
    and starts catching up, the node should not process requests while catchup
    is in progress. Make sure the new requests are coming from the new NYMs
    added while the node was offline or catching up.
    """
    # Create a new node and stop it.

    new_steward, new_steward_wallet, new_node = nodeThetaAdded(
        looper, nodeSet, tdirWithClientPoolTxns, tconf, trustee, trusteeWallet,
        allPluginsPath, TestNode, TestClient, NodeConfigHelper, tdir)

    waitNodeDataEquality(looper, new_node, *nodeSet[:-1])
    ta_count = 2
    np_count = 2
    new_txn_count = 2 * ta_count + np_count  # Since ATTRIB txn is done for TA
    old_ledger_sizes = {}
    new_ledger_sizes = {}
    old_projection_sizes = {}
    new_projection_sizes = {}
    old_seq_no_map_sizes = {}
    new_seq_no_map_sizes = {}

    def get_ledger_size(node):
        return len(node.domainLedger)

    def get_projection_size(node):
        domain_state = node.getState(DOMAIN_LEDGER_ID)
        return len(domain_state.as_dict)

    def get_seq_no_map_size(node):
        return node.seqNoDB.size

    def fill_counters(ls, ps, ss, nodes):
        for n in nodes:
            ls[n.name] = get_ledger_size(n)
            ps[n.name] = get_projection_size(n)
            ss[n.name] = get_seq_no_map_size(n)

    def check_sizes(nodes):
        for node in nodes:
            assert new_ledger_sizes[node.name] - \
                old_ledger_sizes[node.name] == new_txn_count
            assert new_projection_sizes[node.name] - \
                old_projection_sizes[node.name] == new_txn_count
            assert new_seq_no_map_sizes[node.name] - \
                old_seq_no_map_sizes[node.name] == new_txn_count

    # Stop a node and note down the sizes of ledger and projection (state)
    other_nodes = nodeSet[:-1]
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  other_nodes)
    new_node.cleanupOnStopping = False
    new_node.stop()
    looper.removeProdable(new_node)
    ensure_node_disconnected(looper, new_node.name, other_nodes)

    trust_anchors = []
    attributes = []
    for i in range(ta_count):
        trust_anchors.append(
            getClientAddedWithRole(other_nodes,
                                   tdirWithClientPoolTxns,
                                   looper,
                                   trustee,
                                   trusteeWallet,
                                   'TA' + str(i),
                                   role=TRUST_ANCHOR,
                                   client_connects_to=len(other_nodes)))
        attributes.append((randomString(6), randomString(10)))
        addRawAttribute(looper,
                        *trust_anchors[-1],
                        *attributes[-1],
                        dest=trust_anchors[-1][1].defaultId)
    non_privileged = []
    for i in range(np_count):
        non_privileged.append(
            getClientAddedWithRole(other_nodes,
                                   tdirWithClientPoolTxns,
                                   looper,
                                   trustee,
                                   trusteeWallet,
                                   'NP' + str(i),
                                   client_connects_to=len(other_nodes)))

    checkNodeDataForEquality(nodeSet[0], *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  other_nodes)
    # The size difference should be same as number of new NYM txns
    check_sizes(other_nodes)

    config_helper = NodeConfigHelper(new_node.name, tconf, chroot=tdir)
    new_node = TestNode(new_node.name,
                        config_helper=config_helper,
                        config=tconf,
                        pluginPaths=allPluginsPath,
                        ha=new_node.nodestack.ha,
                        cliha=new_node.clientstack.ha)
    looper.add(new_node)
    nodeSet[-1] = new_node
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  [new_node])
    looper.run(checkNodesConnected(nodeSet))
    waitNodeDataEquality(looper, new_node, *other_nodes)
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  [new_node])
    check_sizes([new_node])

    for i, (tc, tw) in enumerate(trust_anchors):
        # To prevent sending of 'get_attr' to just one node
        tc._read_only_requests = set()

        reply = getAttribute(looper, tc, tw, tw.defaultId, *attributes[i])
        all_replies = tc.getRepliesFromAllNodes(reply[f.IDENTIFIER.nm],
                                                reply[f.REQ_ID.nm])
        assertLength(all_replies, len(nodeSet))
        assert new_node.clientstack.name in all_replies

    # Set the old counters to be current ledger and projection size
    fill_counters(old_ledger_sizes, old_projection_sizes, old_seq_no_map_sizes,
                  nodeSet)

    more_nyms_count = 2
    for tc, tw in trust_anchors:
        for i in range(more_nyms_count):
            non_privileged.append(
                getClientAddedWithRole(other_nodes, tdirWithClientPoolTxns,
                                       looper, tc, tw, 'NP1' + str(i)))

    # The new node should process transactions done by Nyms added to its
    # ledger while catchup
    fill_counters(new_ledger_sizes, new_projection_sizes, new_seq_no_map_sizes,
                  nodeSet)
    new_txn_count = more_nyms_count * len(trust_anchors)
    check_sizes(nodeSet)
Code example #23
def migrate_all():
    node_name = get_node_name()
    if node_name is None:
        logger.error("Could not get node name")
        return False

    config = getConfig()
    config_helper = NodeConfigHelper(node_name, config)

    leveldb_ledger_dir = config_helper.ledger_dir
    rocksdb_ledger_dir = os.path.join(config_helper.ledger_data_dir,
                                      node_name + "_rocksdb")
    if os.path.exists(rocksdb_ledger_dir):
        logger.error(
            "Temporary directory for RocksDB-based ledger exists, please remove: {}"
            .format(rocksdb_ledger_dir))
        return False

    try:
        os.mkdir(rocksdb_ledger_dir)
    except Exception:
        logger.error(traceback.format_exc())
        logger.error(
            "Could not create temporary directory for RocksDB-based ledger: {}"
            .format(rocksdb_ledger_dir))
        return False

    logger.info("Starting migration of storages from LevelDB to RocksDB...")

    if migrate_storages(leveldb_ledger_dir, rocksdb_ledger_dir):
        logger.info(
            "All storages migrated successfully from LevelDB to RocksDB")
    else:
        logger.error("Storages migration from LevelDB to RocksDB failed!")
        shutil.rmtree(rocksdb_ledger_dir)
        return False

    # Archiving LevelDB-based ledger
    try:
        archive_leveldb_ledger(node_name, leveldb_ledger_dir)
    except Exception:
        logger.warning(
            "Could not create an archive of LevelDB-based ledger, proceed anyway"
        )

    # TODO: it would be nice to open the new RocksDB-based ledger
    # and compare root hashes with LevelDB-based ledger here

    # Remove LevelDB-based ledger
    try:
        shutil.rmtree(leveldb_ledger_dir)
    except Exception:
        logger.error(traceback.format_exc())
        logger.error("Could not remove LevelDB-based ledger: {}".format(
            leveldb_ledger_dir))
        shutil.rmtree(rocksdb_ledger_dir)
        return False

    ledger_dir = leveldb_ledger_dir

    try:
        shutil.move(rocksdb_ledger_dir, ledger_dir)
    except Exception:
        logger.error(traceback.format_exc())
        logger.error(
            "Could not rename temporary RocksDB-based ledger from '{}' to '{}'"
            .format(rocksdb_ledger_dir, ledger_dir))
        shutil.rmtree(rocksdb_ledger_dir)
        return False

    set_own_perm("indy", ledger_dir)

    return True
Code example #24
def test_auth_txn_with_deprecated_key(tconf, tdir, allPluginsPath,
                                      txnPoolNodeSet, looper,
                                      sdk_wallet_trustee, sdk_pool_handle):
    """
    Add to the auth_map a fake rule
    Send AUTH_RULE txn to change this fake rule (and set the fake key to the config state)
    Send GET_AUTH_RULE txn and check that the fake rule was changed
    Remove the fake auth rule from the map
    Check that we can't get the fake auth rule
    Restart the last node with its state regeneration
    Check that nodes data is equal after changing the existing auth rule (restarted node regenerate config state)
    """

    fake_txn_type = "100002"
    fake_key = AuthActionAdd(txn_type=fake_txn_type, field="*",
                             value="*").get_action_id()
    fake_constraint = one_trustee_constraint
    new_auth_constraint = AuthConstraint(role=STEWARD,
                                         sig_count=1,
                                         need_to_be_owner=False).as_dict

    # Add to the auth_map a fake rule
    with extend_auth_map(txnPoolNodeSet, fake_key, fake_constraint):
        # Send AUTH_RULE txn to change this fake rule (and set the fake key to the config state)
        sdk_send_and_check_auth_rule_request(looper,
                                             sdk_pool_handle,
                                             sdk_wallet_trustee,
                                             auth_action=ADD_PREFIX,
                                             auth_type=fake_txn_type,
                                             field='*',
                                             new_value='*',
                                             constraint=new_auth_constraint)
        # Send GET_AUTH_RULE txn and check that the fake rule was changed
        result = sdk_send_and_check_get_auth_rule_request(
            looper,
            sdk_pool_handle,
            sdk_wallet_trustee,
            auth_type=fake_txn_type,
            auth_action=ADD_PREFIX,
            field="*",
            new_value="*")[0][1]["result"][DATA][0]
        assert result[AUTH_TYPE] == fake_txn_type
        assert result[CONSTRAINT] == new_auth_constraint

    # Remove the fake auth rule from the map
    # Check that we can't get the fake auth rule
    with pytest.raises(RequestNackedException,
                       match="not found in authorization map"):
        sdk_send_and_check_auth_rule_request(
            looper,
            sdk_pool_handle,
            sdk_wallet_trustee,
            auth_action=ADD_PREFIX,
            auth_type=fake_txn_type,
            field='*',
            new_value='*',
            constraint=AuthConstraint(role=STEWARD,
                                      sig_count=2,
                                      need_to_be_owner=False).as_dict)

    resp = sdk_send_and_check_get_auth_rule_request(looper, sdk_pool_handle,
                                                    sdk_wallet_trustee)

    assert all(rule[AUTH_TYPE] != fake_txn_type
               for rule in resp[0][1]["result"][DATA])

    with pytest.raises(RequestNackedException,
                       match="not found in authorization map"):
        sdk_send_and_check_get_auth_rule_request(looper,
                                                 sdk_pool_handle,
                                                 sdk_wallet_trustee,
                                                 auth_type=fake_txn_type,
                                                 auth_action=ADD_PREFIX,
                                                 field="*",
                                                 new_value="*")
    # Restart the last node with its state regeneration
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet)

    node_to_stop = txnPoolNodeSet[-1]
    node_state = node_to_stop.states[CONFIG_LEDGER_ID]
    assert not node_state.isEmpty
    state_db_path = node_state._kv.db_path
    node_to_stop.cleanupOnStopping = False
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop, txnPoolNodeSet[:-1])

    shutil.rmtree(state_db_path)

    config_helper = NodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(node_to_stop.name,
                              config_helper=config_helper,
                              config=tconf,
                              pluginPaths=allPluginsPath,
                              ha=node_to_stop.nodestack.ha,
                              cliha=node_to_stop.clientstack.ha)
    looper.add(restarted_node)
    txnPoolNodeSet[-1] = restarted_node

    # Check that nodes data is equal (restarted node regenerate config state)
    looper.run(checkNodesConnected(txnPoolNodeSet))
    ensureElectionsDone(looper, txnPoolNodeSet, customTimeout=30)
    sdk_send_and_check_auth_rule_request(looper,
                                         sdk_pool_handle,
                                         sdk_wallet_trustee,
                                         auth_action=ADD_PREFIX,
                                         auth_type=NYM,
                                         field=ROLE,
                                         new_value=STEWARD,
                                         constraint=AuthConstraint(
                                             role=STEWARD,
                                             sig_count=2,
                                             need_to_be_owner=False).as_dict)
    ensure_all_nodes_have_same_data(looper, txnPoolNodeSet, custom_timeout=20)
Code example #25
    parser = argparse.ArgumentParser(
        description="Generate keys for a node's stack "
                    "by taking the node's name and seed value")

    parser.add_argument('--name', required=True, help='node name')
    parser.add_argument('--seed', required=False, type=str,
                        help='seed for keypair')
    parser.add_argument('--force', help='overrides keys', action='store_true')
    args = parser.parse_args()

    print("Node-stack name is", args.name)
    print("Client-stack name is", args.name + CLIENT_STACK_SUFFIX)


    config = getConfig()
    config_helper = NodeConfigHelper(args.name, config)
    os.makedirs(config_helper.keys_dir, exist_ok=True)

    try:
        _, verkey, blskey, key_proof = initNodeKeysForBothStacks(
            args.name, config_helper.keys_dir, args.seed, override=args.force)

        print()
        print("Here are your keys: ")
        print()
        print("Verkey: ")
        print(verkey)
        print("BLS: ")
        print(blskey)
        print("Key proof: ")
        print(key_proof)
    except Exception as ex:
        # The original snippet was truncated before the except clause;
        # this minimal handler is an assumption
        print("Failed to initialize keys: {}".format(ex))
Code example #26
def test_state_recover_from_ledger(looper, tconf, tdir, sdk_pool_handle,
                                   sdk_wallet_trustee, allPluginsPath,
                                   fees_set, mint_tokens, addresses, fees,
                                   do_post_node_creation,
                                   nodeSetWithIntegratedTokenPlugin, helpers):
    node_set = nodeSetWithIntegratedTokenPlugin
    current_amount = get_amount_from_token_txn(mint_tokens)
    seq_no = 1

    current_amount, seq_no, _ = send_and_check_nym_with_fees(
        helpers, fees_set, seq_no, looper, addresses, current_amount)
    current_amount, seq_no, _ = send_and_check_transfer(
        helpers, [addresses[0], addresses[1]],
        fees,
        looper,
        current_amount,
        seq_no,
        transfer_summ=current_amount)

    current_amount, seq_no, _ = send_and_check_transfer(
        helpers, [addresses[1], addresses[2]],
        fees,
        looper,
        current_amount,
        seq_no,
        transfer_summ=current_amount)

    ensure_all_nodes_have_same_data(looper, node_set)

    node_to_stop = node_set[-1]
    state_db_paths = [
        state._kv.db_path for state in node_to_stop.states.values()
    ]
    node_to_stop.cleanupOnStopping = False
    node_to_stop.stop()
    looper.removeProdable(node_to_stop)
    ensure_node_disconnected(looper, node_to_stop, node_set[:-1])

    for path in state_db_paths:
        shutil.rmtree(path)
    config_helper = NodeConfigHelper(node_to_stop.name, tconf, chroot=tdir)
    restarted_node = TestNode(node_to_stop.name,
                              config_helper=config_helper,
                              config=tconf,
                              pluginPaths=allPluginsPath,
                              ha=node_to_stop.nodestack.ha,
                              cliha=node_to_stop.clientstack.ha)
    do_post_node_creation(restarted_node)

    looper.add(restarted_node)
    node_set[-1] = restarted_node

    looper.run(checkNodesConnected(node_set))
    waitNodeDataEquality(looper, restarted_node, *node_set[:-1])

    ensure_all_nodes_have_same_data(looper, node_set)

    current_amount, seq_no, _ = send_and_check_transfer(
        helpers, [addresses[2], addresses[0]],
        fees,
        looper,
        current_amount,
        seq_no,
        transfer_summ=current_amount)

    current_amount, seq_no, _ = send_and_check_nym_with_fees(
        helpers, fees_set, seq_no, looper, addresses, current_amount)
    current_amount, seq_no, _ = send_and_check_nym_with_fees(
        helpers, fees_set, seq_no, looper, addresses, current_amount)

    ensure_all_nodes_have_same_data(looper, node_set)