Esempio n. 1
0
def tdirWithPoolTxns(poolTxnData, tdir, tconf):
    """Write every pool transaction from *poolTxnData* into a ledger stored
    under *tdir* and return that directory."""
    pool_ledger = Ledger(CompactMerkleTree(),
                         dataDir=tdir,
                         fileName=tconf.poolTransactionsFile)
    for pool_txn in poolTxnData["txns"]:
        pool_ledger.add(pool_txn)
    return tdir
Esempio n. 2
0
def updateGenesisPoolTxnFile(genesisTxnDir, genesisTxnFile, txn):
    """Append *txn* to the genesis pool transaction file, but only when its
    sequence number is exactly one past the current ledger size.

    :param genesisTxnDir: directory holding the genesis transaction file
    :param genesisTxnFile: name of the genesis transaction file
    :param txn: transaction dict; must contain the ``F.seqNo.name`` key
    """
    # The lock is an advisory lock, it might not work on linux filesystems
    # not mounted with option `-o mand`, another approach can be to use a .lock
    # file to indicate presence or absence of .lock
    try:
        # Exclusively lock file in a non blocking manner. Locking is necessary
        # since there might be multiple clients running on a machine so genesis
        # files should be updated safely.
        # TODO: There is no automated test in the codebase that confirms it.
        # It has only been manually tested in the python terminal. Add a test
        # for it using multiple processes writing concurrently
        with portalocker.Lock(os.path.join(genesisTxnDir, genesisTxnFile),
                              flags=portalocker.LOCK_EX | portalocker.LOCK_NB):
            seqNo = txn[F.seqNo.name]
            ledger = Ledger(CompactMerkleTree(hashStore=FileHashStore(
                dataDir=genesisTxnDir)), dataDir=genesisTxnDir,
                fileName=genesisTxnFile)
            ledgerSize = len(ledger)
            # Append only the immediately-next transaction, excluding the
            # seqNo field from the stored record.
            if seqNo - ledgerSize == 1:
                ledger.add({k: v for k, v in txn.items()
                            if k != F.seqNo.name})
                logger.debug('Adding transaction with sequence number {} in'
                             ' genesis pool transaction file'.format(seqNo))
            else:
                logger.debug('Already {} genesis pool transactions present so '
                             'transaction with sequence number {} '
                             'not applicable'.format(ledgerSize, seqNo))
    # Fix: the original tuple listed the same LockException twice; also log
    # the failure instead of silently discarding it (still best-effort).
    except portalocker.exceptions.LockException as ex:
        logger.debug('Could not lock genesis pool transaction file: {}'
                     .format(ex))
        return
Esempio n. 3
0
def tdirWithPoolTxns(poolTxnData, tdir, tconf):
    """Load all pool transactions into a ledger kept in *tdir*; returns
    *tdir* so it can be used as a fixture value."""
    txn_ledger = Ledger(CompactMerkleTree(),
                        dataDir=tdir,
                        fileName=tconf.poolTransactionsFile)
    for entry in poolTxnData["txns"]:
        txn_ledger.add(entry)
    return tdir
Esempio n. 4
0
def testRecoverLedgerNewFieldsToTxnsAdded(tempdir):
    """A ledger reopened with a serializer that declares one extra trailing
    field must still recover the same size, root hash and tree hashes."""
    hash_store = FileHashStore(tempdir)
    original_ledger = Ledger(tree=CompactMerkleTree(hashStore=hash_store),
                             dataDir=tempdir,
                             serializer=ledgerSerializer)
    for idx in range(10):
        original_ledger.add({"identifier": "i{}".format(idx),
                             "reqId": idx,
                             "op": "operation"})
    tree_before_stop = original_ledger.tree
    original_ledger.stop()

    # Same fields as before plus a new trailing one
    extended_fields = OrderedDict([("identifier", (str, str)),
                                   ("reqId", (str, int)),
                                   ("op", (str, str)),
                                   ("newField", (str, str))])
    restartedLedger = Ledger(tree=CompactMerkleTree(hashStore=hash_store),
                             dataDir=tempdir,
                             serializer=CompactSerializer(extended_fields))
    assert restartedLedger.size == original_ledger.size
    assert restartedLedger.root_hash == original_ledger.root_hash
    assert restartedLedger.tree.hashes == tree_before_stop.hashes
    assert restartedLedger.tree.root_hash == tree_before_stop.root_hash
Esempio n. 5
0
def createGenesisTxnFile(genesisTxns,
                         targetDir,
                         fileName,
                         fieldOrdering,
                         reset=True):
    """Serialize *genesisTxns* into a ledger file under *targetDir*.

    Each transaction is stamped with a per-identifier request id and its
    identifier (defaulting to "") before being added.

    :param reset: when True, wipe any existing ledger content first
    """
    genesis_ledger = Ledger(CompactMerkleTree(),
                            dataDir=targetDir,
                            serializer=CompactSerializer(fields=fieldOrdering),
                            fileName=fileName)

    if reset:
        genesis_ledger.reset()

    req_counters = {}
    for txn in genesisTxns:
        identifier = txn.get(f.IDENTIFIER.nm, "")
        req_counters[identifier] = req_counters.get(identifier, 0) + 1
        txn[f.REQ_ID.nm] = req_counters[identifier]
        txn[f.IDENTIFIER.nm] = identifier
        genesis_ledger.add(txn)
    genesis_ledger.stop()
Esempio n. 6
0
def updateGenesisPoolTxnFile(genesisTxnDir, genesisTxnFile, txn):
    """Append *txn* to the genesis pool transaction file when it is exactly
    the next transaction in sequence.

    :param genesisTxnDir: directory holding the genesis transaction file
    :param genesisTxnFile: name of the genesis transaction file
    :param txn: transaction dict; must contain the ``F.seqNo.name`` key
    """
    # The lock is an advisory lock, it might not work on linux filesystems
    # not mounted with option `-o mand`, another approach can be to use a .lock
    # file to indicate presence or absence of .lock
    try:
        # Exclusively lock file in a non blocking manner. Locking is necessary
        # since there might be multiple clients running on a machine so genesis
        # files should be updated safely.
        # TODO: There is no automated test in the codebase that confirms it.
        # It has only been manually tested in the python terminal. Add a test
        # for it using multiple processes writing concurrently
        with portalocker.Lock(os.path.join(genesisTxnDir, genesisTxnFile),
                              truncate=None,
                              flags=portalocker.LOCK_EX | portalocker.LOCK_NB):
            seqNo = txn[F.seqNo.name]
            ledger = Ledger(CompactMerkleTree(hashStore=FileHashStore(
                dataDir=genesisTxnDir)),
                            dataDir=genesisTxnDir,
                            fileName=genesisTxnFile)
            ledgerSize = len(ledger)
            # Append only the immediately-next transaction, excluding the
            # seqNo field from the stored record.
            if seqNo - ledgerSize == 1:
                ledger.add({k: v for k, v in txn.items() if k != F.seqNo.name})
                logger.debug('Adding transaction with sequence number {} in'
                             ' genesis pool transaction file'.format(seqNo))
            else:
                logger.debug('Already {} genesis pool transactions present so '
                             'transaction with sequence number {} '
                             'not applicable'.format(ledgerSize, seqNo))
    # Fix: the original caught `(portalocker.LockException,
    # portalocker.LockException)` — the same exception listed twice.
    # Also log the failure instead of silently discarding it.
    except portalocker.LockException as ex:
        logger.debug('Could not lock genesis pool transaction file: {}'
                     .format(ex))
        return
Esempio n. 7
0
def tdirWithPoolTxns(poolTxnData, tdir, tconf):
    """Populate a pool ledger in *tdir* with only the node-related
    transactions (NEW_NODE, CHANGE_HA, CHANGE_KEYS); returns *tdir*."""
    node_txn_types = (NEW_NODE, CHANGE_HA, CHANGE_KEYS)
    pool_ledger = Ledger(CompactMerkleTree(),
                         dataDir=tdir,
                         fileName=tconf.poolTransactionsFile)
    for entry in poolTxnData["txns"]:
        if entry.get(TXN_TYPE) not in node_txn_types:
            continue
        pool_ledger.add(entry)
    return tdir
Esempio n. 8
0
def updatedDomainTxnFile(tdir, tdirWithDomainTxns, genesisTxns,
                         domainTxnOrderedFields, tconf):
    """Append *genesisTxns* to the domain transactions file under *tdir*.

    ``tdirWithDomainTxns`` is accepted but not read in this body.
    """
    ledger = Ledger(CompactMerkleTree(),
                    dataDir=tdir,
                    serializer=CompactSerializer(fields=domainTxnOrderedFields),
                    fileName=tconf.domainTransactionsFile)
    try:
        for txn in genesisTxns:
            ledger.add(txn)
    finally:
        # Fix: the original never stopped the ledger, leaking its open
        # stores; sibling helpers (e.g. addTxnToFile) call stop().
        ledger.stop()
Esempio n. 9
0
def addTxnToFile(dir, file, txns, fields=None):
    """Append *txns* to the ledger file *file* under directory *dir*.

    :param dir: data directory for the ledger (name shadows the builtin,
        kept for caller compatibility)
    :param file: ledger file name (also shadows a legacy builtin name)
    :param txns: iterable of transactions to add
    :param fields: field ordering for the CompactSerializer; defaults to
        ``getTxnOrderedFields()``, now evaluated per call.  The original
        evaluated it once at definition time, sharing one mutable object
        across all calls (mutable-default pitfall).
    """
    if fields is None:
        fields = getTxnOrderedFields()
    ledger = Ledger(CompactMerkleTree(),
                    dataDir=dir,
                    serializer=CompactSerializer(fields=fields),
                    fileName=file)
    for txn in txns:
        ledger.add(txn)
    ledger.stop()
Esempio n. 10
0
def tdirWithPoolTxns(poolTxnData, tdir, tconf):
    """Build a pool ledger in *tdir* containing only node transactions
    (NEW_NODE, CHANGE_HA or CHANGE_KEYS) and return *tdir*."""
    pool_ledger = Ledger(CompactMerkleTree(),
                         dataDir=tdir,
                         fileName=tconf.poolTransactionsFile)
    node_txns = (txn for txn in poolTxnData["txns"]
                 if txn.get(TXN_TYPE) in (NEW_NODE, CHANGE_HA, CHANGE_KEYS))
    for txn in node_txns:
        pool_ledger.add(txn)
    return tdir
Esempio n. 11
0
def addTxnToFile(dir, file, txns, fields=None):
    """Append *txns* to the ledger file *file* under directory *dir*.

    :param fields: field ordering for the CompactSerializer; defaults to
        ``getTxnOrderedFields()``, evaluated at call time.  The original
        default was evaluated once at definition time, so a single mutable
        ordering object was shared by every call (mutable-default pitfall).
    """
    if fields is None:
        fields = getTxnOrderedFields()
    ledger = Ledger(CompactMerkleTree(),
                    dataDir=dir,
                    serializer=CompactSerializer(fields=fields),
                    fileName=file)
    for txn in txns:
        ledger.add(txn)
    ledger.stop()
Esempio n. 12
0
def tdirWithPoolTxns(poolTxnData, tdir, tconf):
    """Fill a pool ledger in *tdir* with the NODE transactions from
    *poolTxnData*, stop it, and return *tdir*."""
    pool_ledger = Ledger(CompactMerkleTree(),
                         dataDir=tdir,
                         fileName=tconf.poolTransactionsFile)
    for entry in poolTxnData["txns"]:
        if entry.get(TXN_TYPE) != NODE:
            continue
        pool_ledger.add(entry)
    pool_ledger.stop()
    return tdir
Esempio n. 13
0
def tdirWithDomainTxns(poolTxnData, tdir, tconf, domainTxnOrderedFields):
    """Create a domain ledger in *tdir* holding only the NYM transactions
    from *poolTxnData*; returns *tdir*."""
    domain_ledger = Ledger(
        CompactMerkleTree(),
        dataDir=tdir,
        serializer=CompactSerializer(fields=domainTxnOrderedFields),
        fileName=tconf.domainTransactionsFile)
    nym_txns = (t for t in poolTxnData["txns"] if t.get(TXN_TYPE) == NYM)
    for txn in nym_txns:
        domain_ledger.add(txn)
    return tdir
Esempio n. 14
0
def tdirWithDomainTxns(poolTxnData, tdir, tconf, domainTxnOrderedFields):
    """Write the NYM transactions from *poolTxnData* into a domain ledger
    stored under *tdir* and return the directory."""
    serializer = CompactSerializer(fields=domainTxnOrderedFields)
    domain_ledger = Ledger(CompactMerkleTree(),
                           dataDir=tdir,
                           serializer=serializer,
                           fileName=tconf.domainTransactionsFile)
    for txn in poolTxnData["txns"]:
        if txn.get(TXN_TYPE) != NYM:
            continue
        domain_ledger.add(txn)
    return tdir
Esempio n. 15
0
def invalid_identifier_tdir(tdir_for_func):
    """Write a single node transaction whose steward nym is not valid
    base58 into a ledger under *tdir_for_func*."""
    bad_steward_txn = Steward.node_txn(
        nym=base58.b58encode(b'whatever').decode("utf-8"),
        steward_nym="invalid====",
        node_name='test' + str(2),
        ip='127.0.0.1',
        node_port=8080,
        client_port=8081,
        client_ip='127.0.0.1')
    ledger = Ledger(CompactMerkleTree(), dataDir=tdir_for_func)
    ledger.add(bad_steward_txn)
    ledger.stop()
Esempio n. 16
0
def tdirWithPoolTxns(poolTxnData, tdir, tconf):
    """Create a pool ledger in *tdir* from the NODE transactions in
    *poolTxnData*, logging which user creates the file; returns *tdir*."""
    import getpass
    logging.debug("current user when creating new pool txn file: {}".format(
        getpass.getuser()))
    pool_ledger = Ledger(CompactMerkleTree(),
                         dataDir=tdir,
                         fileName=tconf.poolTransactionsFile)
    for entry in poolTxnData["txns"]:
        if entry.get(TXN_TYPE) != NODE:
            continue
        pool_ledger.add(entry)
    pool_ledger.stop()
    return tdir
Esempio n. 17
0
def invalid_verkey_tdir(tdir_for_func):
    """Populate a ledger under *tdir_for_func* with three node transactions;
    the middle one (d == 1) carries an invalid (non-base58) nym.

    Fix: ``base58.b58encode`` returns ``bytes`` in Python 3, so the valid
    nyms are now decoded to ``str`` — consistent with the sibling
    ``invalid_identifier_tdir`` fixture, which decodes its nym.
    """
    ledger = Ledger(CompactMerkleTree(), dataDir=tdir_for_func)
    for d in range(3):
        nym = ("invalid===="
               if d == 1
               else base58.b58encode(b'whatever').decode("utf-8"))
        txn = Steward.node_txn(steward_nym="Th7MpTaRZVRYnPiabds81Y",
                               node_name='test' + str(d),
                               nym=nym,
                               ip='127.0.0.1',
                               node_port=8080,
                               client_port=8081,
                               client_ip='127.0.0.1')
        ledger.add(txn)
    ledger.stop()
    def init_ledger_from_genesis_txn(self, ledger: Ledger):
        """Seed *ledger* with the transactions found in this store's init
        file, deserializing one transaction per cleaned line.

        A missing init file is tolerated: it is only logged, not raised.
        """
        # TODO: it's possible that the file to be used for initialization
        # does not exist. This is not considered as an error as of now.
        init_file = os.path.join(self.__data_dir, self.__db_name)
        if not os.path.exists(init_file):
            logger.display("File that should be used for initialization of "
                           "Ledger does not exist: {}".format(init_file))
            return

        with open(init_file, 'r') as f:
            for raw_line in store_utils.cleanLines(f):
                ledger.add(self.__serializer.deserialize(raw_line))
Esempio n. 19
0
def invalid_identifier_tdir(tdir_for_func):
    """Add one '0'-type transaction with a malformed (non-base58)
    IDENTIFIER to a ledger stored in *tdir_for_func*."""
    bad_txn = {
        TXN_TYPE: '0',
        TARGET_NYM: base58.b58encode(b'whatever').decode("utf-8"),
        IDENTIFIER: "invalid====",
        DATA: {
            NAME: str(2),
            ALIAS: 'test' + str(2),
            SERVICES: [VALIDATOR],
        },
    }
    ledger = Ledger(CompactMerkleTree(), dataDir=tdir_for_func)
    ledger.add(bad_txn)
    ledger.stop()
Esempio n. 20
0
def __migrate_ledger(data_directory,
                     old_ledger_file, new_ledger_file,
                     serializer: MappingSerializer = None):
    """
    Test for the directory, open old and new ledger, migrate data, rename directories

    Reads every txn from the ledger stored under ``new_ledger_file`` and
    writes it (via *serializer*) into a ledger named
    ``old_ledger_file + "_new"``; then deletes the ``new_ledger_file``
    directory and renames the backup over ``old_ledger_file``.

    :param data_directory: directory containing both ledger stores
    :param old_ledger_file: final file name after migration
    :param new_ledger_file: source ledger to read txns from (removed at end)
    :param serializer: txn/hash serializer for the rewritten ledger
    :raises Exception: if the ``old_ledger_file`` directory is missing
    """

    # we should have ChunkedFileStorage implementation of the Ledger
    if not os.path.isdir(os.path.join(data_directory, old_ledger_file)):
        msg = 'Could not find directory {} for migration.'.format(
            old_ledger_file)
        logger.error(msg)
        raise Exception(msg)

    # open the old ledger using the specified serializer
    old_ledger_file_backup = old_ledger_file + "_new"
    old_txn_log_store = ChunkedFileStore(data_directory,
                                         old_ledger_file_backup,
                                         isLineNoKey=True,
                                         storeContentHash=False)
    old_ledger = Ledger(CompactMerkleTree(),
                        dataDir=data_directory,
                        txn_serializer=serializer,
                        hash_serializer=serializer,
                        fileName=old_ledger_file_backup,
                        transactionLogStore=old_txn_log_store)

    # open the new ledger with new serialization
    new_ledger = Ledger(CompactMerkleTree(),
                        dataDir=data_directory,
                        fileName=new_ledger_file)
    # NOTE(review): this logs new_ledger.size but labels the line with the
    # old backup file name (and the mirror-image below) — confirm the
    # labels are intentional.
    logger.info("new size for {}: {}".format(
        old_ledger_file_backup, str(new_ledger.size)))

    # add all txns into the old ledger
    for _, txn in new_ledger.getAllTxn():
        old_ledger.add(txn)
    logger.info("old size for {}: {}".format(
        new_ledger_file, str(old_ledger.size)))

    old_ledger.stop()
    new_ledger.stop()

    # now that everything succeeded, remove the new files and move the old
    # files into place
    shutil.rmtree(
        os.path.join(data_directory, new_ledger_file))
    os.rename(
        os.path.join(data_directory, old_ledger_file_backup),
        os.path.join(data_directory, old_ledger_file))
def _migrate_ledger(data_directory,
                    old_ledger_file, new_ledger_file,
                    serializer: MappingSerializer = None):
    """
    Test for the directory, open old and new ledger, migrate data, rename directories

    Reads every txn from the ledger stored under ``new_ledger_file`` and
    writes it (via *serializer*) into a ledger named
    ``old_ledger_file + "_new"``; then deletes the ``new_ledger_file``
    directory and renames the backup over ``old_ledger_file``.

    :param data_directory: directory containing both ledger stores
    :param old_ledger_file: final file name after migration
    :param new_ledger_file: source ledger to read txns from (removed at end)
    :param serializer: txn/hash serializer for the rewritten ledger
    :raises Exception: if the ``old_ledger_file`` directory is missing
    """

    # we should have ChunkedFileStorage implementation of the Ledger
    if not os.path.isdir(os.path.join(data_directory, old_ledger_file)):
        msg = 'Could not find directory {} for migration.'.format(
            old_ledger_file)
        logger.error(msg)
        raise Exception(msg)

    # open the old ledger using the specified serializer
    old_ledger_file_backup = old_ledger_file + "_new"
    old_txn_log_store = ChunkedFileStore(data_directory,
                                         old_ledger_file_backup,
                                         isLineNoKey=True,
                                         storeContentHash=False)
    old_ledger = Ledger(CompactMerkleTree(),
                        dataDir=data_directory,
                        txn_serializer=serializer,
                        hash_serializer=serializer,
                        fileName=old_ledger_file_backup,
                        transactionLogStore=old_txn_log_store)

    # open the new ledger with new serialization
    new_ledger = Ledger(CompactMerkleTree(),
                        dataDir=data_directory,
                        fileName=new_ledger_file)
    # NOTE(review): this logs new_ledger.size but labels the line with the
    # old backup file name (and the mirror-image below) — confirm the
    # labels are intentional.
    logger.info("new size for {}: {}".format(
        old_ledger_file_backup, str(new_ledger.size)))

    # add all txns into the old ledger
    for _, txn in new_ledger.getAllTxn():
        old_ledger.add(txn)
    logger.info("old size for {}: {}".format(
        new_ledger_file, str(old_ledger.size)))

    old_ledger.stop()
    new_ledger.stop()

    # now that everything succeeded, remove the new files and move the old
    # files into place
    shutil.rmtree(
        os.path.join(data_directory, new_ledger_file))
    os.rename(
        os.path.join(data_directory, old_ledger_file_backup),
        os.path.join(data_directory, old_ledger_file))
Esempio n. 22
0
def testRecoverLedgerFromHashStore(tempdir):
    """A ledger reopened over the same FileHashStore must recover identical
    size, root hash and tree hashes."""
    hash_store = FileHashStore(tempdir)
    first_ledger = Ledger(tree=CompactMerkleTree(hashStore=hash_store),
                          dataDir=tempdir)
    for i in range(10):
        first_ledger.add(str(i).encode())
    tree_snapshot = first_ledger.tree
    first_ledger.stop()

    restartedLedger = Ledger(tree=CompactMerkleTree(hashStore=hash_store),
                             dataDir=tempdir)
    assert restartedLedger.size == first_ledger.size
    assert restartedLedger.root_hash == first_ledger.root_hash
    assert restartedLedger.tree.hashes == tree_snapshot.hashes
    assert restartedLedger.tree.root_hash == tree_snapshot.root_hash
Esempio n. 23
0
def testRecoverLedgerFromHashStore(odbhs, tdir):
    """After clearing the hash store and writing ten entries, reopening a
    ledger over the same store must reproduce size, roots and hashes."""
    cleanup(odbhs)
    original = Ledger(tree=CompactMerkleTree(hashStore=odbhs), dataDir=tdir)
    for i in range(10):
        original.add(str(i).encode())
    tree_snapshot = original.tree
    original.stop()

    restartedLedger = Ledger(tree=CompactMerkleTree(hashStore=odbhs),
                             dataDir=tdir)
    assert restartedLedger.size == original.size
    assert restartedLedger.root_hash == original.root_hash
    assert restartedLedger.tree.hashes == tree_snapshot.hashes
    assert restartedLedger.tree.root_hash == tree_snapshot.root_hash
Esempio n. 24
0
def updateGenesisPoolTxnFile(genesisTxnDir,
                             genesisTxnFile,
                             txn,
                             waitTimeIfAlreadyLocked=5):
    """Append *txn* to the genesis pool transaction file if it is the next
    transaction in sequence, retrying with growing waits while another
    process holds the lock.

    :param genesisTxnDir: directory holding the genesis transaction file
    :param genesisTxnFile: name of the genesis transaction file
    :param txn: transaction dict; must contain the ``F.seqNo.name`` key
    :param waitTimeIfAlreadyLocked: seconds to sleep before retrying when
        the file is already locked; each retry adds 5 seconds, and the
        function gives up once the wait would exceed 15 seconds
    """
    # The lock is an advisory lock, it might not work on linux filesystems
    # not mounted with option `-o mand`, another approach can be to use a .lock
    # file to indicate presence or absence of .lock
    # NOTE(review): despite its name, genesisFilePath is an open *file
    # object*, and it is never closed on any path — confirm and fix.
    genesisFilePath = open(os.path.join(genesisTxnDir, genesisTxnFile), 'a+')
    try:
        # Exclusively lock file in a non blocking manner. Locking is necessary
        # since there might be multiple clients running on a machine so genesis
        #  files should be updated safely.
        # TODO: There is no automated test in the codebase that confirms it.
        # It has only been manually tested in the python terminal. Add a test
        # for it using multiple processes writing concurrently
        # NOTE(review): portalocker.Lock(...) is only constructed here; it is
        # not entered with `with` and acquire() is not called, so the lock
        # may never actually be taken — verify against the portalocker
        # version in use.
        portalocker.Lock(genesisFilePath,
                         truncate=None,
                         flags=portalocker.LOCK_EX | portalocker.LOCK_NB)
        seqNo = txn[F.seqNo.name]
        ledger = Ledger(
            CompactMerkleTree(hashStore=FileHashStore(dataDir=genesisTxnDir)),
            dataDir=genesisTxnDir,
            fileName=genesisTxnFile)
        ledgerSize = len(ledger)
        # Append only the immediately-next transaction, excluding the seqNo
        # field from the stored record.
        if seqNo - ledgerSize == 1:
            ledger.add({k: v for k, v in txn.items() if k != F.seqNo.name})
            logger.debug('Adding transaction with sequence number {} in'
                         ' genesis pool transaction file'.format(seqNo))
        else:
            logger.debug('Already {} genesis pool transactions present so '
                         'transaction with sequence number {} '
                         'not applicable'.format(ledgerSize, seqNo))
        portalocker.unlock(genesisFilePath)
    except portalocker.AlreadyLocked as ex:
        logger.info(
            "file is already locked: {}, will retry in few seconds".format(
                genesisFilePath))
        # Retry recursively with a longer wait, up to three attempts
        # (5s, 10s, 15s); then give up and log an error.
        if waitTimeIfAlreadyLocked <= 15:
            time.sleep(waitTimeIfAlreadyLocked)
            updateGenesisPoolTxnFile(genesisTxnDir, genesisTxnFile, txn,
                                     waitTimeIfAlreadyLocked + 5)
        else:
            logger.error(
                "already locked error even after few attempts {}: {}".format(
                    genesisFilePath, str(ex)))
    except portalocker.LockException as ex:
        logger.error("error occurred during locking file {}: {}".format(
            genesisFilePath, str(ex)))
Esempio n. 25
0
def testRecoverLedgerFromHashStore(hashStore, tconf, tdir):
    """Reopening a ledger over a cleaned hash store that then received ten
    entries must reproduce size, root hash and tree hashes."""
    cleanup(hashStore)
    original = Ledger(tree=CompactMerkleTree(hashStore=hashStore),
                      dataDir=tdir)
    for i in range(10):
        original.add(str(i).encode())
    tree_snapshot = original.tree
    original.stop()

    restartedLedger = Ledger(tree=CompactMerkleTree(hashStore=hashStore),
                             dataDir=tdir)
    assert restartedLedger.size == original.size
    assert restartedLedger.root_hash == original.root_hash
    assert restartedLedger.tree.hashes == tree_snapshot.hashes
    assert restartedLedger.tree.root_hash == tree_snapshot.root_hash
    restartedLedger.stop()
Esempio n. 26
0
def invalid_verkey_tdir(tdir_for_func):
    """Write three '0'-type transactions to a ledger under *tdir_for_func*;
    the second one's TARGET_NYM is an invalid (non-base58) value."""
    ledger = Ledger(CompactMerkleTree(), dataDir=tdir_for_func)
    for d in range(3):
        entry = {
            TXN_TYPE: '0',
            TARGET_NYM: ("invalid===="
                         if d == 1
                         else base58.b58encode(b'whatever').decode("utf-8")),
            IDENTIFIER: "Th7MpTaRZVRYnPiabds81Y",
            DATA: {
                NAME: str(d),
                ALIAS: 'test' + str(d),
                SERVICES: [VALIDATOR],
            },
        }
        ledger.add(entry)
    ledger.stop()
Esempio n. 27
0
def tdirWithLedger(tdir):
    """Return a ledger in *tdir* seeded with three '0'-type transactions;
    the second one's TARGET_NYM is replaced with an invalid base58 value.

    Fix: removed an unused ``CompactMerkleTree`` local that the original
    created and never referenced.
    """
    ledger = Ledger(CompactMerkleTree(), dataDir=tdir)
    for d in range(3):
        # NOTE(review): b58encode returns bytes in Python 3; sibling
        # fixtures decode to str — confirm whether bytes is intended here.
        txn = {
            TXN_TYPE: '0',
            TARGET_NYM: base58.b58encode(b'whatever'),
            DATA: {
                NAME: str(d),
                ALIAS: 'test' + str(d),
                SERVICES: [VALIDATOR],
            }
        }
        if d == 1:
            txn[TARGET_NYM] = "invalid===="
        ledger.add(txn)
    return ledger
Esempio n. 28
0
def testConsistencyVerificationOnStartupCase1(tempdir):
    """
    One more node was added to nodes file
    """
    hash_store = FileHashStore(tempdir)
    ledger = Ledger(tree=CompactMerkleTree(hashStore=hash_store),
                    dataDir=tempdir)
    txn_count = 10
    for i in range(txn_count):
        ledger.add(str(i).encode())
    ledger.stop()

    # Write an orphan node straight into the hash store, bypassing the
    # leaf and transaction logs
    orphan_node = (None, None, ('X' * 32))
    hash_store.writeNode(orphan_node)

    with pytest.raises(ConsistencyVerificationFailed):
        ledger = NoTransactionRecoveryLedger(
            tree=CompactMerkleTree(hashStore=hash_store), dataDir=tempdir)
        ledger.recoverTreeFromHashStore()
    ledger.stop()
Esempio n. 29
0
def testConsistencyVerificationOnStartupCase1(tempdir):
    """
    One more node was added to nodes file
    """
    file_hash_store = FileHashStore(tempdir)
    ledger = Ledger(tree=CompactMerkleTree(hashStore=file_hash_store),
                    dataDir=tempdir)
    total_txns = 10
    for num in range(total_txns):
        ledger.add(str(num).encode())
    ledger.stop()

    # Inject a stray node into the hash store only, so it is absent from
    # the leaf and transaction logs
    file_hash_store.writeNode((None, None, ('X' * 32)))

    with pytest.raises(ConsistencyVerificationFailed):
        ledger = NoTransactionRecoveryLedger(
            tree=CompactMerkleTree(hashStore=file_hash_store),
            dataDir=tempdir)
        ledger.recoverTreeFromHashStore()
    ledger.stop()
Esempio n. 30
0
def createGenesisTxnFile(genesisTxns, targetDir, fileName, fieldOrdering,
                         reset=True):
    """Write *genesisTxns* into a compact-serialized ledger file under
    *targetDir*, stamping each txn with a per-identifier request id.

    :param reset: when True, clear any pre-existing ledger content first
    """
    ledger = Ledger(CompactMerkleTree(), dataDir=targetDir,
                    serializer=CompactSerializer(fields=fieldOrdering),
                    fileName=fileName)

    if reset:
        ledger.reset()

    counters = {}
    for txn in genesisTxns:
        ident = txn.get(f.IDENTIFIER.nm, "")
        counters[ident] = counters.get(ident, 0) + 1
        txn.update({f.REQ_ID.nm: counters[ident],
                    f.IDENTIFIER.nm: ident})
        ledger.add(txn)
    ledger.stop()
Esempio n. 31
0
def testRecoverLedgerNewFieldsToTxnsAdded(tempdir):
    """Reopening a ledger with a serializer that adds one trailing field
    must not change the recovered size, roots or tree hashes."""
    store = FileHashStore(tempdir)
    ledger = Ledger(tree=CompactMerkleTree(hashStore=store),
                    dataDir=tempdir,
                    serializer=ledgerSerializer)
    for n in range(10):
        txn = {"identifier": "i{}".format(n), "reqId": n, "op": "operation"}
        ledger.add(txn)
    saved_tree = ledger.tree
    ledger.stop()

    # Previous schema plus one extra trailing field
    wider_schema = OrderedDict([
        ("identifier", (str, str)),
        ("reqId", (str, int)),
        ("op", (str, str)),
        ("newField", (str, str)),
    ])

    restartedLedger = Ledger(tree=CompactMerkleTree(hashStore=store),
                             dataDir=tempdir,
                             serializer=CompactSerializer(wider_schema))
    assert restartedLedger.size == ledger.size
    assert restartedLedger.root_hash == ledger.root_hash
    assert restartedLedger.tree.hashes == saved_tree.hashes
    assert restartedLedger.tree.root_hash == saved_tree.root_hash
Esempio n. 32
0
def testConsistencyVerificationOnStartupCase2(tempdir):
    """
    One more transaction added to transactions file
    """
    hash_store = FileHashStore(tempdir)
    ledger = Ledger(tree=CompactMerkleTree(hashStore=hash_store),
                    dataDir=tempdir)
    txn_count = 10
    for i in range(txn_count):
        ledger.add(str(i).encode())

    # Append a rogue record to the transaction log only, so the merkle
    # tree no longer matches it
    rogue_value = ledger.leafSerializer.serialize('X' * 32, toBytes=False)
    ledger._transactionLog.put(key=str(txn_count + 1), value=rogue_value)

    ledger.stop()

    with pytest.raises(ConsistencyVerificationFailed):
        ledger = NoTransactionRecoveryLedger(
            tree=CompactMerkleTree(hashStore=hash_store), dataDir=tempdir)
        ledger.recoverTreeFromHashStore()
    ledger.stop()
Esempio n. 33
0
def testConsistencyVerificationOnStartupCase2(tempdir):
    """
    One more transaction added to transactions file
    """
    store = FileHashStore(tempdir)
    ledger = Ledger(tree=CompactMerkleTree(hashStore=store), dataDir=tempdir)
    total = 10
    for seq in range(total):
        ledger.add(str(seq).encode())

    # Append one record to the transaction log only; the merkle tree never
    # sees it, so consistency verification must fail on recovery.
    badData = 'X' * 32
    ledger._transactionLog.put(
        key=str(total + 1),
        value=ledger.leafSerializer.serialize(badData, toBytes=False))

    ledger.stop()

    with pytest.raises(ConsistencyVerificationFailed):
        recovering = NoTransactionRecoveryLedger(
            tree=CompactMerkleTree(hashStore=store), dataDir=tempdir)
        recovering.recoverTreeFromHashStore()
    recovering.stop()
Esempio n. 34
0
 def _processCatchupReplies(self, ledgerType, ledger: Ledger,
                            catchUpReplies: List):
     """Apply catchup replies to ``ledger`` and return how many leading
     entries of ``catchUpReplies`` the caller may discard.

     ``catchUpReplies`` is a list of ``(seqNo, txn)`` pairs, already
     sorted by sequence number (see the TODO below).  Entries whose
     seqNo is already in the ledger are skipped; a contiguous batch that
     verifies against the catchup proof is added to the ledger and the
     method recurses on the remainder.  On verification failure the
     sender is blacklisted (only when this manager is owned by a node)
     and the bad batch is still counted as processed so it gets dropped.
     """
     # Removing transactions for sequence numbers are already
     # present in the ledger
     # TODO: Inefficient, should check list in reverse and stop at first
     # match since list is already sorted
     numProcessed = sum(1 for s, _ in catchUpReplies if s <= ledger.size)
     if numProcessed:
         logger.debug("{} found {} already processed transactions in the "
                      "catchup replies".format(self, numProcessed))
     catchUpReplies = catchUpReplies[numProcessed:]
     if catchUpReplies:
         seqNo = catchUpReplies[0][0]
         # Only proceed when the batch continues exactly where the ledger
         # left off; a gap means we must wait for earlier replies.
         if seqNo - ledger.seqNo == 1:
             result, nodeName, toBeProcessed = self.hasValidCatchupReplies(
                 ledgerType, ledger, seqNo, catchUpReplies)
             if result:
                 for _, txn in catchUpReplies[:toBeProcessed]:
                     merkleInfo = ledger.add(txn)
                     # Propagate the seqNo assigned by the ledger before
                     # notifying the post-add callback.
                     txn[F.seqNo.name] = merkleInfo[F.seqNo.name]
                     self.ledgers[ledgerType]["postTxnAddedToLedgerClbk"](
                         ledgerType, txn)
                 self._removePrcdCatchupReply(ledgerType, nodeName, seqNo)
                 # Recurse on what is left after this verified batch.
                 return numProcessed + toBeProcessed + \
                     self._processCatchupReplies(ledgerType, ledger,
                                                 catchUpReplies[toBeProcessed:])
             else:
                 if self.ownedByNode:
                     self.owner.blacklistNode(nodeName,
                                              reason="Sent transactions "
                                              "that could not be "
                                              "verified")
                     self._removePrcdCatchupReply(ledgerType, nodeName,
                                                  seqNo)
                     # Invalid transactions have to be discarded so letting
                     # the caller know how many txns have to removed from
                     # `self.receivedCatchUpReplies`
                     return numProcessed + toBeProcessed
     return numProcessed
Esempio n. 35
0
 def _processCatchupReplies(self, ledgerType, ledger: Ledger,
                            catchUpReplies: List):
     """Consume catchup replies for ``ledger``; return the count of
     leading ``catchUpReplies`` entries the caller can drop.

     Each element of ``catchUpReplies`` is a ``(seqNo, txn)`` pair.
     Already-present seqNos are skipped.  When the next reply is the
     ledger's immediate successor, the batch is verified and either
     added (then the method recurses on the rest) or, on failure, the
     sending node is blacklisted — but only when ``self.ownedByNode`` —
     and the failed batch is still reported as processed so it is
     removed from the pending replies.
     """
     # Removing transactions for sequence numbers are already
     # present in the ledger
     numProcessed = sum(1 for s, _ in catchUpReplies if s <= ledger.size)
     catchUpReplies = catchUpReplies[numProcessed:]
     if numProcessed:
         logger.debug("{} found {} already processed transactions in the "
                      "catchup replies".format(self, numProcessed))
     if catchUpReplies:
         seqNo = catchUpReplies[0][0]
         # A difference of exactly 1 means the replies continue the
         # ledger with no gap.
         if seqNo - ledger.seqNo == 1:
             result, nodeName, toBeProcessed = self.hasValidCatchupReplies(
                 ledgerType, ledger, seqNo, catchUpReplies)
             if result:
                 for _, txn in catchUpReplies[:toBeProcessed]:
                     merkleInfo = ledger.add(txn)
                     # Record the ledger-assigned seqNo on the txn before
                     # invoking the post-add callback.
                     txn[F.seqNo.name] = merkleInfo[F.seqNo.name]
                     self.ledgers[ledgerType]["postTxnAddedToLedgerClbk"](
                         ledgerType, txn)
                 self._removePrcdCatchupReply(ledgerType, nodeName, seqNo)
                 return numProcessed + toBeProcessed + \
                     self._processCatchupReplies(ledgerType, ledger,
                                             catchUpReplies[toBeProcessed:])
             else:
                 if self.ownedByNode:
                     self.owner.blacklistNode(nodeName,
                                              reason="Sent transactions "
                                                     "that could not be "
                                                     "verified")
                     self._removePrcdCatchupReply(ledgerType, nodeName,
                                                  seqNo)
                     # Invalid transactions have to be discarded so letting
                     # the caller know how many txns have to removed from
                     # `self.receivedCatchUpReplies`
                     return numProcessed + toBeProcessed
     return numProcessed
Esempio n. 36
0
    def bootstrapTestNodesCore(config, envName, appendToLedgers,
                               domainTxnFieldOrder, ips, nodeCount,
                               clientCount, nodeNum, startingPort):
        """Write genesis pool and domain ledgers for a test network of
        ``nodeCount`` nodes and ``clientCount`` clients, initialising
        local keys for the node numbered ``nodeNum`` (if any)."""
        baseDir = config.baseDir
        if not os.path.exists(baseDir):
            os.makedirs(baseDir, exist_ok=True)

        # Normalise to exactly nodeCount IPs: default/pad with localhost,
        # truncate any surplus.
        if not ips:
            ips = ['127.0.0.1'] * nodeCount
        else:
            ips = (ips.split(",") + ['127.0.0.1'] * nodeCount)[:nodeCount]

        # Environment-specific ledger file names take precedence when
        # configured.
        if hasattr(config, "ENVS") and envName:
            poolTxnFile = config.ENVS[envName].poolLedger
            domainTxnFile = config.ENVS[envName].domainLedger
        else:
            poolTxnFile = config.poolTransactionsFile
            domainTxnFile = config.domainTransactionsFile

        poolLedger = Ledger(CompactMerkleTree(), dataDir=baseDir,
                            fileName=poolTxnFile)
        domainLedger = Ledger(
            CompactMerkleTree(),
            serializer=CompactSerializer(fields=domainTxnFieldOrder),
            dataDir=baseDir,
            fileName=domainTxnFile)

        if not appendToLedgers:
            poolLedger.reset()
            domainLedger.reset()

        steward1Nym = None
        for i, ip in enumerate(ips, start=1):
            # One steward NYM per node in the domain ledger.
            stewardName = "Steward" + str(i)
            stewardSeed = TestNetworkSetup.getSigningSeed(stewardName)
            stewardNym = TestNetworkSetup.getNymFromVerkey(
                Signer(stewardSeed).verhex)
            stewardTxn = {
                TARGET_NYM: stewardNym,
                TXN_TYPE: NYM,
                ROLE: STEWARD,
                ALIAS: stewardName,
                TXN_ID: sha256(stewardName.encode()).hexdigest()
            }
            if i == 1:
                steward1Nym = stewardNym
            else:
                # The first steward adds every steward
                stewardTxn[f.IDENTIFIER.nm] = steward1Nym
            domainLedger.add(stewardTxn)

            # Matching NODE txn in the pool ledger, with consecutive ports.
            nodeName = "Node" + str(i)
            nodePort = startingPort + (i * 2 - 1)
            clientPort = startingPort + (i * 2)
            nodeSeed = TestNetworkSetup.getSigningSeed(nodeName)
            if nodeNum == i:
                # This process's own node: persist its keys locally.
                _, nodeVerkey = initLocalKeep(nodeName, baseDir, nodeSeed,
                                              True)
                nodeVerkey = nodeVerkey.encode()
                print("This node with name {} will use ports {} and {} for "
                      "nodestack and clientstack respectively".format(
                          nodeName, nodePort, clientPort))
            else:
                nodeVerkey = Signer(nodeSeed).verhex
            poolLedger.add({
                TARGET_NYM: TestNetworkSetup.getNymFromVerkey(nodeVerkey),
                TXN_TYPE: NODE,
                f.IDENTIFIER.nm: stewardNym,
                DATA: {
                    CLIENT_IP: ip,
                    ALIAS: nodeName,
                    CLIENT_PORT: clientPort,
                    NODE_IP: ip,
                    NODE_PORT: nodePort,
                    SERVICES: [VALIDATOR]
                },
                TXN_ID: sha256(nodeName.encode()).hexdigest()
            })

        # Client NYMs, all added by the first steward.
        for i in range(1, clientCount + 1):
            clientName = "Client" + str(i)
            clientSeed = TestNetworkSetup.getSigningSeed(clientName)
            domainLedger.add({
                f.IDENTIFIER.nm: steward1Nym,
                TARGET_NYM: TestNetworkSetup.getNymFromVerkey(
                    Signer(clientSeed).verhex),
                TXN_TYPE: NYM,
                ALIAS: clientName,
                TXN_ID: sha256(clientName.encode()).hexdigest()
            })

        poolLedger.stop()
        domainLedger.stop()
Esempio n. 37
0
    def bootstrapTestNodesCore(baseDir,
                           poolTransactionsFile,
                           domainTransactionsFile,
                           domainTxnFieldOrder,
                           ips, nodeCount, clientCount,
                           nodeNum, startingPort):
        """Build fresh genesis pool and domain ledgers in ``baseDir`` for a
        test network of ``nodeCount`` nodes and ``clientCount`` clients.

        Both ledgers are reset unconditionally before writing.  For the
        node whose number equals ``nodeNum``, local keys are initialised
        on disk via ``initLocalKeep``.  Node/client ports are derived from
        ``startingPort``: node ``i`` gets ``startingPort + 2*i - 1`` and
        ``startingPort + 2*i``.
        """
        # Normalise the IP list to exactly nodeCount entries: default or
        # pad with localhost, truncate any surplus.
        if not ips:
            ips = ['127.0.0.1'] * nodeCount
        else:
            ips = ips.split(",")
            if len(ips) != nodeCount:
                if len(ips) > nodeCount:
                    ips = ips[:nodeCount]
                else:
                    ips = ips + ['127.0.0.1'] * (nodeCount - len(ips))

        poolLedger = Ledger(CompactMerkleTree(),
                            dataDir=baseDir,
                            fileName=poolTransactionsFile)
        poolLedger.reset()

        domainLedger = Ledger(CompactMerkleTree(),
                              serializer=CompactSerializer(fields=
                                                           domainTxnFieldOrder),
                              dataDir=baseDir,
                              fileName=domainTransactionsFile)
        domainLedger.reset()

        steward1Nym = None
        for num in range(1, nodeCount + 1):
            # One steward NYM txn per node in the domain ledger.
            stewardName = "Steward" + str(num)
            sigseed = TestNetworkSetup.getSigningSeed(stewardName)
            verkey = Signer(sigseed).verhex
            stewardNym = TestNetworkSetup.getNymFromVerkey(verkey)
            txn = {
                TARGET_NYM: stewardNym,
                TXN_TYPE: NYM,
                ROLE: STEWARD,
                ALIAS: stewardName,
                TXN_ID: sha256(stewardName.encode()).hexdigest()
            }
            if num == 1:
                steward1Nym = stewardNym
            else:
                # The first steward adds every steward
                txn[f.IDENTIFIER.nm] = steward1Nym
            domainLedger.add(txn)

            # Matching NEW_NODE txn in the pool ledger.
            nodeName = "Node" + str(num)
            nodePort, clientPort = startingPort + (num * 2 - 1), startingPort \
                                   + (num * 2)
            ip = ips[num - 1]
            sigseed = TestNetworkSetup.getSigningSeed(nodeName)
            if nodeNum == num:
                # This process's own node: persist its keys locally.
                _, verkey = initLocalKeep(nodeName, baseDir, sigseed, True)
                verkey = verkey.encode()
                print("This node with name {} will use ports {} and {} for "
                      "nodestack and clientstack respectively"
                      .format(nodeName, nodePort, clientPort))
            else:
                verkey = Signer(sigseed).verhex
            txn = {
                TARGET_NYM: TestNetworkSetup.getNymFromVerkey(verkey),
                TXN_TYPE: NEW_NODE,
                f.IDENTIFIER.nm: stewardNym,
                DATA: {
                    CLIENT_IP: ip,
                    ALIAS: nodeName,
                    CLIENT_PORT: clientPort,
                    NODE_IP: ip,
                    NODE_PORT: nodePort
                },
                TXN_ID: sha256(nodeName.encode()).hexdigest()
            }
            poolLedger.add(txn)

        # Client NYM txns, all added by the first steward.
        for num in range(1, clientCount + 1):
            clientName = "Client" + str(num)
            sigseed = TestNetworkSetup.getSigningSeed(clientName)
            verkey = Signer(sigseed).verhex
            txn = {
                f.IDENTIFIER.nm: steward1Nym,
                TARGET_NYM: TestNetworkSetup.getNymFromVerkey(verkey),
                TXN_TYPE: NYM,
                ALIAS: clientName,
                TXN_ID: sha256(clientName.encode()).hexdigest()
            }
            domainLedger.add(txn)

        poolLedger.stop()
        domainLedger.stop()