Example #1
def updateGenesisPoolTxnFile(genesisTxnDir, genesisTxnFile, txn):
    # The lock is an advisory lock; it might not work on Linux filesystems
    # not mounted with the option `-o mand`. Another approach could be to use
    # a .lock file to indicate the presence or absence of a lock.
    try:
        # Exclusively lock the file in a non-blocking manner. Locking is
        # necessary since there might be multiple clients running on a
        # machine, so genesis files should be updated safely.
        # TODO: There is no automated test in the codebase that confirms this.
        # It has only been manually tested in the python terminal. Add a test
        # for it using multiple processes writing concurrently.
        with portalocker.Lock(os.path.join(genesisTxnDir, genesisTxnFile),
                              truncate=None,
                              flags=portalocker.LOCK_EX | portalocker.LOCK_NB):
            seqNo = txn[F.seqNo.name]
            ledger = Ledger(CompactMerkleTree(hashStore=FileHashStore(
                dataDir=genesisTxnDir)),
                            dataDir=genesisTxnDir,
                            fileName=genesisTxnFile)
            ledgerSize = len(ledger)
            if seqNo - ledgerSize == 1:
                ledger.add({k: v for k, v in txn.items() if k != F.seqNo.name})
                logger.debug('Adding transaction with sequence number {} in'
                             ' genesis pool transaction file'.format(seqNo))
            else:
                logger.debug('Already {} genesis pool transactions present so '
                             'transaction with sequence number {} '
                             'not applicable'.format(ledgerSize, seqNo))
    except portalocker.LockException:
        return
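The TODO above asks for an automated test with multiple processes writing concurrently. A minimal sketch of such a test, assuming updateGenesisPoolTxnFile, Ledger, CompactMerkleTree and FileHashStore are importable from their plenum modules and that pytest's tmpdir fixture is available; the txn payload, key names and file name are illustrative:

import multiprocessing

def _race_writer(genesis_dir, genesis_file, seq_no):
    # Each process tries to append the txn carrying the next sequence number.
    # The 'seqNo' key and the payload shape are illustrative stand-ins.
    updateGenesisPoolTxnFile(genesis_dir, genesis_file,
                             {'seqNo': seq_no, 'data': 'concurrency-check'})

def test_concurrent_genesis_pool_txn_file_updates(tmpdir):
    # Spawn several processes that race on the same genesis file; the advisory
    # lock should either serialize them or make the losers return early.
    genesis_file = 'pool_transactions_sandbox'
    procs = [multiprocessing.Process(target=_race_writer,
                                     args=(str(tmpdir), genesis_file, n))
             for n in range(1, 6)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()

    # Reopen the ledger; regardless of which writers won the lock race, no
    # more than the 5 attempted transactions should have been appended.
    ledger = Ledger(CompactMerkleTree(hashStore=FileHashStore(dataDir=str(tmpdir))),
                    dataDir=str(tmpdir), fileName=genesis_file)
    assert len(ledger) <= 5
    ledger.stop()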
Example #2
def test_parse_non_base58_txn_type_field_raises_descriptive_error(
        tdirWithLedger, tdir):
    with pytest.raises(ValueError) as excinfo:
        ledger = Ledger(CompactMerkleTree(), dataDir=tdir)
        _, _, nodeKeys = TxnStackManager.parseLedgerForHaAndKeys(ledger)
    assert ("verkey" in str(excinfo.value))
    ledger.stop()
Example #3
def updateGenesisPoolTxnFile(genesisTxnDir, genesisTxnFile, txn):
    # The lock is an advisory lock; it might not work on Linux filesystems
    # not mounted with the option `-o mand`. Another approach could be to use
    # a .lock file to indicate the presence or absence of a lock.
    try:
        # Exclusively lock the file in a non-blocking manner. Locking is
        # necessary since there might be multiple clients running on a
        # machine, so genesis files should be updated safely.
        # TODO: There is no automated test in the codebase that confirms this.
        # It has only been manually tested in the python terminal. Add a test
        # for it using multiple processes writing concurrently.
        with portalocker.Lock(os.path.join(genesisTxnDir, genesisTxnFile),
                              flags=portalocker.LOCK_EX | portalocker.LOCK_NB):
            seqNo = txn[F.seqNo.name]
            ledger = Ledger(CompactMerkleTree(hashStore=FileHashStore(
                dataDir=genesisTxnDir)), dataDir=genesisTxnDir,
                fileName=genesisTxnFile)
            ledgerSize = len(ledger)
            if seqNo - ledgerSize == 1:
                ledger.add({k: v for k, v in txn.items() if k != F.seqNo.name})
                logger.debug('Adding transaction with sequence number {} in'
                             ' genesis pool transaction file'.format(seqNo))
            else:
                logger.debug('Already {} genesis pool transactions present so '
                             'transaction with sequence number {} '
                             'not applicable'.format(ledgerSize, seqNo))
    except portalocker.exceptions.LockException:
        return
Example #4
def tdirWithPoolTxns(poolTxnData, tdir, tconf):
    ledger = Ledger(CompactMerkleTree(),
                    dataDir=tdir,
                    fileName=tconf.poolTransactionsFile)
    for item in poolTxnData["txns"]:
        ledger.add(item)
    return tdir
Example #5
def ledger(tempdir):
    ledger = Ledger(
        CompactMerkleTree(hashStore=FileHashStore(dataDir=tempdir)),
        dataDir=tempdir,
        serializer=ledgerSerializer)
    ledger.reset()
    return ledger
Example #6
def tdirWithPoolTxns(poolTxnData, tdir, tconf):
    ledger = Ledger(CompactMerkleTree(),
                    dataDir=tdir,
                    fileName=tconf.poolTransactionsFile)
    for item in poolTxnData["txns"]:
        ledger.add(item)
    return tdir
Example #7
def check_audit_txn(txn,
                    view_no,
                    pp_seq_no,
                    seq_no,
                    txn_time,
                    txn_roots,
                    state_roots,
                    pool_size,
                    domain_size,
                    config_size,
                    last_domain_seqno,
                    last_pool_seqno,
                    last_config_seqno,
                    primaries,
                    digest='',
                    other_sizes={}):
    expectedLedgerRoots = {}
    txn_roots = {k: Ledger.hashToStr(v) for k, v in txn_roots.items()}
    state_roots = {k: Ledger.hashToStr(v) for k, v in state_roots.items()}
    # We expect deltas here, i.e. the difference between the current audit
    # txn's seqNo and the seqNo of the audit txn in which the corresponding
    # ledger was last updated
    if last_domain_seqno:
        expectedLedgerRoots[1] = seq_no - last_domain_seqno
    if last_pool_seqno:
        expectedLedgerRoots[0] = seq_no - last_pool_seqno
    if last_config_seqno:
        expectedLedgerRoots[2] = seq_no - last_config_seqno
    expectedLedgerRoots.update(txn_roots)
    ledger_size = {0: pool_size, 1: domain_size, 2: config_size}
    ledger_size.update(other_sizes)

    expected = {
        "reqSignature": {},
        "txn": {
            "data": {
                "ledgerRoot": expectedLedgerRoots,
                "ver": "1",
                "viewNo": view_no,
                "ppSeqNo": pp_seq_no,
                "ledgerSize": ledger_size,
                "stateRoot": state_roots,
                "primaries": primaries,
                "digest": digest,
            },
            "metadata": {},
            "protocolVersion": CURRENT_PROTOCOL_VERSION,
            "type": "2",  # AUDIT
        },
        "txnMetadata": {
            "seqNo": seq_no,
            "txnTime": txn_time
        },
        "ver": "1"
    }
    txn = JsonSerializer().serialize(txn)
    expected = JsonSerializer().serialize(expected)
    print(txn)
    print(expected)
    assert expected == txn
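As a concrete illustration of the delta convention in the comment above (numbers are made up): if the current audit txn sits at seq_no 10 and the domain ledger (id 1) last changed in the audit txn with seq_no 7, the expected entry is the delta 3 rather than a root hash:

# Illustrative only: the delta is the distance back to the audit txn in which
# the corresponding ledger was last updated.
seq_no, last_domain_seqno = 10, 7
expectedLedgerRoots = {1: seq_no - last_domain_seqno}  # == {1: 3}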
Example #8
def addTxnToFile(dir, file, txns, fields=getTxnOrderedFields()):
    ledger = Ledger(CompactMerkleTree(),
                    dataDir=dir,
                    serializer=CompactSerializer(fields=fields),
                    fileName=file)
    for txn in txns:
        ledger.add(txn)
    ledger.stop()
Example #9
def tdirWithPoolTxns(poolTxnData, tdir, tconf):
    ledger = Ledger(CompactMerkleTree(),
                    dataDir=tdir,
                    fileName=tconf.poolTransactionsFile)
    for item in poolTxnData["txns"]:
        if item.get(TXN_TYPE) in (NEW_NODE, CHANGE_HA, CHANGE_KEYS):
            ledger.add(item)
    return tdir
Example #10
    def init_pool_ledger(cls, appendToLedgers, baseDir, config, envName):
        poolTxnFile = cls.pool_ledger_file_name(config, envName)
        pool_ledger = Ledger(CompactMerkleTree(),
                             dataDir=baseDir,
                             fileName=poolTxnFile)
        if not appendToLedgers:
            pool_ledger.reset()
        return pool_ledger
Example #11
def tdirWithPoolTxns(poolTxnData, tdir, tconf):
    ledger = Ledger(CompactMerkleTree(),
                    dataDir=tdir,
                    fileName=tconf.poolTransactionsFile)
    for item in poolTxnData["txns"]:
        if item.get(TXN_TYPE) in (NEW_NODE, CHANGE_HA, CHANGE_KEYS):
            ledger.add(item)
    return tdir
Example #12
def updatedDomainTxnFile(tdir, tdirWithDomainTxns, genesisTxns,
                         domainTxnOrderedFields, tconf):
    ledger = Ledger(CompactMerkleTree(),
                    dataDir=tdir,
                    serializer=CompactSerializer(fields=domainTxnOrderedFields),
                    fileName=tconf.domainTransactionsFile)
    for txn in genesisTxns:
        ledger.add(txn)
Example #13
def tdirWithDomainTxns(poolTxnData, tdir, tconf, domainTxnOrderedFields):
    ledger = Ledger(CompactMerkleTree(),
                    dataDir=tdir,
                    serializer=CompactSerializer(fields=domainTxnOrderedFields),
                    fileName=tconf.domainTransactionsFile)
    for item in poolTxnData["txns"]:
        if item.get(TXN_TYPE) == NYM:
            ledger.add(item)
    return tdir
Example #14
def test_parse_verkey_non_base58_txn_type_field_raises_SystemExit_has_descriptive_error(
        invalid_verkey_tdir, tdir_for_func):
    """
    Test that invalid base58 TARGET_NYM in pool_transaction raises the proper exception (INDY-150)
    """
    with pytest.raises(SystemExit) as excinfo:
        ledger = Ledger(CompactMerkleTree(), dataDir=tdir_for_func)
        _, _, nodeKeys = TxnStackManager.parseLedgerForHaAndKeys(ledger)
    assert excinfo.value.code == 'Invalid verkey. Rebuild pool transactions.'
    ledger.stop()
Example #15
def test_parse_identifier_non_base58_txn_type_field_raises_SystemExit_has_descriptive_error(
        invalid_identifier_tdir, tdir_for_func):
    """
    Test that invalid base58 IDENTIFIER in pool_transaction raises the proper exception (INDY-150)
    """
    with pytest.raises(SystemExit) as excinfo:
        ledger = Ledger(CompactMerkleTree(), dataDir=tdir_for_func)
        _, _, nodeKeys = TxnStackManager.parseLedgerForHaAndKeys(ledger)
    assert excinfo.value.code == errMsg2
    ledger.stop()
Example #16
def _open_new_ledger(data_directory, new_ledger_file, hash_store_name):
    # open new Ledger with leveldb hash store (to re-init it)
    logger.info("Open new ledger folder: {}".format(
        os.path.join(data_directory, new_ledger_file)))
    new_ledger = Ledger(CompactMerkleTree(
        hashStore=LevelDbHashStore(
            dataDir=data_directory, fileNamePrefix=hash_store_name)),
        dataDir=data_directory,
        fileName=new_ledger_file)
    new_ledger.stop()
Example #17
def __open_new_ledger(data_directory, new_ledger_file, hash_store_name):
    # open new Ledger with leveldb hash store (to re-init it)
    logger.info("Open new ledger folder: {}".format(
        os.path.join(data_directory, new_ledger_file)))
    new_ledger = Ledger(CompactMerkleTree(
        hashStore=LevelDbHashStore(
            dataDir=data_directory, fileNamePrefix=hash_store_name)),
        dataDir=data_directory,
        fileName=new_ledger_file)
    new_ledger.stop()
Example #18
def tdirWithDomainTxns(poolTxnData, tdir, tconf, domainTxnOrderedFields):
    ledger = Ledger(
        CompactMerkleTree(),
        dataDir=tdir,
        serializer=CompactSerializer(fields=domainTxnOrderedFields),
        fileName=tconf.domainTransactionsFile)
    for item in poolTxnData["txns"]:
        if item.get(TXN_TYPE) == NYM:
            ledger.add(item)
    return tdir
Example #19
    def _do_apply_batch(self, batch):
        reqs = [Request(**req_dict) for req_dict in batch[f.REQUESTS.nm]]

        ledger_id = batch[f.LEDGER_ID.nm]
        three_pc_batch = ThreePcBatch.from_batch_committed_dict(batch)
        self._node.apply_reqs(reqs, three_pc_batch)

        # We need raw hashes in apply and their str form in commit
        three_pc_batch.txn_root = Ledger.hashToStr(three_pc_batch.txn_root)
        three_pc_batch.state_root = Ledger.hashToStr(three_pc_batch.state_root)
        self._node.get_executer(ledger_id)(three_pc_batch)
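Ledger.hashToStr, used above and throughout these examples, converts the raw Merkle and state root bytes produced during apply into the string form expected at commit time (base58, as far as I can tell from plenum). A tiny illustration with a stand-in root value:

# Illustrative only: turn a placeholder 32-byte root hash into its string form.
raw_root = bytes(32)
printable_root = Ledger.hashToStr(raw_root)
assert isinstance(printable_root, str)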
Example #20
    def init_domain_ledger(cls, appendToLedgers, baseDir, config, envName,
                           domainTxnFieldOrder):
        domainTxnFile = cls.domain_ledger_file_name(config, envName)
        ser = CompactSerializer(fields=domainTxnFieldOrder)
        domain_ledger = Ledger(CompactMerkleTree(),
                               serializer=ser,
                               dataDir=baseDir,
                               fileName=domainTxnFile)
        if not appendToLedgers:
            domain_ledger.reset()
        return domain_ledger
Example #21
def ledger():
    ledger = Ledger()
    transactions = (
        ("2015-01-16", "john", "mary", "120.50"),
        ("2015-01-17", "john", "supermarket", "20.00"),
        ("2015-01-17", "mary", "insurance", "100.00"),
        ("2015-01-18", "john", "insurance", "50.00"),
    )
    ledger._process_transactions(transactions)

    return ledger
Example #22
def ledger(tempdir):
    store = ChunkedFileStore(tempdir,
                             'transactions',
                             isLineNoKey=True,
                             chunkSize=chunk_size,
                             storeContentHash=False,
                             ensureDurability=False)
    ledger = Ledger(CompactMerkleTree(hashStore=FileHashStore(dataDir=tempdir)),
                    dataDir=tempdir, serializer=JsonSerializer(),
                    transactionLogStore=store)
    ledger.reset()
    return ledger
Example #23
    def init_ledger_from_genesis_txn(self, ledger: Ledger):
        # TODO: it's possible that the file to be used for initialization does
        # not exist. This is not considered an error for now.
        init_file = os.path.join(self.__data_dir, self.__db_name)
        if not os.path.exists(init_file):
            logger.display("File that should be used for initialization of "
                           "Ledger does not exist: {}".format(init_file))
            return

        with open(init_file, 'r') as f:
            for line in store_utils.cleanLines(f):
                txn = self.__serializer.deserialize(line)
                ledger.add(txn)
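The init file read above is expected to hold one serialized transaction per line. A minimal sketch of producing such a file by hand, assuming the serializer in use is JSON; the field names below are purely illustrative:

import json

# Illustrative only: one JSON object per line, matching the line-by-line
# deserialization loop in init_ledger_from_genesis_txn above.
txns = [
    {"txn": {"type": "0", "data": {"alias": "Node1"}}, "txnMetadata": {"seqNo": 1}},
    {"txn": {"type": "0", "data": {"alias": "Node2"}}, "txnMetadata": {"seqNo": 2}},
]
with open("pool_transactions_genesis", "w") as f:
    for txn in txns:
        f.write(json.dumps(txn) + "\n")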
Example #24
    def __fill_ledger_root_hash(self, txn, lid, ledger, last_audit_txn, three_pc_batch):
        last_audit_txn_data = get_payload_data(last_audit_txn) if last_audit_txn is not None else None

        if lid == three_pc_batch.ledger_id:
            txn[AUDIT_TXN_LEDGER_ROOT][lid] = Ledger.hashToStr(ledger.uncommitted_root_hash)
            txn[AUDIT_TXN_STATE_ROOT][lid] = Ledger.hashToStr(self.database_manager.get_state(lid).headHash)

        # 1. it is the first batch and we have something
        elif last_audit_txn_data is None and ledger.uncommitted_size:
            txn[AUDIT_TXN_LEDGER_ROOT][lid] = Ledger.hashToStr(ledger.uncommitted_root_hash)
            txn[AUDIT_TXN_STATE_ROOT][lid] = Ledger.hashToStr(self.database_manager.get_state(lid).headHash)

        # 1.1. Rare case -- we have previous audit txns but don't have this ledger i.e. new plugins
        elif last_audit_txn_data is not None and last_audit_txn_data[AUDIT_TXN_LEDGERS_SIZE][lid] is None and \
                len(ledger.uncommittedTxns):
            txn[AUDIT_TXN_LEDGER_ROOT][lid] = Ledger.hashToStr(ledger.uncommitted_root_hash)
            txn[AUDIT_TXN_STATE_ROOT][lid] = Ledger.hashToStr(self.database_manager.get_state(lid).headHash)

        # 2. Usual case -- this ledger was updated since the last audit txn
        elif last_audit_txn_data is not None and last_audit_txn_data[AUDIT_TXN_LEDGERS_SIZE][lid] is not None and \
                ledger.uncommitted_size > last_audit_txn_data[AUDIT_TXN_LEDGERS_SIZE][lid]:
            txn[AUDIT_TXN_LEDGER_ROOT][lid] = Ledger.hashToStr(ledger.uncommitted_root_hash)
            txn[AUDIT_TXN_STATE_ROOT][lid] = Ledger.hashToStr(self.database_manager.get_state(lid).headHash)

        # 3. This ledger is never audited, so do not add the key
        elif last_audit_txn_data is None or lid not in last_audit_txn_data[AUDIT_TXN_LEDGER_ROOT]:
            return

        # 4. ledger is not changed in last batch => delta = delta + 1
        elif isinstance(last_audit_txn_data[AUDIT_TXN_LEDGER_ROOT][lid], int):
            txn[AUDIT_TXN_LEDGER_ROOT][lid] = last_audit_txn_data[AUDIT_TXN_LEDGER_ROOT][lid] + 1

        # 5. ledger is changed in last batch but not changed now => delta = 1
        elif last_audit_txn_data:
            txn[AUDIT_TXN_LEDGER_ROOT][lid] = 1
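Putting the branches above together with made-up values: for a batch that touches ledger 1 while ledger 0 was already recorded as unchanged in the previous audit txn, the resulting ledgerRoot entries would look roughly like this (the root-hash string is a placeholder):

# Illustrative only: ledger 1 changed in this batch, so it gets its uncommitted
# root hash as a string (case 1); ledger 0 did not change and was already a
# delta of 1 in the last audit txn, so the delta is incremented (case 4).
expected_ledger_root_entries = {
    1: '<base58 root of ledger 1>',
    0: 2,
}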
Example #25
def createGenesisTxnFile(genesisTxns,
                         targetDir,
                         fileName,
                         fieldOrdering,
                         reset=True):
    ledger = Ledger(CompactMerkleTree(),
                    dataDir=targetDir,
                    serializer=CompactSerializer(fields=fieldOrdering),
                    fileName=fileName)

    if reset:
        ledger.reset()

    reqIds = {}
    for txn in genesisTxns:
        identifier = txn.get(f.IDENTIFIER.nm, "")
        if identifier not in reqIds:
            reqIds[identifier] = 0
        reqIds[identifier] += 1
        txn.update({
            f.REQ_ID.nm: reqIds[identifier],
            f.IDENTIFIER.nm: identifier
        })
        ledger.add(txn)
    ledger.stop()
Example #26
def testRecoverLedgerFromHashStore(odbhs, tdir):
    cleanup(odbhs)
    tree = CompactMerkleTree(hashStore=odbhs)
    ledger = Ledger(tree=tree, dataDir=tdir)
    for d in range(10):
        ledger.add(str(d).encode())
    updatedTree = ledger.tree
    ledger.stop()

    tree = CompactMerkleTree(hashStore=odbhs)
    restartedLedger = Ledger(tree=tree, dataDir=tdir)
    assert restartedLedger.size == ledger.size
    assert restartedLedger.root_hash == ledger.root_hash
    assert restartedLedger.tree.hashes == updatedTree.hashes
    assert restartedLedger.tree.root_hash == updatedTree.root_hash
Example #27
def updateGenesisPoolTxnFile(genesisTxnDir,
                             genesisTxnFile,
                             txn,
                             waitTimeIfAlreadyLocked=5):
    # The lock is an advisory lock; it might not work on Linux filesystems
    # not mounted with the option `-o mand`. Another approach could be to use
    # a .lock file to indicate the presence or absence of a lock.
    genesisFilePath = open(os.path.join(genesisTxnDir, genesisTxnFile), 'a+')
    try:
        # Exclusively lock the file in a non-blocking manner. Locking is
        # necessary since there might be multiple clients running on a
        # machine, so genesis files should be updated safely.
        # TODO: There is no automated test in the codebase that confirms this.
        # It has only been manually tested in the python terminal. Add a test
        # for it using multiple processes writing concurrently.
        portalocker.Lock(genesisFilePath,
                         truncate=None,
                         flags=portalocker.LOCK_EX | portalocker.LOCK_NB)
        seqNo = txn[F.seqNo.name]
        ledger = Ledger(
            CompactMerkleTree(hashStore=FileHashStore(dataDir=genesisTxnDir)),
            dataDir=genesisTxnDir,
            fileName=genesisTxnFile)
        ledgerSize = len(ledger)
        if seqNo - ledgerSize == 1:
            ledger.add({k: v for k, v in txn.items() if k != F.seqNo.name})
            logger.debug('Adding transaction with sequence number {} in'
                         ' genesis pool transaction file'.format(seqNo))
        else:
            logger.debug('Already {} genesis pool transactions present so '
                         'transaction with sequence number {} '
                         'not applicable'.format(ledgerSize, seqNo))
        portalocker.unlock(genesisFilePath)
    except portalocker.AlreadyLocked as ex:
        logger.info(
            "file is already locked: {}, will retry in few seconds".format(
                genesisFilePath))
        if waitTimeIfAlreadyLocked <= 15:
            time.sleep(waitTimeIfAlreadyLocked)
            updateGenesisPoolTxnFile(genesisTxnDir, genesisTxnFile, txn,
                                     waitTimeIfAlreadyLocked + 5)
        else:
            logger.error(
                "already locked error even after few attempts {}: {}".format(
                    genesisFilePath, str(ex)))
    except portalocker.LockException as ex:
        logger.error("error occurred during locking file {}: {}".format(
            genesisFilePath, str(ex)))
Example #28
    def emulate_ordered_processing(self, msg: Ordered):
        three_pc_batch = ThreePcBatch.from_ordered(msg)
        three_pc_batch.txn_root = Ledger.hashToStr(three_pc_batch.txn_root)
        three_pc_batch.state_root = Ledger.hashToStr(three_pc_batch.state_root)
        self._write_manager.commit_batch(three_pc_batch)

        possible_added = set(three_pc_batch.node_reg) - set(self._previous_node_reg)
        possible_removed = set(self._previous_node_reg) - set(three_pc_batch.node_reg)
        if possible_added:
            for node_name in list(possible_added):
                self._internal_bus.send(NeedAddNode(node_name, self.name))
        if possible_removed:
            for node_name in list(possible_removed):
                self._internal_bus.send(NeedRemoveNode(node_name, self.name))
        if possible_added or possible_removed:
            self._previous_node_reg = three_pc_batch.node_reg
Example #29
def _open_old_ledger(data_directory, old_ledger_file, hash_store_name,
                     serializer):
    # open old Ledger with leveldb hash store (to re-init it)
    old_txn_log_store = ChunkedFileStore(data_directory,
                                         old_ledger_file,
                                         isLineNoKey=True,
                                         storeContentHash=False)
    old_ledger = Ledger(CompactMerkleTree(hashStore=LevelDbHashStore(
        dataDir=data_directory, fileNamePrefix=hash_store_name)),
                        dataDir=data_directory,
                        txn_serializer=serializer,
                        hash_serializer=serializer,
                        fileName=old_ledger_file,
                        transactionLogStore=old_txn_log_store)

    old_ledger.stop()
Example #30
    def _create_audit_txn_data(self, three_pc_batch, last_audit_txn):
        # 1. general format and (view_no, pp_seq_no)
        txn = {
            TXN_VERSION: "1",
            AUDIT_TXN_VIEW_NO: three_pc_batch.view_no,
            AUDIT_TXN_PP_SEQ_NO: three_pc_batch.pp_seq_no,
            AUDIT_TXN_LEDGERS_SIZE: {},
            AUDIT_TXN_LEDGER_ROOT: {},
            AUDIT_TXN_STATE_ROOT: {},
            AUDIT_TXN_PRIMARIES: None
        }

        for lid, ledger in self.database_manager.ledgers.items():
            if lid == AUDIT_LEDGER_ID:
                continue
            # 2. ledger size
            txn[AUDIT_TXN_LEDGERS_SIZE][lid] = ledger.uncommitted_size

            # 3. ledger root (either the root hash, or the seqNo of the audit
            # txn in which the ledger last changed)
            # TODO: support setting for multiple ledgers
            self.__fill_ledger_root_hash(txn, three_pc_batch, lid, last_audit_txn)

        # 4. state root hash
        txn[AUDIT_TXN_STATE_ROOT][three_pc_batch.ledger_id] = Ledger.hashToStr(three_pc_batch.state_root)

        # 5. set primaries field
        self.__fill_primaries(txn, three_pc_batch, last_audit_txn)

        return txn
Example #31
    def __fill_ledger_root_hash(self, txn, three_pc_batch, lid,
                                last_audit_txn):
        target_ledger_id = three_pc_batch.ledger_id
        last_audit_txn_data = get_payload_data(
            last_audit_txn) if last_audit_txn is not None else None

        # 1. ledger is changed in this batch => root_hash
        if lid == target_ledger_id:
            txn[AUDIT_TXN_LEDGER_ROOT][str(lid)] = Ledger.hashToStr(
                three_pc_batch.txn_root)

        # 2. This ledger is never audited, so do not add the key
        elif last_audit_txn_data is None or str(
                lid) not in last_audit_txn_data[AUDIT_TXN_LEDGER_ROOT]:
            return

        # 3. ledger is not changed in last batch => the same audit seq no
        elif isinstance(last_audit_txn_data[AUDIT_TXN_LEDGER_ROOT][str(lid)],
                        int):
            txn[AUDIT_TXN_LEDGER_ROOT][str(
                lid)] = last_audit_txn_data[AUDIT_TXN_LEDGER_ROOT][str(lid)]

        # 4. ledger is changed in last batch but not changed now => seq_no of last audit txn
        elif last_audit_txn_data:
            txn[AUDIT_TXN_LEDGER_ROOT][str(lid)] = get_seq_no(last_audit_txn)
Example #32
def testRecoverLedgerFromHashStore(hashStore, tconf, tdir):
    cleanup(hashStore)
    tree = CompactMerkleTree(hashStore=hashStore)
    ledger = Ledger(tree=tree, dataDir=tdir)
    for d in range(10):
        ledger.add(str(d).encode())
    updatedTree = ledger.tree
    ledger.stop()

    tree = CompactMerkleTree(hashStore=hashStore)
    restartedLedger = Ledger(tree=tree, dataDir=tdir)
    assert restartedLedger.size == ledger.size
    assert restartedLedger.root_hash == ledger.root_hash
    assert restartedLedger.tree.hashes == updatedTree.hashes
    assert restartedLedger.tree.root_hash == updatedTree.root_hash
    restartedLedger.stop()
Example #33
def check_audit_txn(txn, view_no, pp_seq_no, seq_no, txn_time, ledger_id,
                    txn_root, state_root, pool_size, domain_size, config_size,
                    last_domain_seqno, last_pool_seqno, last_config_seqno):
    expectedLedgerRoots = {}
    # we expect deltas here, that is a difference from the current audit ledger txn to
    # the audit txn where the corresponding ledger was updated
    if last_domain_seqno:
        expectedLedgerRoots[1] = seq_no - last_domain_seqno
    if last_pool_seqno:
        expectedLedgerRoots[0] = seq_no - last_pool_seqno
    if last_config_seqno:
        expectedLedgerRoots[2] = seq_no - last_config_seqno
    expectedLedgerRoots[ledger_id] = Ledger.hashToStr(txn_root)

    expected = {
        "reqSignature": {},
        "txn": {
            "data": {
                "ledgerRoot": expectedLedgerRoots,
                "ver": "1",
                "viewNo": view_no,
                "ppSeqNo": pp_seq_no,
                "ledgerSize": {
                    0: pool_size,
                    1: domain_size,
                    2: config_size
                },
                "stateRoot": {
                    ledger_id: Ledger.hashToStr(state_root),
                }
            },
            "metadata": {},
            "protocolVersion": CURRENT_PROTOCOL_VERSION,
            "type": "2",  # AUDIT
        },
        "txnMetadata": {
            "seqNo": seq_no,
            "txnTime": txn_time
        },
        "ver": "1"
    }
    txn = JsonSerializer().serialize(txn)
    expected = JsonSerializer().serialize(expected)
    print(txn)
    print(expected)
    assert expected == txn
Example #34
def create_default_ledger(tempdir, init_genesis_txn_file=None):
    genesis_txn_initiator = GenesisTxnInitiatorFromFile(
        tempdir, init_genesis_txn_file) if init_genesis_txn_file else None
    ledger = Ledger(
        CompactMerkleTree(hashStore=FileHashStore(dataDir=tempdir)),
        dataDir=tempdir,
        genesis_txn_initiator=genesis_txn_initiator)
    return ledger
Example #35
def tdirWithLedger(tdir):
    ledger = Ledger(CompactMerkleTree(), dataDir=tdir)
    for d in range(3):
        txn = {
            TXN_TYPE: '0',
            TARGET_NYM: base58.b58encode(b'whatever'),
            DATA: {
                NAME: str(d),
                ALIAS: 'test' + str(d),
                SERVICES: [VALIDATOR],
            }
        }
        if d == 1:
            txn[TARGET_NYM] = "invalid===="
        ledger.add(txn)
    return ledger
Example #36
def _migrate_ledger(data_directory,
                    old_ledger_file, new_ledger_file,
                    serializer: MappingSerializer = None):
    """
    Test for the directory, open old and new ledger, migrate data, rename directories
    """

    # we expect a ChunkedFileStore-backed implementation of the Ledger here
    if not os.path.isdir(os.path.join(data_directory, old_ledger_file)):
        msg = 'Could not find directory {} for migration.'.format(
            old_ledger_file)
        logger.error(msg)
        raise Exception(msg)

    # open the old ledger using the specified serializer
    old_ledger_file_backup = old_ledger_file + "_new"
    old_txn_log_store = ChunkedFileStore(data_directory,
                                         old_ledger_file_backup,
                                         isLineNoKey=True,
                                         storeContentHash=False)
    old_ledger = Ledger(CompactMerkleTree(),
                        dataDir=data_directory,
                        txn_serializer=serializer,
                        hash_serializer=serializer,
                        fileName=old_ledger_file_backup,
                        transactionLogStore=old_txn_log_store)

    # open the new ledger with new serialization
    new_ledger = Ledger(CompactMerkleTree(),
                        dataDir=data_directory,
                        fileName=new_ledger_file)
    logger.info("new size for {}: {}".format(
        old_ledger_file_backup, str(new_ledger.size)))

    # add all txns into the old ledger
    for _, txn in new_ledger.getAllTxn():
        old_ledger.add(txn)
    logger.info("old size for {}: {}".format(
        new_ledger_file, str(old_ledger.size)))

    old_ledger.stop()
    new_ledger.stop()

    # now that everything succeeded, remove the new files and move the old
    # files into place
    shutil.rmtree(
        os.path.join(data_directory, new_ledger_file))
    os.rename(
        os.path.join(data_directory, old_ledger_file_backup),
        os.path.join(data_directory, old_ledger_file))
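A hypothetical invocation of the helper above; the data directory, file names and serializer are illustrative, and the real migration script wires in its own values:

# Hypothetical call: migrate the 'pool_transactions' ledger living under the
# node's data directory, using a CompactSerializer that matches the old
# on-disk format (getTxnOrderedFields as used elsewhere in these examples).
_migrate_ledger('/path/to/node/data',
                old_ledger_file='pool_transactions',
                new_ledger_file='pool_transactions_new',
                serializer=CompactSerializer(fields=getTxnOrderedFields()))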
Example #37
def testRecoverLedgerNewFieldsToTxnsAdded(tempdir):
    fhs = FileHashStore(tempdir)
    tree = CompactMerkleTree(hashStore=fhs)
    ledger = Ledger(tree=tree, dataDir=tempdir, serializer=ledgerSerializer)
    for d in range(10):
        ledger.add({
            "identifier": "i{}".format(d),
            "reqId": d,
            "op": "operation"
        })
    updatedTree = ledger.tree
    ledger.stop()

    newOrderedFields = OrderedDict([("identifier", (str, str)),
                                    ("reqId", (str, int)), ("op", (str, str)),
                                    ("newField", (str, str))])
    newLedgerSerializer = CompactSerializer(newOrderedFields)

    tree = CompactMerkleTree(hashStore=fhs)
    restartedLedger = Ledger(tree=tree,
                             dataDir=tempdir,
                             serializer=newLedgerSerializer)
    assert restartedLedger.size == ledger.size
    assert restartedLedger.root_hash == ledger.root_hash
    assert restartedLedger.tree.hashes == updatedTree.hashes
    assert restartedLedger.tree.root_hash == updatedTree.root_hash
Example #38
def _open_old_ledger(data_directory, old_ledger_file,
                     hash_store_name, serializer):
    # open old Ledger with leveldb hash store (to re-init it)
    old_txn_log_store = ChunkedFileStore(data_directory,
                                         old_ledger_file,
                                         isLineNoKey=True,
                                         storeContentHash=False)
    old_ledger = Ledger(CompactMerkleTree(
        hashStore=LevelDbHashStore(
            dataDir=data_directory,
            fileNamePrefix=hash_store_name)),
        dataDir=data_directory,
        txn_serializer=serializer,
        hash_serializer=serializer,
        fileName=old_ledger_file,
        transactionLogStore=old_txn_log_store)

    old_ledger.stop()
Example #39
def check_audit_txn(txn, view_no, pp_seq_no, seq_no, txn_time, ledger_id,
                    txn_root, state_root, pool_size, domain_size, config_size,
                    last_domain_seqno, last_pool_seqno, last_config_seqno):
    expectedLedgerRoots = {}
    if last_domain_seqno:
        expectedLedgerRoots["1"] = last_domain_seqno
    if last_pool_seqno:
        expectedLedgerRoots["0"] = last_pool_seqno
    if last_config_seqno:
        expectedLedgerRoots["2"] = last_config_seqno
    expectedLedgerRoots[str(ledger_id)] = Ledger.hashToStr(txn_root)

    expected = {
        "reqSignature": {},
        "txn": {
            "data": {
                "ledgerRoot": expectedLedgerRoots,
                "ver": "1",
                "viewNo": view_no,
                "ppSeqNo": pp_seq_no,
                "ledgerSize": {
                    "0": pool_size,
                    "1": domain_size,
                    "2": config_size
                },
                "stateRoot": {
                    str(ledger_id): Ledger.hashToStr(state_root),
                }
            },
            "metadata": {},
            "protocolVersion": CURRENT_PROTOCOL_VERSION,
            "type": "2",  # AUDIT
        },
        "txnMetadata": {
            "seqNo": seq_no,
            "txnTime": txn_time
        },
        "ver": "1"
    }
    txn = JsonSerializer().serialize(txn)
    expected = JsonSerializer().serialize(expected)
    print(txn)
    print(expected)
    assert expected == txn
Example #40
def nodeSetLedger(nodeSet, tdir):
    """
    Overrides the fixture from conftest.py
    """
    for n in nodeSet:
        dirPath = os.path.join(tdir, n.name, "temp")
        if not os.path.exists(dirPath):
            os.makedirs(dirPath)
        n.txnStore = Ledger(CompactMerkleTree(), dirPath)
    yield nodeSet
Example #41
def testConsistencyVerificationOnStartupCase1(tempdir):
    """
    One more node was added to nodes file
    """
    fhs = FileHashStore(tempdir)
    tree = CompactMerkleTree(hashStore=fhs)
    ledger = Ledger(tree=tree, dataDir=tempdir)
    tranzNum = 10
    for d in range(tranzNum):
        ledger.add(str(d).encode())
    ledger.stop()

    # Write one more node without adding it to the leaf and transaction logs
    badNode = (None, None, ('X' * 32))
    fhs.writeNode(badNode)

    with pytest.raises(ConsistencyVerificationFailed):
        tree = CompactMerkleTree(hashStore=fhs)
        ledger = NoTransactionRecoveryLedger(tree=tree, dataDir=tempdir)
        ledger.recoverTreeFromHashStore()
    ledger.stop()
Example #42
def testRecoverLedgerNewFieldsToTxnsAdded(tempdir):
    fhs = FileHashStore(tempdir)
    tree = CompactMerkleTree(hashStore=fhs)
    ledger = Ledger(tree=tree, dataDir=tempdir, serializer=ledgerSerializer)
    for d in range(10):
        ledger.add({"identifier": "i{}".format(d), "reqId": d, "op": "operation"})
    updatedTree = ledger.tree
    ledger.stop()

    newOrderedFields = OrderedDict([
        ("identifier", (str, str)),
        ("reqId", (str, int)),
        ("op", (str, str)),
        ("newField", (str, str))
    ])
    newLedgerSerializer = CompactSerializer(newOrderedFields)

    tree = CompactMerkleTree(hashStore=fhs)
    restartedLedger = Ledger(tree=tree, dataDir=tempdir, serializer=newLedgerSerializer)
    assert restartedLedger.size == ledger.size
    assert restartedLedger.root_hash == ledger.root_hash
    assert restartedLedger.tree.hashes == updatedTree.hashes
    assert restartedLedger.tree.root_hash == updatedTree.root_hash
Example #43
def testConsistencyVerificationOnStartupCase2(tempdir):
    """
    One more transaction added to transactions file
    """
    fhs = FileHashStore(tempdir)
    tree = CompactMerkleTree(hashStore=fhs)
    ledger = Ledger(tree=tree, dataDir=tempdir)
    tranzNum = 10
    for d in range(tranzNum):
        ledger.add(str(d).encode())

    # Add one more entry to the transaction log without adding it to the merkle tree
    badData = 'X' * 32
    value = ledger.leafSerializer.serialize(badData, toBytes=False)
    key = str(tranzNum + 1)
    ledger._transactionLog.put(key=key, value=value)

    ledger.stop()

    with pytest.raises(ConsistencyVerificationFailed):
        tree = CompactMerkleTree(hashStore=fhs)
        ledger = NoTransactionRecoveryLedger(tree=tree, dataDir=tempdir)
        ledger.recoverTreeFromHashStore()
    ledger.stop()
Example #44
def createGenesisTxnFile(genesisTxns, targetDir, fileName, fieldOrdering,
                         reset=True):
    ledger = Ledger(CompactMerkleTree(), dataDir=targetDir,
                    serializer=CompactSerializer(fields=fieldOrdering),
                    fileName=fileName)

    if reset:
        ledger.reset()

    reqIds = {}
    for txn in genesisTxns:
        identifier = txn.get(f.IDENTIFIER.nm, "")
        if identifier not in reqIds:
            reqIds[identifier] = 0
        reqIds[identifier] += 1
        txn.update({
            f.REQ_ID.nm: reqIds[identifier],
            f.IDENTIFIER.nm: identifier
        })
        ledger.add(txn)
    ledger.stop()
Example #45
    def _processCatchupReplies(self, ledgerType, ledger: Ledger,
                               catchUpReplies: List):
        # Remove transactions whose sequence numbers are already
        # present in the ledger
        numProcessed = sum(1 for s, _ in catchUpReplies if s <= ledger.size)
        catchUpReplies = catchUpReplies[numProcessed:]
        if numProcessed:
            logger.debug("{} found {} already processed transactions in the "
                         "catchup replies".format(self, numProcessed))
        if catchUpReplies:
            seqNo = catchUpReplies[0][0]
            if seqNo - ledger.seqNo == 1:
                result, nodeName, toBeProcessed = self.hasValidCatchupReplies(
                    ledgerType, ledger, seqNo, catchUpReplies)
                if result:
                    for _, txn in catchUpReplies[:toBeProcessed]:
                        merkleInfo = ledger.add(txn)
                        txn[F.seqNo.name] = merkleInfo[F.seqNo.name]
                        self.ledgers[ledgerType]["postTxnAddedToLedgerClbk"](
                            ledgerType, txn)
                    self._removePrcdCatchupReply(ledgerType, nodeName, seqNo)
                    return numProcessed + toBeProcessed + \
                        self._processCatchupReplies(ledgerType, ledger,
                                                    catchUpReplies[toBeProcessed:])
                else:
                    if self.ownedByNode:
                        self.owner.blacklistNode(nodeName,
                                                 reason="Sent transactions "
                                                        "that could not be "
                                                        "verified")
                        self._removePrcdCatchupReply(ledgerType, nodeName,
                                                     seqNo)
                        # Invalid transactions have to be discarded, so let
                        # the caller know how many txns have to be removed
                        # from `self.receivedCatchUpReplies`
                        return numProcessed + toBeProcessed
        return numProcessed
Example #46
def ledger(tempdir):
    ledger = Ledger(CompactMerkleTree(hashStore=FileHashStore(dataDir=tempdir)),
                    dataDir=tempdir, serializer=ledgerSerializer)
    ledger.reset()
    return ledger
Example #47
    def bootstrapTestNodesCore(baseDir,
                               poolTransactionsFile,
                               domainTransactionsFile,
                               domainTxnFieldOrder,
                               ips, nodeCount, clientCount,
                               nodeNum, startingPort):
        if not ips:
            ips = ['127.0.0.1'] * nodeCount
        else:
            ips = ips.split(",")
            if len(ips) != nodeCount:
                if len(ips) > nodeCount:
                    ips = ips[:nodeCount]
                else:
                    ips = ips + ['127.0.0.1'] * (nodeCount - len(ips))

        poolLedger = Ledger(CompactMerkleTree(),
                            dataDir=baseDir,
                            fileName=poolTransactionsFile)
        poolLedger.reset()

        domainLedger = Ledger(CompactMerkleTree(),
                              serializer=CompactSerializer(
                                  fields=domainTxnFieldOrder),
                              dataDir=baseDir,
                              fileName=domainTransactionsFile)
        domainLedger.reset()

        steward1Nym = None
        for num in range(1, nodeCount + 1):
            stewardName = "Steward" + str(num)
            sigseed = TestNetworkSetup.getSigningSeed(stewardName)
            verkey = Signer(sigseed).verhex
            stewardNym = TestNetworkSetup.getNymFromVerkey(verkey)
            txn = {
                TARGET_NYM: stewardNym,
                TXN_TYPE: NYM,
                ROLE: STEWARD,
                ALIAS: stewardName,
                TXN_ID: sha256(stewardName.encode()).hexdigest()
            }
            if num == 1:
                steward1Nym = stewardNym
            else:
                # The first steward adds every steward
                txn[f.IDENTIFIER.nm] = steward1Nym
            domainLedger.add(txn)

            nodeName = "Node" + str(num)
            nodePort, clientPort = startingPort + (num * 2 - 1), startingPort \
                                   + (num * 2)
            ip = ips[num - 1]
            sigseed = TestNetworkSetup.getSigningSeed(nodeName)
            if nodeNum == num:
                _, verkey = initLocalKeep(nodeName, baseDir, sigseed, True)
                verkey = verkey.encode()
                print("This node with name {} will use ports {} and {} for "
                      "nodestack and clientstack respectively"
                      .format(nodeName, nodePort, clientPort))
            else:
                verkey = Signer(sigseed).verhex
            txn = {
                TARGET_NYM: TestNetworkSetup.getNymFromVerkey(verkey),
                TXN_TYPE: NEW_NODE,
                f.IDENTIFIER.nm: stewardNym,
                DATA: {
                    CLIENT_IP: ip,
                    ALIAS: nodeName,
                    CLIENT_PORT: clientPort,
                    NODE_IP: ip,
                    NODE_PORT: nodePort
                },
                TXN_ID: sha256(nodeName.encode()).hexdigest()
            }
            poolLedger.add(txn)

        for num in range(1, clientCount + 1):
            clientName = "Client" + str(num)
            sigseed = TestNetworkSetup.getSigningSeed(clientName)
            verkey = Signer(sigseed).verhex
            txn = {
                f.IDENTIFIER.nm: steward1Nym,
                TARGET_NYM: TestNetworkSetup.getNymFromVerkey(verkey),
                TXN_TYPE: NYM,
                ALIAS: clientName,
                TXN_ID: sha256(clientName.encode()).hexdigest()
            }
            domainLedger.add(txn)

        poolLedger.stop()
        domainLedger.stop()
Example #48
def testRecoverMerkleTreeFromLedger(tempdir):
    ledger2 = Ledger(CompactMerkleTree(), dataDir=tempdir,
                     serializer=ledgerSerializer)
    assert ledger2.tree.root_hash is not None
    ledger2.reset()
    ledger2.stop()