Code example #1
File: conftest.py Project: skhoroshavin/token-plugin
def db_manager(tconf):
    _db_manager = DatabaseManager()
    storage = initKeyValueStorage(
        KeyValueStorageType.Memory,
        None,
        "tokenInMemoryStore",
        txn_serializer=serialization.multi_sig_store_serializer)
    ledger = get_fake_ledger()

    def commit_txns(count):
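        # Fake commit: just promote the uncommitted root hash to committed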
        ledger.committed_root_hash = ledger.uncommitted_root_hash
        return None, [1]

    ledger.commitTxns = commit_txns
    ledger.root_hash = txn_root_serializer.serialize("1")
    ledger.uncommitted_root_hash = "1"
    ledger.uncommitted_size = 1
    ledger.size = 0
    ledger.discardTxns = lambda x: None
    ledger.committed_root_hash = "-1"
    ledger.append_txns_metadata = lambda txns, txn_time: [
        append_txn_metadata(txn, 2, txn_time, 2) for txn in txns
    ]
    ledger.appendTxns = lambda x: (None, x)
    _db_manager.register_new_database(TOKEN_LEDGER_ID, ledger,
                                      PruningState(storage))
    return _db_manager
Code example #2
def loadState(self):
    return PruningState(
        initKeyValueStorage(
            self.config.poolStateStorage,
            self.node.dataLocation,
            self.config.poolStateDbName)
    )
Code example #3
def req_ids_to_txn(tconf):
    dataLocation = tconf.GENERAL_CONFIG_DIR + "/req_id_to_txn"
    if not os.path.isdir(dataLocation):
        os.makedirs(dataLocation)
    return ReqIdrToTxn(
        initKeyValueStorage(tconf.reqIdToTxnStorage, dataLocation,
                            tconf.seqNoDbName))
Code example #4
File: node.py Project: dougives/indy-node
def loadAttributeStore(self):
    return AttributeStore(
        initKeyValueStorage(
            self.config.attrStorage,
            self.dataLocation,
            self.config.attrDbName)
    )
Code example #5
File: node.py Project: shsedghi/indy-node
def loadAttributeStore(self):
    return AttributeStore(
        initKeyValueStorage(
            self.config.attrStorage,
            self.dataLocation,
            self.config.attrDbName)
    )
Code example #6
def storage(request, tdir) -> KeyValueStorage:
    global db_no
    db = initKeyValueStorage(request.param, tdir,
                             'metrics_db_{}'.format(db_no))
    db_no += 1
    yield db
    db.close()
Code example #7
def __init__(self,
             key_value_type,
             data_location,
             key_value_storage_name,
             serializer=None):
    self._kvs = initKeyValueStorage(key_value_type, data_location,
                                    key_value_storage_name)
    self._serializer = serializer or multi_sig_store_serializer
Code example #8
File: node.py Project: dougives/indy-node
def getIdrCache(self):
    if self.idrCache is None:
        self.idrCache = IdrCache(self.name,
                                 initKeyValueStorage(self.config.idrCacheStorage,
                                                     self.dataLocation,
                                                     self.config.idrCacheDbName)
                                 )
    return self.idrCache
Code example #9
def getIdrCache(self):
    if self.idrCache is None:
        self.idrCache = IdrCache(
            self.name,
            initKeyValueStorage(self.config.idrCacheStorage,
                                self.dataLocation,
                                self.config.idrCacheDbName))
    return self.idrCache
Code example #10
File: conftest.py Project: skhoroshavin/token-plugin
def utxo_cache(db_manager):
    cache = UTXOCache(
        initKeyValueStorage(KeyValueStorageType.Memory, None,
                            "utxoInMemoryStore"))
    db_manager.register_new_store(UTXO_CACHE_LABEL, cache)
    yield cache
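    # Teardown: roll back any UTXOs the test left uncommitted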
    if cache.un_committed:
        cache.reject_batch()
Code example #11
File: node_bootstrap.py Project: ken-ebert/indy-node
def init_idr_cache_storage(self):
    idr_cache = IdrCache(self.node.name,
                         initKeyValueStorage(self.node.config.idrCacheStorage,
                                             self.node.dataLocation,
                                             self.node.config.idrCacheDbName,
                                             db_config=self.node.config.db_idr_cache_db_config)
                         )
    self.node.db_manager.register_new_store(IDR_CACHE_LABEL, idr_cache)
Code example #12
File: node_bootstrap.py Project: ken-ebert/indy-node
def init_attribute_store(self):
    return AttributeStore(
        initKeyValueStorage(
            self.node.config.attrStorage,
            self.node.dataLocation,
            self.node.config.attrDbName,
            db_config=self.node.config.db_attr_db_config)
    )
Code example #13
def idr_cache(tconf, tdir):
    name = 'name'
    idr_cache = IdrCache(name,
                         initKeyValueStorage(KeyValueStorageType.Rocksdb,
                                             tdir,
                                             tconf.idrCacheDbName,
                                             db_config=tconf.db_idr_cache_db_config))
    return idr_cache
Code example #14
def init_config_state(self):
    return PruningState(
        initKeyValueStorage(
            self.node.config.configStateStorage,
            self.node.dataLocation,
            self.node.config.configStateDbName,
            db_config=self.node.config.db_state_config)
    )
Code example #15
File: conftest.py Project: skhoroshavin/token-plugin
def db_manager_with_config(db_manager, utxo_cache):
    storage = initKeyValueStorage(KeyValueStorageType.Memory,
                                  None,
                                  "configInMemoryStore",
                                  txn_serializer=in_memory_serializer)
    ledger = get_fake_ledger()
    db_manager.register_new_database(CONFIG_LEDGER_ID, ledger,
                                     PruningState(storage))
    return db_manager
Code example #16
def req_ids_to_txn(tconf):
    dataLocation = tconf.GENERAL_CONFIG_DIR + "/req_id_to_txn"
    if not os.path.isdir(dataLocation):
        os.makedirs(dataLocation)
    return ReqIdrToTxn(
        initKeyValueStorage(
            tconf.reqIdToTxnStorage,
            dataLocation,
            tconf.seqNoDbName)
    )
Code example #17
def node_status_db(tconf):
    data_location = tconf.GENERAL_CONFIG_DIR + "/node_status_db"
    if not os.path.isdir(data_location):
        os.makedirs(data_location)
    node_status_db = initKeyValueStorage(
        tconf.nodeStatusStorage,
        data_location,
        tconf.nodeStatusDbName,
        db_config=tconf.db_node_status_db_config)
    yield node_status_db
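    # Teardown: drop the store contents once the test is done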
    node_status_db.drop()
Code example #18
def _create_state(self, name: str) -> PruningState:
    storage_name = getattr(self.config, "{}StateStorage".format(name))
    db_name = getattr(self.config, "{}StateDbName".format(name))
    if self.data_location is not None:
        return PruningState(
            initKeyValueStorage(storage_name,
                                self.data_location,
                                db_name,
                                db_config=self.config.db_state_config))
    else:
        return PruningState(KeyValueStorageInMemory())
Code example #19
File: bls_store.py Project: michaeldboyd/indy-plenum
def __init__(self,
             key_value_type,
             data_location,
             key_value_storage_name,
             serializer=None,
             db_config=None):
    self._kvs = initKeyValueStorage(key_value_type,
                                    data_location,
                                    key_value_storage_name,
                                    db_config=db_config)
    self._serializer = serializer or multi_sig_store_serializer
Code example #20
def test_kv_store_metrics_config(looper, txnPoolNodeSet, tdir, tconf,
                                 sdk_pool_handle, sdk_wallet_client):
    sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                              sdk_wallet_client, 15)

    for node in txnPoolNodeSet:
        storage = initKeyValueStorage(tconf.METRICS_KV_STORAGE,
                                      node.dataLocation,
                                      tconf.METRICS_KV_DB_NAME,
                                      read_only=True)

        check_metrics_data(storage)
Code example #21
File: conftest.py Project: louijose/indy-Dev
def db_manager(tconf, tdir):
    db_manager = DatabaseManager()
    name = 'name'
    idr_cache = IdrCache(
        name,
        initKeyValueStorage(KeyValueStorageType.Rocksdb,
                            tdir,
                            tconf.idrCacheDbName,
                            db_config=tconf.db_idr_cache_db_config))
    db_manager.register_new_store('idr', idr_cache)
    db_manager.register_new_database(DOMAIN_LEDGER_ID, get_fake_ledger(),
                                     State())
    return db_manager
Code example #22
def test_kv_store_metrics_config(looper, txnPoolNodeSet, tdir, tconf, sdk_pool_handle, sdk_wallet_client):
    total_time = 1.5 * tconf.PerfCheckFreq
    total_iters = 5
    iter_time = total_time / total_iters

    for _ in range(total_iters):
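        # Send a batch of requests, then keep the pool running until the next iteration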
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_client, 15)
        looper.runFor(iter_time)

    for node in txnPoolNodeSet:
        storage = initKeyValueStorage(tconf.METRICS_KV_STORAGE,
                                      node.dataLocation,
                                      tconf.METRICS_KV_DB_NAME,
                                      read_only=True)

        check_metrics_data(storage)
Code example #23
def db_manager(tconf, tdir):
    db_manager = DatabaseManager()

    state = State()
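    # Back get/set with a plain dict so no real state trie is required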
    state.txn_list = {}
    state.get = lambda key, isCommitted=True: state.txn_list.get(key, None)
    state.set = lambda key, value: state.txn_list.update({key: value})

    name = 'name'
    idr_cache = IdrCache(name,
                         initKeyValueStorage(KeyValueStorageType.Rocksdb,
                                             tdir,
                                             tconf.idrCacheDbName,
                                             db_config=tconf.db_idr_cache_db_config))
    db_manager.register_new_store(IDR_CACHE_LABEL, idr_cache)
    db_manager.register_new_database(DOMAIN_LEDGER_ID, get_fake_ledger(), state)
    return db_manager
Code example #24
def init_storages(node):
    # Token ledger and state init
    if TOKEN_LEDGER_ID not in node.ledger_ids:
        node.ledger_ids.append(TOKEN_LEDGER_ID)
    token_state = init_token_state(node)
    token_ledger = init_token_ledger(node)
    node.db_manager.register_new_database(TOKEN_LEDGER_ID, token_ledger,
                                          token_state)
    init_token_database(node)

    # UTXO store init
    node.db_manager.register_new_store(
        UTXO_CACHE_LABEL,
        UTXOCache(
            initKeyValueStorage(node.config.utxoCacheStorage,
                                node.dataLocation,
                                node.config.utxoCacheDbName)))
Code example #25
def get_utxo_cache(data_dir, name, config):
    return UTXOCache(initKeyValueStorage(
        config.utxoCacheStorage, data_dir, name))
Code example #26
File: main.py Project: ArtObr/indy-scp
def init_contract_state(node):
    return PruningState(
        initKeyValueStorage(node.config.contractStateStorage,
                            node.dataLocation,
                            node.config.contractStateDbName,
                            db_config=node.config.db_state_config))
Code example #27
def test_kv_store_metrics_config(looper, txnPoolNodeSet, tdir, tconf,
                                 sdk_pool_handle, sdk_wallet_client):
    total_time = 1.5 * tconf.PerfCheckFreq
    total_iters = 5
    iter_time = total_time / total_iters

    for _ in range(total_iters):
        sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
                                  sdk_wallet_client, 15)
        looper.runFor(iter_time)

    for node in txnPoolNodeSet:
        storage = initKeyValueStorage(tconf.METRICS_KV_STORAGE,
                                      node.dataLocation,
                                      tconf.METRICS_KV_DB_NAME,
                                      read_only=True)

        events = [
            KvStoreMetricsFormat.decode(k, v) for k, v in storage.iterator()
        ]

        # Check that metrics are actually written
        assert len(events) > 0

        # Check that all events are stored in correct order
        assert sorted(events, key=lambda v: v.timestamp) == events

        # We don't expect some events in this test
        unexpected_events = {
            MetricsName.CATCHUP_TXNS_SENT,
            MetricsName.CATCHUP_TXNS_RECEIVED,
            MetricsName.GC_GEN2_TIME,
            MetricsName.GC_UNCOLLECTABLE_OBJECTS,
            MetricsName.GC_GEN2_COLLECTED_OBJECTS,
            MetricsName.PROCESS_CHECKPOINT_TIME,
            MetricsName.SEND_CHECKPOINT_TIME,
            MetricsName.BACKUP_PROCESS_CHECKPOINT_TIME,
            MetricsName.BACKUP_SEND_CHECKPOINT_TIME,
            MetricsName.PROCESS_CONSISTENCY_PROOF_TIME,
            MetricsName.PROCESS_CATCHUP_REQ_TIME,
            MetricsName.PROCESS_CATCHUP_REP_TIME,
            MetricsName.NODE_CHECK_NODE_REQUEST_SPIKE,
            MetricsName.NODE_SEND_REJECT_TIME,

            # Obsolete metrics
            MetricsName.DESERIALIZE_DURING_UNPACK_TIME,

            # TODO: reduce monitor window so these events are also captured
            MetricsName.MONITOR_AVG_THROUGHPUT,
            MetricsName.BACKUP_MONITOR_AVG_THROUGHPUT,
            MetricsName.MONITOR_AVG_LATENCY,
            MetricsName.BACKUP_MONITOR_AVG_LATENCY,

            # Temporary metrics
            MetricsName.STORAGE_IDR_CACHE_READERS,
            MetricsName.STORAGE_IDR_CACHE_TABLES_NUM,
            MetricsName.STORAGE_IDR_CACHE_TABLES_SIZE,
            MetricsName.STORAGE_ATTRIBUTE_STORE_READERS,
            MetricsName.STORAGE_ATTRIBUTE_STORE_TABLES_NUM,
            MetricsName.STORAGE_ATTRIBUTE_STORE_TABLES_SIZE
        }

        # Don't expect some metrics from master primary
        if node.master_replica.isPrimary:
            unexpected_events.add(MetricsName.PROCESS_PREPREPARE_TIME)
            unexpected_events.add(MetricsName.SEND_PREPARE_TIME)
        else:
            unexpected_events.add(MetricsName.SEND_PREPREPARE_TIME)
            unexpected_events.add(MetricsName.CREATE_3PC_BATCH_TIME)
            unexpected_events.add(MetricsName.BLS_UPDATE_PREPREPARE_TIME)

        # Don't expect some metrics from backup primary
        assert node.replicas.num_replicas == 2
        if node.replicas[1].isPrimary:
            unexpected_events.add(MetricsName.BACKUP_PROCESS_PREPREPARE_TIME)
            unexpected_events.add(MetricsName.BACKUP_SEND_PREPARE_TIME)
        else:
            unexpected_events.add(MetricsName.BACKUP_SEND_PREPREPARE_TIME)
            unexpected_events.add(MetricsName.BACKUP_CREATE_3PC_BATCH_TIME)
            unexpected_events.add(MetricsName.BLS_UPDATE_PREPREPARE_TIME)

        if not node.primaryDecider:
            unexpected_events.add(MetricsName.PRIMARY_DECIDER_ACTION_QUEUE)
            unexpected_events.add(MetricsName.PRIMARY_DECIDER_AQ_STASH)
            unexpected_events.add(
                MetricsName.PRIMARY_DECIDER_REPEATING_ACTIONS)
            unexpected_events.add(MetricsName.PRIMARY_DECIDER_SCHEDULED)
            unexpected_events.add(MetricsName.PRIMARY_DECIDER_INBOX)
            unexpected_events.add(MetricsName.PRIMARY_DECIDER_OUTBOX)

        # Check that all event types happened during test
        metric_names = {ev.name for ev in events}
        for t in MetricsName:
            if t in unexpected_events or t > TMP_METRIC:
                continue
            assert t in metric_names
Code example #28
def get_graphchain_state(data_dir, name, config):
    logger.info("Creating LEI state with name '{}' in the '{}' dir and storage type equal to '{}'."
                .format(name, data_dir, config.graphchainStateStorage))

    return PruningState(initKeyValueStorage(config.graphchainStateStorage, data_dir, name))
Code example #29
File: conftest.py Project: michaeldboyd/indy-plenum
def storage(request, tdir) -> KeyValueStorage:
    global db_no
    db = initKeyValueStorage(request.param, tdir, 'metrics_db_{}'.format(db_no))
    db_no += 1
    yield db
    db.close()
Code example #30
def migrate_txn_log(db_dir, db_name):
    def put_into_seq_no_db(txn):
        # If there is no reqId, then it's genesis txn
        if get_req_id(txn) is None:
            return
        txn_new = copy.deepcopy(txn)
        operation = get_payload_data(txn_new)
        operation[TXN_TYPE] = get_type(txn_new)
        dct = {
            f.IDENTIFIER.nm: get_from(txn_new),
            f.REQ_ID.nm: get_req_id(txn_new),
            OPERATION: operation,
        }
        if get_protocol_version(txn_new) is not None:
            dct[f.PROTOCOL_VERSION.nm] = get_protocol_version(txn_new)
        digest = sha256(serialize_msg_for_signing(dct)).hexdigest().encode()
        seq_no = get_seq_no(txn_new)
        ledger_id = get_ledger_id_by_txn_type(operation[TXN_TYPE])
        line_to_record = str(ledger_id) + ReqIdrToTxn.delimiter + str(seq_no)
        dest_seq_no_db_storage.put(digest, line_to_record)
        return digest

    new_db_name = db_name + '_new'
    old_path = os.path.join(db_dir, db_name)
    new_path = os.path.join(db_dir, new_db_name)
    new_seqno_db_name = config.seqNoDbName + '_new'
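    # Rebuild everything under temporary '_new' names, then swap them in below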
    try:
        dest_seq_no_db_storage = initKeyValueStorage(config.reqIdToTxnStorage,
                                                     db_dir, new_seqno_db_name)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not open new seq_no_db storage")
        return False

    # open new and old ledgers
    try:
        src_storage = KeyValueStorageRocksdbIntKeys(db_dir,
                                                    db_name,
                                                    read_only=True)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not open old ledger: {}".format(
            os.path.join(db_dir, db_name)))
        return False

    try:
        dest_storage = KeyValueStorageRocksdbIntKeys(db_dir, new_db_name)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not open new ledger: {}".format(
            os.path.join(db_dir, new_db_name)))
        return False

    # put values from old ledger to the new one
    try:
        for key, val in src_storage.iterator():
            key = key.decode()
            val = ledger_txn_serializer.deserialize(val)
            new_val = transform_to_new_format(txn=val, seq_no=int(key))
            digest = put_into_seq_no_db(new_val)
            # add digest into txn
            if get_req_id(new_val):
                new_val[TXN_PAYLOAD][TXN_PAYLOAD_METADATA][
                    TXN_PAYLOAD_METADATA_DIGEST] = digest
            new_val = ledger_txn_serializer.serialize(new_val)
            dest_storage.put(key, new_val)

    except Exception:
        logger.error(traceback.print_exc())
        logger.error(
            "Could not put key/value to the new ledger '{}'".format(db_name))
        return False

    src_storage.close()
    dest_storage.close()
    dest_seq_no_db_storage.close()

    # Remove old ledger
    try:
        shutil.rmtree(old_path)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not remove old ledger: {}".format(old_path))
        return False

    # Rename new ledger to old one
    try:
        shutil.move(new_path, old_path)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error(
            "Could not rename temporary new ledger from '{}' to '{}'".format(
                new_path, old_path))
        return False
    try:
        set_own_perm("indy", old_path)
    except Exception:
        pass

    return True
Code example #31
File: storage.py Project: zmh0531/indy-plenum
def get_auction_state(data_dir, name, config):
    return PruningState(initKeyValueStorage(
        config.auctionStateStorage, data_dir, name))
Code example #32
def migrate_txn_log(db_dir, db_name):

    def put_into_seq_no_db(txn):
        # If there is no reqId, then it's genesis txn
        if get_req_id(txn) is None:
            return
        txn_new = copy.deepcopy(txn)
        operation = get_payload_data(txn_new)
        operation[TXN_TYPE] = get_type(txn_new)
        dct = {
            f.IDENTIFIER.nm: get_from(txn_new),
            f.REQ_ID.nm: get_req_id(txn_new),
            OPERATION: operation,
        }
        if get_protocol_version(txn_new) is not None:
            dct[f.PROTOCOL_VERSION.nm] = get_protocol_version(txn_new)
        digest = sha256(serialize_msg_for_signing(dct)).hexdigest()
        seq_no = get_seq_no(txn_new)
        ledger_id = get_ledger_id_by_txn_type(operation[TXN_TYPE])
        line_to_record = str(ledger_id) + ReqIdrToTxn.delimiter + str(seq_no)
        dest_seq_no_db_storage.put(digest, line_to_record)
        return digest

    new_db_name = db_name + '_new'
    old_path = os.path.join(db_dir, db_name)
    new_path = os.path.join(db_dir, new_db_name)
    new_seqno_db_name = config.seqNoDbName + '_new'
    try:
        dest_seq_no_db_storage = initKeyValueStorage(config.reqIdToTxnStorage,
                                                     db_dir,
                                                     new_seqno_db_name)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not open new seq_no_db storage")
        return False

    # open new and old ledgers
    try:
        src_storage = KeyValueStorageRocksdbIntKeys(db_dir, db_name, read_only=True)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not open old ledger: {}".format(os.path.join(db_dir, db_name)))
        return False

    try:
        dest_storage = KeyValueStorageRocksdbIntKeys(db_dir, new_db_name)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not open new ledger: {}".format(os.path.join(db_dir, new_db_name)))
        return False

    # put values from old ledger to the new one
    try:
        for key, val in src_storage.iterator():
            key = key.decode()
            val = ledger_txn_serializer.deserialize(val)
            new_val = transform_to_new_format(txn=val, seq_no=int(key))
            digest = put_into_seq_no_db(new_val)
            # add digest into txn
            if get_req_id(new_val):
                new_val[TXN_PAYLOAD][TXN_PAYLOAD_METADATA][TXN_PAYLOAD_METADATA_DIGEST] = digest
            new_val = ledger_txn_serializer.serialize(new_val)
            dest_storage.put(key, new_val)

    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not put key/value to the new ledger '{}'".format(db_name))
        return False

    src_storage.close()
    dest_storage.close()
    dest_seq_no_db_storage.close()

    # Remove old ledger
    try:
        shutil.rmtree(old_path)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not remove old ledger: {}"
                     .format(old_path))
        return False

    # Rename new ledger to old one
    try:
        shutil.move(new_path, old_path)
    except Exception:
        logger.error(traceback.print_exc())
        logger.error("Could not rename temporary new ledger from '{}' to '{}'"
                     .format(new_path, old_path))
        return False
    try:
        set_own_perm("indy", old_path)
    except Exception:
        pass

    return True
Code example #33
def get_token_state(data_dir, name, config):
    return PruningState(initKeyValueStorage(
        config.tokenStateStorage, data_dir, name))