Example #1
def create_ledger_chunked_file_storage(txn_serializer,
                                       hash_serializer,
                                       tempdir,
                                       init_genesis_txn_file=None):
    chunk_creator = None
    db_name = 'transactions'
    if isinstance(txn_serializer, MsgPackSerializer):
        # TODO: fix chunk_creator support
        def chunk_creator(name):
            return BinarySerializerBasedFileStore(txn_serializer,
                                                  os.path.join(
                                                      tempdir, db_name),
                                                  name,
                                                  isLineNoKey=True,
                                                  storeContentHash=False,
                                                  ensureDurability=False)

    store = ChunkedFileStore(tempdir,
                             db_name,
                             isLineNoKey=True,
                             chunkSize=5,
                             chunk_creator=chunk_creator,
                             storeContentHash=False,
                             ensureDurability=False)
    return __create_ledger(store, txn_serializer, hash_serializer, tempdir,
                           init_genesis_txn_file)
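For context, a minimal usage sketch of this factory; the MsgPackSerializer import path and its no-argument constructor are assumptions, and the private __create_ledger helper is expected to live in the same module:

# Sketch only: the import path and the no-argument constructor are assumptions.
import tempfile
from common.serializers.msgpack_serializer import MsgPackSerializer

ledger = create_ledger_chunked_file_storage(txn_serializer=MsgPackSerializer(),
                                            hash_serializer=MsgPackSerializer(),
                                            tempdir=tempfile.mkdtemp())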
Example #2
def test_chunked_and_text_store_equality(tmpdir):
    """
    This test verifies that TextFileStore and ChunkedFileStore behave identically
    """
    isLineNoKey = True
    storeContentHash = False
    ensureDurability = True
    dbDir = str(tmpdir)

    chunkSize = 4
    chunkedStore = ChunkedFileStore(dbDir=dbDir,
                                    dbName="chunked_data",
                                    isLineNoKey=isLineNoKey,
                                    storeContentHash=storeContentHash,
                                    chunkSize=chunkSize,
                                    ensureDurability=ensureDurability)

    textStore = TextFileStore(dbDir=dbDir,
                              dbName="text_data",
                              isLineNoKey=isLineNoKey,
                              storeContentHash=storeContentHash,
                              ensureDurability=ensureDurability)

    for i in range(1, 5 * chunkSize):
        value = "Some data {}".format(str(i))
        chunkedStore.put(None, value)
        textStore.put(None, value)
        assert textStore.get(str(i))
        assert textStore.get(str(i)) == chunkedStore.get(str(i))

    assert list(chunkedStore.iterator()) == \
        list(textStore.iterator())
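As a hedged follow-up (not part of the original test), the on-disk layout could also be checked at the end of the test body; this assumes, as the directory check in Example #3 implies, that ChunkedFileStore keeps its chunks as separate files in a directory named after dbName, and that os is imported in the test module:

    # Assumption: chunks live under <dbDir>/chunked_data; with chunkSize=4 and
    # the 19 records written above, more than one chunk file should exist.
    chunk_dir = os.path.join(dbDir, "chunked_data")
    assert os.path.isdir(chunk_dir)
    assert len(os.listdir(chunk_dir)) > 1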
Example #3
def __migrate_ledger(data_directory,
                     old_ledger_file, new_ledger_file,
                     serializer: MappingSerializer = None):
    """
    Check that the ledger directory exists, copy the transactions from the new
    ledger into a ledger re-serialized with the given serializer, then move the
    re-serialized files into place
    """

    # the ledger is expected to use the ChunkedFileStore implementation,
    # i.e. a directory per ledger rather than a single file
    if not os.path.isdir(os.path.join(data_directory, old_ledger_file)):
        msg = 'Could not find directory {} for migration.'.format(
            old_ledger_file)
        logger.error(msg)
        raise Exception(msg)

    # build the re-serialized ledger in a temporary '_new'-suffixed directory,
    # using the specified serializer
    old_ledger_file_backup = old_ledger_file + "_new"
    old_txn_log_store = ChunkedFileStore(data_directory,
                                         old_ledger_file_backup,
                                         isLineNoKey=True,
                                         storeContentHash=False)
    old_ledger = Ledger(CompactMerkleTree(),
                        dataDir=data_directory,
                        txn_serializer=serializer,
                        hash_serializer=serializer,
                        fileName=old_ledger_file_backup,
                        transactionLogStore=old_txn_log_store)

    # open the new ledger with new serialization
    new_ledger = Ledger(CompactMerkleTree(),
                        dataDir=data_directory,
                        fileName=new_ledger_file)
    logger.info("new size for {}: {}".format(
        old_ledger_file_backup, str(new_ledger.size)))

    # add all txns into the old ledger
    for _, txn in new_ledger.getAllTxn():
        old_ledger.add(txn)
    logger.info("old size for {}: {}".format(
        new_ledger_file, str(old_ledger.size)))

    old_ledger.stop()
    new_ledger.stop()

    # now that everything succeeded, remove the new files and move the old
    # files into place
    shutil.rmtree(
        os.path.join(data_directory, new_ledger_file))
    os.rename(
        os.path.join(data_directory, old_ledger_file_backup),
        os.path.join(data_directory, old_ledger_file))
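A minimal invocation sketch; the data directory and ledger file names below are purely illustrative, and JsonSerializer stands in for any MappingSerializer implementation:

# Hypothetical call from within the same migration module; every name used
# here is illustrative, not taken from a real deployment.
__migrate_ledger(data_directory='/path/to/node/data',
                 old_ledger_file='domain_transactions',
                 new_ledger_file='domain_transactions_new',
                 serializer=JsonSerializer())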
Example #4
def _open_old_ledger(data_directory, old_ledger_file, hash_store_name,
                     serializer):
    # open the old Ledger with a LevelDB-backed hash store to re-initialize it
    old_txn_log_store = ChunkedFileStore(data_directory,
                                         old_ledger_file,
                                         isLineNoKey=True,
                                         storeContentHash=False)
    old_ledger = Ledger(CompactMerkleTree(hashStore=LevelDbHashStore(
        dataDir=data_directory, fileNamePrefix=hash_store_name)),
                        dataDir=data_directory,
                        txn_serializer=serializer,
                        hash_serializer=serializer,
                        fileName=old_ledger_file,
                        transactionLogStore=old_txn_log_store)

    old_ledger.stop()
Example #5
def initKeyValueStorage(keyValueType, dataLocation, keyValueStorageName,
                        open=True, read_only=False, db_config=None, txn_serializer=None) -> KeyValueStorage:
    from storage.kv_store_leveldb import KeyValueStorageLeveldb
    from storage.kv_store_rocksdb import KeyValueStorageRocksdb

    if keyValueType == KeyValueStorageType.Leveldb:
        return KeyValueStorageLeveldb(dataLocation, keyValueStorageName, open,
                                      read_only)

    if keyValueType == KeyValueStorageType.Rocksdb:
        return KeyValueStorageRocksdb(dataLocation, keyValueStorageName, open,
                                      read_only, db_config)

    if keyValueType == KeyValueStorageType.Memory:
        return KeyValueStorageInMemory()

    if keyValueType == KeyValueStorageType.ChunkedBinaryFile:
        def chunk_creator(name):
            return BinarySerializerBasedFileStore(txn_serializer,
                                                  os.path.join(dataLocation, keyValueStorageName),
                                                  name,
                                                  isLineNoKey=True,
                                                  storeContentHash=False,
                                                  ensureDurability=False)
        return ChunkedFileStore(dataLocation,
                                keyValueStorageName,
                                isLineNoKey=True,
                                chunkSize=5,
                                chunk_creator=chunk_creator,
                                storeContentHash=False,
                                ensureDurability=False)

    if keyValueType == KeyValueStorageType.BinaryFile:
        # "8bad f00d" / "dead beef" magic byte sequences serve as the record
        # delimiter and line separator, so they are unlikely to appear in data
        return BinaryFileStore(dataLocation, keyValueStorageName,
                               delimiter=b'\x8b\xad\xf0\x0d\x8b\xad\xf0\x0d',
                               lineSep=b'\xde\xad\xbe\xef\xde\xad\xbe\xef',
                               storeContentHash=False)

    raise KeyValueStorageConfigNotFound
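A minimal usage sketch for the chunked binary branch; the enum member and parameter names come from the function above, while the MsgPackSerializer import path is an assumption:

# Sketch only: the serializer import path is assumed, not shown in the excerpt.
from common.serializers.msgpack_serializer import MsgPackSerializer

storage = initKeyValueStorage(KeyValueStorageType.ChunkedBinaryFile,
                              dataLocation='/tmp/kv_data',
                              keyValueStorageName='transactions',
                              txn_serializer=MsgPackSerializer())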
Example #6
def chunkedTextFileStore(tempdir) -> ChunkedFileStore:
    return ChunkedFileStore(tempdir, "chunked_data",
                            isLineNoKey=True,
                            storeContentHash=True,
                            chunkSize=chunkSize)
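If this helper is registered as a pytest fixture (the excerpt does not show a decorator, so this is an assumption), a test could consume it much like Example #2 uses its stores:

# Sketch only: assumes chunkSize is a module-level constant and that the
# helper above is exposed as a pytest fixture named chunkedTextFileStore.
def test_put_and_get(chunkedTextFileStore):
    chunkedTextFileStore.put(None, "some data")
    assert chunkedTextFileStore.get("1")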