def migrate_genesis_txn(base_dir):
    for suffix in ('sandbox', 'live', 'local'):
        old_domain_genesis = os.path.join(
            base_dir, 'transactions_{}'.format(suffix))
        old_pool_genesis = os.path.join(
            base_dir, 'pool_transactions_{}'.format(suffix))
        new_domain_genesis = os.path.join(
            base_dir, 'domain_transactions_{}_genesis'.format(suffix))
        new_pool_genesis = os.path.join(
            base_dir, 'pool_transactions_{}_genesis'.format(suffix))

        # Drop stale copies of the old-format genesis files first.
        if os.path.exists(old_domain_genesis):
            os.remove(old_domain_genesis)
        if os.path.exists(old_pool_genesis):
            os.remove(old_pool_genesis)

        if os.path.exists(new_domain_genesis):
            # The new domain genesis stores one JSON txn per line; the old
            # format is pipe-delimited compact, so re-serialize each line.
            old_ser = CompactSerializer(getTxnOrderedFields())
            new_ser = JsonSerializer()
            with open(new_domain_genesis, 'r') as f1:
                with open(old_domain_genesis, 'w') as f2:
                    for line in store_utils.cleanLines(f1):
                        txn = new_ser.deserialize(line)
                        # toBytes=False: f2 is opened in text mode
                        txn = old_ser.serialize(txn, toBytes=False)
                        f2.write(txn)
            os.remove(new_domain_genesis)

        # The pool genesis is JSON in both formats, so renaming suffices.
        if os.path.exists(new_pool_genesis):
            os.rename(new_pool_genesis, old_pool_genesis)
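# A minimal sketch of the per-line conversion above, using the same
# serializers; the two-field schema and the sample transaction are
# hypothetical stand-ins for getTxnOrderedFields() and a real genesis line.
from collections import OrderedDict

demo_fields = OrderedDict([("identifier", (str, str)), ("reqId", (str, int))])
old_ser = CompactSerializer(demo_fields)
new_ser = JsonSerializer()
txn = new_ser.deserialize('{"identifier": "cli1", "reqId": 1}')
assert old_ser.serialize(txn, toBytes=False) == "cli1|1"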
# Presumably a parametrized pytest fixture; the params are inferred from the
# branches below.
@pytest.fixture(params=['MsgPack', 'Json', 'Compact'])
def txn_serializer(request):
    if request.param == 'MsgPack':
        return MsgPackSerializer()
    if request.param == 'Json':
        return JsonSerializer()
    if request.param == 'Compact':
        return CompactSerializer(orderedFields)
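# orderedFields is referenced throughout this suite but not defined in this
# excerpt; judging from newOrderedFields in the recovery test below, which
# extends it with a single "newField", it is presumably:
orderedFields = OrderedDict([
    ("identifier", (str, str)),
    ("reqId", (str, int)),
    ("op", (str, str)),
])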
def migrate_all_hash_stores(node_data_directory):
    # The new hash store (merkle tree) will be recovered from the new
    # transaction log after restart, so just delete the current hash store.
    new_merkle_nodes = os.path.join(node_data_directory, '_merkleNodes')
    new_merkle_leaves = os.path.join(node_data_directory, '_merkleLeaves')
    new_merkle_nodes_bin = os.path.join(
        node_data_directory, '_merkleNodes.bin')
    new_merkle_leaves_bin = os.path.join(
        node_data_directory, '_merkleLeaves.bin')
    new_merkle_nodes_config_bin = os.path.join(
        node_data_directory, 'config_merkleNodes.bin')
    new_merkle_leaves_config_bin = os.path.join(
        node_data_directory, 'config_merkleLeaves.bin')

    if os.path.exists(new_merkle_nodes):
        shutil.rmtree(new_merkle_nodes)
    if os.path.exists(new_merkle_leaves):
        shutil.rmtree(new_merkle_leaves)
    if os.path.exists(new_merkle_nodes_bin):
        os.remove(new_merkle_nodes_bin)
    if os.path.exists(new_merkle_leaves_bin):
        os.remove(new_merkle_leaves_bin)
    if os.path.exists(new_merkle_nodes_config_bin):
        os.remove(new_merkle_nodes_config_bin)
    if os.path.exists(new_merkle_leaves_config_bin):
        os.remove(new_merkle_leaves_config_bin)

    # Re-open the ledgers in the old format so their hash stores are
    # recreated from the transaction logs.
    fields = getTxnOrderedFields()
    __open_old_ledger(node_data_directory, config.poolTransactionsFile,
                      'pool', serializer=JsonSerializer())
    __open_old_ledger(node_data_directory, config.domainTransactionsFile,
                      'domain', serializer=CompactSerializer(fields=fields))
    __open_old_ledger(node_data_directory, config.configTransactionsFile,
                      'config', serializer=JsonSerializer())
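# A hedged illustration of why deleting the hash store is safe: restarting a
# ledger rebuilds its merkle tree from the transaction log, which is exactly
# what the recovery test below asserts. create_ledger_text_file_storage,
# random_txn, and data_dir are assumed helpers/paths from this test suite.
ledger = create_ledger_text_file_storage(
    CompactSerializer(orderedFields), None, data_dir)
ledger.add(random_txn(1))
root_before = ledger.root_hash
ledger.stop()
# ... hash-store files could be removed here, as migrate_all_hash_stores does ...
restarted = create_ledger_text_file_storage(
    CompactSerializer(orderedFields), None, data_dir)
assert restarted.root_hash == root_before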
def testDeserializeSubfields():
    fields = OrderedDict([
        ("f1.a", (str, str)),
        ("f1.b", (str, int)),
        ("f1.c", (str, float)),
        ("f2.d", (str, str)),
        ("f2.e", (str, int)),
        ("f2.f", (str, float)),
    ])
    serializer = CompactSerializer(fields)
    json = {
        "f1": {"a": "v1", "b": 2, "c": 3.0},
        "f2": {"d": "v1", "e": 3, "f": 4.0},
    }
    assert json == serializer.deserialize(b"v1|2|3.0|v1|3|4.0")
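# For symmetry with the test above, and assuming CompactSerializer
# round-trips cleanly, serializing the same nested dict should reproduce the
# pipe-delimited payload (bytes, since toBytes defaults to True):
def testSerializeSubfields():
    fields = OrderedDict([
        ("f1.a", (str, str)),
        ("f1.b", (str, int)),
        ("f1.c", (str, float)),
        ("f2.d", (str, str)),
        ("f2.e", (str, int)),
        ("f2.f", (str, float)),
    ])
    serializer = CompactSerializer(fields)
    json = {
        "f1": {"a": "v1", "b": 2, "c": 3.0},
        "f2": {"d": "v1", "e": 3, "f": 4.0},
    }
    assert serializer.serialize(json) == b"v1|2|3.0|v1|3|4.0"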
# Presumably a parametrized pytest fixture, like txn_serializer above.
@pytest.fixture(params=['MsgPack', 'Json', 'Signing', 'Compact'])
def hash_serializer(request):
    if request.param == 'MsgPack':
        return MsgPackSerializer()
    if request.param == 'Json':
        return JsonSerializer()
    if request.param == 'Signing':
        return SigningSerializer()
    if request.param == 'Compact':
        return CompactSerializer(orderedFields)
def test_recover_ledger_new_fields_to_txns_added(tempdir):
    ledger = create_ledger_text_file_storage(
        CompactSerializer(orderedFields), None, tempdir)
    for d in range(5):
        ledger.add(random_txn(d))
    updatedTree = ledger.tree
    ledger.stop()

    newOrderedFields = OrderedDict([
        ("identifier", (str, str)),
        ("reqId", (str, int)),
        ("op", (str, str)),
        ("newField", (str, str)),
    ])
    restartedLedger = create_ledger_text_file_storage(
        CompactSerializer(newOrderedFields), None, tempdir)

    assert restartedLedger.size == ledger.size
    assert restartedLedger.root_hash == ledger.root_hash
    assert restartedLedger.tree.hashes == updatedTree.hashes
    assert restartedLedger.tree.root_hash == updatedTree.root_hash
def migrate_all_ledgers_for_node(node_data_directory):
    # Pool and config ledgers use the default JSON serializer and keep their
    # default file names.
    __migrate_ledger(node_data_directory,
                     config.poolTransactionsFile,
                     config.poolTransactionsFile,
                     serializer=JsonSerializer())
    __migrate_ledger(node_data_directory,
                     config.configTransactionsFile,
                     config.configTransactionsFile,
                     serializer=JsonSerializer())

    # The domain ledger uses a custom CompactSerializer and the old file name
    # (without the 'domain_' prefix).
    fields = getTxnOrderedFields()
    __migrate_ledger(node_data_directory,
                     config.domainTransactionsFile.replace('domain_', ''),
                     config.domainTransactionsFile,
                     serializer=CompactSerializer(fields=fields))
def testInitCompactSerializerEmptyFields():
    fields = OrderedDict([])
    CompactSerializer(fields)


def testInitCompactSerializerNoFields():
    CompactSerializer()


def testInitCompactSerializerWithCorrectFields():
    CompactSerializer(fields)
# Presumably a pytest fixture providing the default serializer under test.
@pytest.fixture()
def serializer():
    return CompactSerializer(fields)
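# A hedged example of how the serializer fixture might be used, assuming
# fields matches the identifier/reqId/op schema above: a simple round-trip
# test (the txn literal is hypothetical; the int conversion on reqId mirrors
# testDeserializeSubfields).
def test_round_trip(serializer):
    txn = {"identifier": "cli1", "reqId": 1, "op": "buy"}
    assert serializer.deserialize(serializer.serialize(txn)) == txn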