def insert_raw_transaction(raw_transaction, db, rawtransactions_db):
    """Insert *raw_transaction* into the transactions table and parse it.

    One transaction per block: a fresh block is created first, a
    deterministic pseudo-hash is derived from the tx index plus the raw
    hex, and the new block is parsed before returning the inserted row.
    """
    # One transaction per block.
    block_index, block_hash, block_time = create_next_block(db)
    cursor = db.cursor()
    tx_index = block_index - config.BURN_START + 1
    # Deterministic pseudo-hash so test runs are reproducible.
    tx_hash = hashlib.sha256(
        '{}{}'.format(tx_index, raw_transaction).encode('utf-8')).hexdigest()
    if pytest.config.option.savescenarios:
        save_rawtransaction(rawtransactions_db, tx_hash, raw_transaction)
    source, destination, btc_amount, fee, data = blocks.get_tx_info2(
        get_proxy(), raw_transaction)
    row = (tx_index, tx_hash, block_index, block_hash, block_time,
           source, destination, btc_amount, fee, data, True)
    cursor.execute('''INSERT INTO transactions VALUES (?,?,?,?,?,?,?,?,?,?,?)''', row)
    tx = list(cursor.execute('''SELECT * FROM transactions WHERE tx_index = ?''',
                             (tx_index,)))[0]
    cursor.close()
    blocks.parse_block(db, block_index, block_time)
    return tx
def test_parse_from_the_start():
    """Re-initialise the database and re-parse every block from genesis."""
    logging.info('\n')
    blocks.initialise(db)
    for block_index in range(tx_index):
        blocks.parse_block(db, block_index)
    # Check balances of source_default.
    output_new['get_balances'] = util.get_balances(db, address=source_default)
def insert_block(db, block_index, parse_block=False):
    """Insert a synthetic block row for *block_index*; optionally parse it.

    Hash and timestamp are derived deterministically from the index so
    test runs are reproducible.  Returns
    (block_index, block_hash, block_time).
    """
    cursor = db.cursor()
    block_hash = hashlib.sha512(chr(block_index).encode('utf-8')).hexdigest()
    block_time = block_index * 10000000
    cursor.execute('''INSERT INTO blocks VALUES (?,?,?,?,?)''',
                   (block_index, block_hash, block_time, None, None))
    cursor.close()
    if parse_block:
        blocks.parse_block(db, block_index, block_time)
    return block_index, block_hash, block_time
def test_order_buy_xcp():
    """Create a buy-XCP order tx, insert it, and parse its block.

    Pins the expected unsigned transaction hex, then inserts the decoded
    data and advances block parsing.
    """
    global db, cursor
    unsigned_tx_hex = order.create(source, 0, small, 1, small * 2, expiration, 0, fee_provided, test=True)
    assert unsigned_tx_hex == '0100000001c1d8c075936c3495f6d653c50f73d987f75448d97a750249b1eb83bee71b24ae0000000000ffffffff029e07db0b000000001976a9144838d8b3588c4c7ba7c1d06f866e9b3739c6303788ac0000000000000000346a32544553540000000a000000000000000000000000004c4b4000000000000000010000000000989680000a000000000000000000000000'
    # NOTE: removed dead local `fee = config.MIN_FEE` — it was never read;
    # tx_insert receives config.MIN_FEE directly.
    data = get_tx_data(unsigned_tx_hex)
    tx_insert(source, destination, config.DUST_SIZE, config.MIN_FEE, data)
    cursor = blocks.parse_block(db, cursor, tx_index - 1)
def test_send():
    """Create a send tx, insert it, and parse its block.

    Pins the expected unsigned transaction hex, then inserts the decoded
    data and advances block parsing.
    """
    global db, cursor
    unsigned_tx_hex = send.create(source, destination, small, 1, test=True)
    assert unsigned_tx_hex == '0100000001c1d8c075936c3495f6d653c50f73d987f75448d97a750249b1eb83bee71b24ae0000000000ffffffff0336150000000000001976a914edb5c902eadd71e698a8ce05ba1d7b31efbaa57b88ac980dea0b000000001976a9144838d8b3588c4c7ba7c1d06f866e9b3739c6303788ac00000000000000001a6a185445535400000000000000000000000100000000004c4b4000000000'
    # NOTE: removed dead local `fee = config.MIN_FEE` — it was never read;
    # tx_insert receives config.MIN_FEE directly.
    data = get_tx_data(unsigned_tx_hex)
    tx_insert(source, destination, config.DUST_SIZE, config.MIN_FEE, data)
    cursor = blocks.parse_block(db, cursor, tx_index - 1)
def test_burn():
    """Create a burn tx, insert it, and parse its block.

    Unlike sends/orders, a burn has no destination and the whole
    quantity is the fee.
    """
    global db, cursor
    unsigned_tx_hex = burn.create(source, quantity, test=True)
    assert unsigned_tx_hex == '0100000001c1d8c075936c3495f6d653c50f73d987f75448d97a750249b1eb83bee71b24ae0000000000ffffffff02de68f405000000001976a9144838d8b3588c4c7ba7c1d06f866e9b3739c6303788ac0000000000000000156a13544553540000003c50726f6f664f664275726e00000000'
    fee = quantity
    data = get_tx_data(unsigned_tx_hex)
    tx_insert(source, None, None, fee, data)
    cursor = blocks.parse_block(db, cursor, tx_index - 1)
def reparse(testnet=True):
    """Reparse all blocks against an in-memory copy of the production db.

    Copies the on-disk database into memory, drops the derived tables,
    re-runs block parsing from the start, and on a consensus-hash
    mismatch prints a diff of the offending block's ledger/txlist before
    re-raising the error.
    """
    options = dict(COUNTERPARTYD_OPTIONS)
    options.pop('data_dir')
    counterpartyd.set_options(database_file=':memory:', testnet=testnet, **options)
    if testnet:
        config.PREFIX = b'TESTXXXX'

    # Mirror INFO-level log output to the console.
    logger = logging.getLogger()
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(console)

    memory_db = util.connect_to_db()
    initialise_db(memory_db)
    prod_db_path = os.path.join(
        config.DATA_DIR,
        '{}.{}{}.db'.format(config.XCP_CLIENT, str(config.VERSION_MAJOR),
                            '.testnet' if testnet else ''))
    prod_db = apsw.Connection(prod_db_path)
    prod_db.setrowtrace(util.rowtracer)
    with memory_db.backup("main", prod_db, "main") as backup:
        backup.step()

    # Here we don't use blocks.reparse() because it reparses the db inside
    # a transaction (`with db`).
    memory_cursor = memory_db.cursor()
    for table in blocks.TABLES + ['balances']:
        memory_cursor.execute('''DROP TABLE IF EXISTS {}'''.format(table))
    blocks.initialise(memory_db)
    previous_ledger_hash = None
    previous_txlist_hash = None
    memory_cursor.execute('''SELECT * FROM blocks ORDER BY block_index''')
    for block in memory_cursor.fetchall():
        try:
            logger.info('Block (re‐parse): {}'.format(str(block['block_index'])))
            previous_ledger_hash, previous_txlist_hash = blocks.parse_block(
                memory_db, block['block_index'], block['block_time'],
                previous_ledger_hash, block['ledger_hash'],
                previous_txlist_hash, block['txlist_hash'])
        except ConsensusError as e:
            message = str(e)
            if message.find('ledger_hash') != -1:
                new_ledger = get_block_ledger(memory_db, block['block_index'])
                old_ledger = get_block_ledger(prod_db, block['block_index'])
                compare_strings(old_ledger, new_ledger)
            elif message.find('txlist_hash') != -1:
                new_txlist = get_block_txlist(memory_db, block['block_index'])
                old_txlist = get_block_txlist(prod_db, block['block_index'])
                compare_strings(old_txlist, new_txlist)
            # FIX: was `raise (e)`; bare `raise` preserves the original
            # traceback when re-raising inside an except block.
            raise
def insert_raw_transaction(raw_transaction, db, rawtransactions_db):
    """Insert *raw_transaction* into the transactions table and parse it.

    One transaction per block: creates a new block, derives a
    deterministic pseudo-hash from the tx index and the raw hex, then
    parses the block.  Returns the inserted transaction row.
    """
    # One transaction per block.
    block_index, block_hash, block_time = create_next_block(db)
    cursor = db.cursor()
    tx_index = block_index - config.BURN_START + 1
    # Deterministic pseudo-hash so test runs are reproducible.
    tx_hash = hashlib.sha256(
        '{}{}'.format(tx_index, raw_transaction).encode('utf-8')).hexdigest()
    if pytest.config.option.savescenarios:
        save_rawtransaction(rawtransactions_db, tx_hash, raw_transaction)
    source, destination, btc_amount, fee, data = blocks.get_tx_info2(raw_transaction)
    row = (tx_index, tx_hash, block_index, block_hash, block_time,
           source, destination, btc_amount, fee, data, True)
    cursor.execute('''INSERT INTO transactions VALUES (?,?,?,?,?,?,?,?,?,?,?)''', row)
    tx = list(cursor.execute('''SELECT * FROM transactions WHERE tx_index = ?''',
                             (tx_index,)))[0]
    cursor.close()
    blocks.parse_block(db, block_index, block_time)
    return tx
def reparse(testnet=True):
    """Reparse all blocks against an in-memory copy of the production db.

    Copies the on-disk database into memory, drops the derived tables,
    re-runs block parsing from the start, and on a consensus-hash
    mismatch prints a diff of the offending block's ledger/txlist before
    re-raising the error.
    """
    options = dict(COUNTERPARTYD_OPTIONS)
    options.pop('data_dir')
    bluejudyd.set_options(database_file=':memory:', testnet=testnet, **options)
    if testnet:
        config.PREFIX = b'TESTXXXX'

    # Mirror INFO-level log output to the console.
    logger = logging.getLogger()
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(console)

    memory_db = util.connect_to_db()
    initialise_db(memory_db)
    prod_db_path = os.path.join(
        config.DATA_DIR,
        '{}.{}{}.db'.format(config.XBJ_CLIENT, str(config.VERSION_MAJOR),
                            '.testnet' if testnet else ''))
    prod_db = apsw.Connection(prod_db_path)
    prod_db.setrowtrace(util.rowtracer)
    with memory_db.backup("main", prod_db, "main") as backup:
        backup.step()

    # Here we don't use blocks.reparse() because it reparses the db inside
    # a transaction (`with db`).
    memory_cursor = memory_db.cursor()
    for table in blocks.TABLES + ['balances']:
        memory_cursor.execute('''DROP TABLE IF EXISTS {}'''.format(table))
    blocks.initialise(memory_db)
    previous_ledger_hash = None
    previous_txlist_hash = None
    memory_cursor.execute('''SELECT * FROM blocks ORDER BY block_index''')
    for block in memory_cursor.fetchall():
        try:
            logger.info('Block (re‐parse): {}'.format(str(block['block_index'])))
            previous_ledger_hash, previous_txlist_hash = blocks.parse_block(
                memory_db, block['block_index'], block['block_time'],
                previous_ledger_hash, block['ledger_hash'],
                previous_txlist_hash, block['txlist_hash'])
        except ConsensusError as e:
            message = str(e)
            if message.find('ledger_hash') != -1:
                new_ledger = get_block_ledger(memory_db, block['block_index'])
                old_ledger = get_block_ledger(prod_db, block['block_index'])
                compare_strings(old_ledger, new_ledger)
            elif message.find('txlist_hash') != -1:
                new_txlist = get_block_txlist(memory_db, block['block_index'])
                old_txlist = get_block_txlist(prod_db, block['block_index'])
                compare_strings(old_txlist, new_txlist)
            # FIX: was `raise(e)`; bare `raise` preserves the original
            # traceback when re-raising inside an except block.
            raise
def reparse(testnet=True):
    """Reparse all blocks against an in-memory copy of the production db.

    Copies the on-disk database into memory, drops the derived tables,
    clears stale consensus hashes whose first-block value no longer
    matches the checkpoint, re-runs block parsing, and on a
    consensus-hash mismatch prints a diff of the offending block's
    ledger/txlist before re-raising the error.
    """
    options = dict(COUNTERPARTYD_OPTIONS)
    options.pop('data_dir')
    counterpartyd.set_options(database_file=':memory:', testnet=testnet, **options)
    if testnet:
        config.PREFIX = b'TESTXXXX'

    # Mirror INFO-level log output to the console.
    logger = logging.getLogger()
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(console)

    memory_db = database.get_connection(read_only=False)
    initialise_db(memory_db)
    prod_db_path = os.path.join(
        config.DATA_DIR,
        '{}.{}{}.db'.format(config.XCP_CLIENT, str(config.VERSION_MAJOR),
                            '.testnet' if testnet else ''))
    prod_db = apsw.Connection(prod_db_path)
    prod_db.setrowtrace(database.rowtracer)
    with memory_db.backup("main", prod_db, "main") as backup:
        backup.step()

    # Here we don't use blocks.reparse() because it reparses the db inside
    # a transaction (`with db`).
    memory_cursor = memory_db.cursor()
    for table in blocks.TABLES + ['balances']:
        memory_cursor.execute('''DROP TABLE IF EXISTS {}'''.format(table))

    # Clean consensus hashes if the first block's hash doesn't match its
    # checkpoint (the hashing scheme changed, so all stored hashes are stale).
    checkpoints = check.CHECKPOINTS_TESTNET if config.TESTNET else check.CHECKPOINTS_MAINNET
    columns = [column['name']
               for column in memory_cursor.execute('''PRAGMA table_info(blocks)''')]
    for field in ['ledger_hash', 'txlist_hash']:
        if field in columns:
            sql = '''SELECT {} FROM blocks WHERE block_index = ?'''.format(field)
            first_hash = list(memory_cursor.execute(sql, (config.BLOCK_FIRST,)))[0][field]
            if first_hash != checkpoints[config.BLOCK_FIRST][field]:
                # FIX: this string literal previously contained a raw newline,
                # which is a SyntaxError in a single-quoted string.
                logging.info('First hash changed. Cleaning {}.'.format(field))
                memory_cursor.execute('''UPDATE blocks SET {} = NULL'''.format(field))

    blocks.initialise(memory_db)
    previous_ledger_hash = None
    previous_txlist_hash = None
    memory_cursor.execute('''SELECT * FROM blocks ORDER BY block_index''')
    for block in memory_cursor.fetchall():
        try:
            logger.info('Block (re‐parse): {}'.format(str(block['block_index'])))
            previous_ledger_hash, previous_txlist_hash = blocks.parse_block(
                memory_db, block['block_index'], block['block_time'],
                previous_ledger_hash, block['ledger_hash'],
                previous_txlist_hash, block['txlist_hash'])
        except check.ConsensusError as e:
            message = str(e)
            if message.find('ledger_hash') != -1:
                new_ledger = get_block_ledger(memory_db, block['block_index'])
                old_ledger = get_block_ledger(prod_db, block['block_index'])
                compare_strings(old_ledger, new_ledger)
            elif message.find('txlist_hash') != -1:
                new_txlist = get_block_txlist(memory_db, block['block_index'])
                old_txlist = get_block_txlist(prod_db, block['block_index'])
                compare_strings(old_txlist, new_txlist)
            # FIX: was `raise(e)`; bare `raise` preserves the original
            # traceback when re-raising inside an except block.
            raise
def test_parse_from_the_start():
    """Re-initialise the database and re-parse every block from genesis."""
    global db, cursor
    blocks.initialise(db, cursor)
    for block_index in range(tx_index):
        cursor = blocks.parse_block(db, cursor, block_index)
def reparse(testnet=True):
    """Reparse all blocks against an in-memory copy of the production db.

    Copies the on-disk database into memory, drops the derived tables,
    clears stale consensus hashes whose first-block value no longer
    matches the checkpoint, re-runs block parsing, and on a
    consensus-hash mismatch prints a diff of the offending block's
    ledger/txlist before re-raising the error.
    """
    options = dict(COUNTERPARTYD_OPTIONS)
    options.pop('data_dir')
    counterpartyd.set_options(database_file=':memory:', testnet=testnet, **options)
    if testnet:
        config.PREFIX = b'TESTXXXX'

    # Mirror INFO-level log output to the console.
    logger = logging.getLogger()
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(console)

    memory_db = database.get_connection(read_only=False)
    initialise_db(memory_db)
    prod_db_path = os.path.join(
        config.DATA_DIR,
        '{}.{}{}.db'.format(config.XCP_CLIENT, str(config.VERSION_MAJOR),
                            '.testnet' if testnet else ''))
    prod_db = apsw.Connection(prod_db_path)
    prod_db.setrowtrace(database.rowtracer)
    with memory_db.backup("main", prod_db, "main") as backup:
        backup.step()

    # Here we don't use blocks.reparse() because it reparses the db inside
    # a transaction (`with db`).
    memory_cursor = memory_db.cursor()
    for table in blocks.TABLES + ['balances']:
        memory_cursor.execute('''DROP TABLE IF EXISTS {}'''.format(table))

    # Clean consensus hashes if the first block's hash doesn't match its
    # checkpoint (the hashing scheme changed, so all stored hashes are stale).
    checkpoints = check.CHECKPOINTS_TESTNET if config.TESTNET else check.CHECKPOINTS_MAINNET
    columns = [column['name']
               for column in memory_cursor.execute('''PRAGMA table_info(blocks)''')]
    for field in ['ledger_hash', 'txlist_hash']:
        if field in columns:
            sql = '''SELECT {} FROM blocks WHERE block_index = ?'''.format(field)
            first_hash = list(memory_cursor.execute(sql, (config.BLOCK_FIRST,)))[0][field]
            if first_hash != checkpoints[config.BLOCK_FIRST][field]:
                # FIX: this string literal previously contained a raw newline,
                # which is a SyntaxError in a single-quoted string.
                logger.info('First hash changed. Cleaning {}.'.format(field))
                memory_cursor.execute('''UPDATE blocks SET {} = NULL'''.format(field))

    blocks.initialise(memory_db)
    previous_ledger_hash = None
    previous_txlist_hash = None
    memory_cursor.execute('''SELECT * FROM blocks ORDER BY block_index''')
    for block in memory_cursor.fetchall():
        try:
            logger.info('Block (re‐parse): {}'.format(str(block['block_index'])))
            previous_ledger_hash, previous_txlist_hash = blocks.parse_block(
                memory_db, block['block_index'], block['block_time'],
                previous_ledger_hash, block['ledger_hash'],
                previous_txlist_hash, block['txlist_hash'])
        except check.ConsensusError as e:
            message = str(e)
            if message.find('ledger_hash') != -1:
                new_ledger = get_block_ledger(memory_db, block['block_index'])
                old_ledger = get_block_ledger(prod_db, block['block_index'])
                compare_strings(old_ledger, new_ledger)
            elif message.find('txlist_hash') != -1:
                new_txlist = get_block_txlist(memory_db, block['block_index'])
                old_txlist = get_block_txlist(prod_db, block['block_index'])
                compare_strings(old_txlist, new_txlist)
            # FIX: was `raise (e)`; bare `raise` preserves the original
            # traceback when re-raising inside an except block.
            raise