def test_check_database_version():
    server.initialise(
        database_file=tempfile.gettempdir() + "/fixtures.unittest.db", testnet=True, **util_test.COUNTERPARTYD_OPTIONS
    )
    util_test.restore_database(config.DATABASE, CURR_DIR + "/fixtures/scenarios/unittest_fixture.sql")
    db = database.get_connection(read_only=False)
    database.update_version(db)

    version_major, version_minor = database.version(db)
    assert config.VERSION_MAJOR == version_major
    assert config.VERSION_MINOR == version_minor

    check.database_version(db)

    config.VERSION_MINOR += 1
    with pytest.raises(check.DatabaseVersionError) as exception:
        check.database_version(db)
    assert exception.value.reparse_block_index is None
    config.VERSION_MINOR -= 1

    config.VERSION_MAJOR += 1
    with pytest.raises(check.DatabaseVersionError) as exception:
        check.database_version(db)
    assert exception.value.reparse_block_index == config.BLOCK_FIRST
    config.VERSION_MAJOR -= 1
Example #2
def test_check_database_version():
    server.initialise(database_file=tempfile.gettempdir() +
                      '/fixtures.unittest.db',
                      testnet=True,
                      **util_test.COUNTERPARTYD_OPTIONS)
    util_test.restore_database(
        config.DATABASE, CURR_DIR + '/fixtures/scenarios/unittest_fixture.sql')
    db = database.get_connection(read_only=False)
    database.update_version(db)

    version_major, version_minor = database.version(db)
    assert config.VERSION_MAJOR == version_major
    assert config.VERSION_MINOR == version_minor

    check.database_version(db)

    config.VERSION_MINOR += 1
    with pytest.raises(check.DatabaseVersionError) as exception:
        check.database_version(db)
    assert exception.value.reparse_block_index is None
    config.VERSION_MINOR -= 1

    config.VERSION_MAJOR += 1
    with pytest.raises(check.DatabaseVersionError) as exception:
        check.database_version(db)
    assert exception.value.reparse_block_index == config.BLOCK_FIRST
    config.VERSION_MAJOR -= 1
def server_db(request):
    """Enable database access for unit test vectors."""
    db = database.get_connection(read_only=False)
    database.update_version(db)
    cursor = db.cursor()
    cursor.execute('''BEGIN''')
    request.addfinalizer(lambda: cursor.execute('''ROLLBACK'''))
    return db
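# A minimal usage sketch for the fixture above, assuming it is registered with
# pytest's fixture decorator in the test suite's conftest; the fixture scope and
# the test name are illustrative assumptions, and the snippet relies on the same
# module-level imports as the excerpt (e.g. `database`).
import pytest

server_db = pytest.fixture(scope='function')(server_db)  # register as a fixture

def test_with_server_db(server_db):
    # Any rows written during the test are discarded by the ROLLBACK finalizer.
    cursor = server_db.cursor()
    blocks = list(cursor.execute('''SELECT * FROM blocks'''))
    assert isinstance(blocks, list)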
Example #4
def counterpartyd_db(request):
    """Enable database access for unit test vectors."""
    db = database.get_connection(read_only=False)
    database.update_version(db)
    cursor = db.cursor()
    cursor.execute('''BEGIN''')
    request.addfinalizer(lambda: cursor.execute('''ROLLBACK'''))
    return db
def init_database(sqlfile, dbfile, options=None):
    kwargs = COUNTERPARTYD_OPTIONS.copy()
    kwargs.update(options or {})

    server.initialise(
        database_file=dbfile,
        testnet=True,
        verbose=True,
        console_logfilter=os.environ.get('COUNTERPARTY_LOGGING', None),
        **kwargs)

    restore_database(config.DATABASE, sqlfile)
    db = database.get_connection(read_only=False)  # reinit the DB to deal with the restoring
    database.update_version(db)
    util.FIRST_MULTISIG_BLOCK_TESTNET = 1

    return db
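# A minimal usage sketch for the helper above; the fixture SQL file and the
# temporary database path mirror the earlier examples, and the snippet assumes
# the same module-level imports as the excerpts (tempfile, database, config,
# CURR_DIR).
db = init_database(
    CURR_DIR + '/fixtures/scenarios/unittest_fixture.sql',  # SQL dump to restore
    tempfile.gettempdir() + '/fixtures.unittest.db',         # target SQLite file
)
assert database.version(db) == (config.VERSION_MAJOR, config.VERSION_MINOR)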
Example #7
def follow(db):
    # Check software version.
    check.software_version()

    # Initialise.
    initialise(db)

    # Get index of last block.
    if util.CURRENT_BLOCK_INDEX == 0:
        logger.warning('New database.')
        block_index = config.BLOCK_FIRST
    else:
        block_index = util.CURRENT_BLOCK_INDEX + 1

        # Check database version.
        try:
            check.database_version(db)
        except check.DatabaseVersionError as e:
            logger.info(str(e))
            # no need to reparse or rollback a new database
            if block_index != config.BLOCK_FIRST:
                reparse(db, block_index=e.reparse_block_index, quiet=False)
            database.update_version(db)

    logger.info('Resuming parsing.')

    # Get index of last transaction.
    tx_index = get_next_tx_index(db)

    not_supported = {}   # No false positives. Use a dict to allow for O(1) lookups
    not_supported_sorted = collections.deque()
    # ^ Entries in the form of (block_index, tx_hash), oldest first. Allows for easy removal of old, unnecessary entries.
    mempool_initialised = False
    backend.init_mempool_cache()
    cursor = db.cursor()
    # a reorg can happen without the block count increasing, or even for that
    # matter, with the block count decreasing. This should only delay
    # processing of the new blocks a bit.
    while True:
        starttime = time.time()

        # Get block count.
        # If the backend is unreachable and `config.FORCE` is set, just sleep
        # and try again repeatedly.
        try:
            block_count = backend.getblockcount()
        except (ConnectionRefusedError, http.client.CannotSendRequest, backend.addrindex.BackendRPCError) as e:
            if config.FORCE:
                time.sleep(config.BACKEND_POLL_INTERVAL)
                continue
            else:
                raise e

        # Get new blocks.
        if block_index <= block_count:

            # Backwards check for incorrect blocks due to chain reorganisation, and stop when a common parent is found.
            current_index = block_index
            requires_rollback = False
            while True:
                if current_index == config.BLOCK_FIRST:
                    break

                logger.debug('Checking that block {} is not an orphan.'.format(current_index))

                # Backend parent hash.
                current_hash = backend.getblockhash(current_index)
                current_cblock = backend.getblock(current_hash)
                backend_parent = bitcoinlib.core.b2lx(current_cblock.hashPrevBlock)

                # DB parent hash.
                blocks = list(cursor.execute('''SELECT * FROM blocks
                                                WHERE block_index = ?''', (current_index - 1,)))
                if len(blocks) != 1:  # For empty DB.
                    break
                db_parent = blocks[0]['block_hash']

                # Compare.
                assert type(db_parent) == str
                assert type(backend_parent) == str
                if db_parent == backend_parent:
                    break
                else:
                    current_index -= 1
                    requires_rollback = True

            # Rollback for reorganisation.
            if requires_rollback:
                # Record reorganisation.
                logger.warning('Blockchain reorganisation at block {}.'.format(current_index))
                log.message(db, block_index, 'reorg', None, {'block_index': current_index})

                # Rollback the DB.
                reparse(db, block_index=current_index-1, quiet=True)
                block_index = current_index
                tx_index = get_next_tx_index(db)
                continue

            # Check version. (Don’t add any blocks to the database while
            # running an out‐of‐date client!)
            check.software_version()

            # Get and parse transactions in this block (atomically).
            block_hash = backend.getblockhash(current_index)
            block = backend.getblock(block_hash)
            previous_block_hash = bitcoinlib.core.b2lx(block.hashPrevBlock)
            block_time = block.nTime
            txhash_list = backend.get_txhash_list(block)
            raw_transactions = backend.getrawtransaction_batch(txhash_list)
            with db:
                util.CURRENT_BLOCK_INDEX = block_index

                # List the block.
                cursor.execute('''INSERT INTO blocks(
                                    block_index,
                                    block_hash,
                                    block_time,
                                    previous_block_hash,
                                    difficulty) VALUES(?,?,?,?,?)''',
                                    (block_index,
                                    block_hash,
                                    block_time,
                                    previous_block_hash,
                                    block.difficulty)
                              )

                # List the transactions in the block.
                for tx_hash in txhash_list:
                    tx_hex = raw_transactions[tx_hash]
                    tx_index = list_tx(db, block_hash, block_index, block_time, tx_hash, tx_index, tx_hex)

                # Parse the transactions in the block.
                parse_block(db, block_index, block_time)

            # When newly caught up, check for conservation of assets.
            if block_index == block_count:
                if config.CHECK_ASSET_CONSERVATION:
                    check.asset_conservation(db)

            # Remove any non‐supported transactions older than ten blocks.
            while len(not_supported_sorted) and not_supported_sorted[0][0] <= block_index - 10:
                tx_h = not_supported_sorted.popleft()[1]
                del not_supported[tx_h]

            logger.info('Block: %s (%ss)'%(str(block_index), "{:.2f}".format(time.time() - starttime, 3)))
            # Increment block index.
            block_count = backend.getblockcount()
            block_index += 1

        else:   
            # First mempool fill for session?
            if mempool_initialised:
                logger.debug('Updating mempool.')
            else:
                logger.debug('Initialising mempool.')

            # Get old mempool.
            old_mempool = list(cursor.execute('''SELECT * FROM mempool'''))
            old_mempool_hashes = [message['tx_hash'] for message in old_mempool]

            # Fake values for fake block.
            curr_time = int(time.time())
            mempool_tx_index = tx_index

            # For each transaction in Bitcoin Core mempool, if it’s new, create
            # a fake block, a fake transaction, capture the generated messages,
            # and then save those messages.
            # Every transaction in mempool is parsed independently. (DB is rolled back after each one.)
            mempool = []
            for tx_hash in backend.getrawmempool():

                # If already in mempool, copy to new one.
                if tx_hash in old_mempool_hashes:
                    for message in old_mempool:
                        if message['tx_hash'] == tx_hash:
                            mempool.append((tx_hash, message))

                # If already skipped, skip it again.
                elif tx_hash not in not_supported:

                    # Else: list, parse and save it.
                    try:
                        with db:
                            # List the fake block.
                            cursor.execute('''INSERT INTO blocks(
                                                block_index,
                                                block_hash,
                                                block_time) VALUES(?,?,?)''',
                                                (config.MEMPOOL_BLOCK_INDEX,
                                                 config.MEMPOOL_BLOCK_HASH,
                                                 curr_time)
                                          )

                            # List transaction.
                            try:    # Sometimes the transactions can’t be found: `{'code': -5, 'message': 'No information available about transaction'} Is txindex enabled in Bitcoind?`
                                mempool_tx_index = list_tx(db, None, block_index, curr_time, tx_hash, mempool_tx_index)
                            except backend.addrindex.BackendRPCError:
                                raise MempoolError

                            # Parse transaction.
                            cursor.execute('''SELECT * FROM transactions \
                                              WHERE tx_hash = ?''',
                                           (tx_hash,))
                            transactions = list(cursor)
                            if transactions:
                                assert len(transactions) == 1
                                transaction = transactions[0]
                                supported = parse_tx(db, transaction)
                                if not supported:
                                    not_supported[tx_hash] = ''
                                    not_supported_sorted.append((block_index, tx_hash))
                            else:
                                # If a transaction hasn’t been added to the
                                # table `transactions`, then it’s not a
                                # Counterparty transaction.
                                not_supported[tx_hash] = ''
                                not_supported_sorted.append((block_index, tx_hash))
                                raise MempoolError

                            # Save transaction and side‐effects in memory.
                            cursor.execute('''SELECT * FROM messages WHERE block_index = ?''', (config.MEMPOOL_BLOCK_INDEX,))
                            for message in list(cursor):
                                mempool.append((tx_hash, message))

                            # Rollback.
                            raise MempoolError
                    except MempoolError:
                        pass

            # Re‐write mempool messages to database.
            with db:
                cursor.execute('''DELETE FROM mempool''')
                for message in mempool:
                    tx_hash, new_message = message
                    new_message['tx_hash'] = tx_hash
                    cursor.execute('''INSERT INTO mempool VALUES(:tx_hash, :command, :category, :bindings, :timestamp)''', (new_message))

            # Wait
            mempool_initialised = True
            db.wal_checkpoint(mode=apsw.SQLITE_CHECKPOINT_PASSIVE)
            time.sleep(config.BACKEND_POLL_INTERVAL)

    cursor.close()
Example #8
def follow(db):
    # Check software version.
    check.software_version()

    # Initialise.
    initialise(db)

    # Get index of last block.
    if util.CURRENT_BLOCK_INDEX == 0:
        logger.warning('New database.')
        block_index = config.BLOCK_FIRST
    else:
        block_index = util.CURRENT_BLOCK_INDEX + 1

    # Check database version.
    try:
        check.database_version(db)
    except check.DatabaseVersionError as e:
        logger.info(str(e))
        # no need to reparse or rollback a new database
        if block_index != config.BLOCK_FIRST:
            reparse(db, block_index=e.reparse_block_index, quiet=False)
        database.update_version(db)

    logger.info('Resuming parsing.')

    # Get index of last transaction.
    tx_index = get_next_tx_index(db)

    not_supported = {
    }  # No false positives. Use a dict to allow for O(1) lookups
    not_supported_sorted = collections.deque()
    # ^ Entries in the form of (block_index, tx_hash), oldest first. Allows for easy removal of old, unnecessary entries.
    mempool_initialised = False
    cursor = db.cursor()
    # a reorg can happen without the block count increasing, or even for that
    # matter, with the block count decreasing. This should only delay
    # processing of the new blocks a bit.
    while True:
        starttime = time.time()

        # Get block count.
        # If the backend is unreachable and `config.FORCE` is set, just sleep
        # and try again repeatedly.
        try:
            block_count = backend.getblockcount()
        except (ConnectionRefusedError, http.client.CannotSendRequest) as e:
            if config.FORCE:
                time.sleep(config.BACKEND_POLL_INTERVAL)
                continue
            else:
                raise e

        # Get new blocks.
        if block_index <= block_count:

            # Backwards check for incorrect blocks due to chain reorganisation, and stop when a common parent is found.
            current_index = block_index
            requires_rollback = False
            while True:
                if current_index == config.BLOCK_FIRST:
                    break

                logger.debug('Checking that block {} is not an orphan.'.format(
                    current_index))

                # Backend parent hash.
                current_hash = backend.getblockhash(current_index)
                current_cblock = backend.getblock(current_hash)
                backend_parent = bitcoinlib.core.b2lx(
                    current_cblock.hashPrevBlock)

                # DB parent hash.
                blocks = list(
                    cursor.execute(
                        '''SELECT * FROM blocks
                                                WHERE block_index = ?''',
                        (current_index - 1, )))
                if len(blocks) != 1:  # For empty DB.
                    break
                db_parent = blocks[0]['block_hash']

                # Compare.
                assert type(db_parent) == str
                assert type(backend_parent) == str
                if db_parent == backend_parent:
                    break
                else:
                    current_index -= 1
                    requires_rollback = True

            # Rollback for reorganisation.
            if requires_rollback:
                # Record reorganisation.
                logger.warning('Blockchain reorganisation at block {}.'.format(
                    current_index))
                log.message(db, block_index, 'reorg', None,
                            {'block_index': current_index})

                # Rollback the DB.
                reparse(db, block_index=current_index - 1, quiet=True)
                block_index = current_index
                tx_index = get_next_tx_index(db)
                continue

            # Check version. (Don’t add any blocks to the database while
            # running an out‐of‐date client!)
            check.software_version()

            # Get and parse transactions in this block (atomically).
            block_hash = backend.getblockhash(current_index)
            block = backend.getblock(block_hash)
            previous_block_hash = bitcoinlib.core.b2lx(block.hashPrevBlock)
            block_time = block.nTime
            txhash_list = backend.get_txhash_list(block)
            with db:
                util.CURRENT_BLOCK_INDEX = block_index

                # List the block.
                cursor.execute(
                    '''INSERT INTO blocks(
                                    block_index,
                                    block_hash,
                                    block_time,
                                    previous_block_hash,
                                    difficulty) VALUES(?,?,?,?,?)''',
                    (block_index, block_hash, block_time, previous_block_hash,
                     block.difficulty))

                # List the transactions in the block.
                for tx_hash in txhash_list:
                    # TODO: use rpc._batch to get all transactions with one RPC call
                    tx_index = list_tx(db, block_hash, block_index, block_time,
                                       tx_hash, tx_index)

                # Parse the transactions in the block.
                parse_block(db, block_index, block_time)

            # When newly caught up, check for conservation of assets.
            if block_index == block_count:
                check.asset_conservation(db)

            # Remove any non‐supported transactions older than ten blocks.
            while len(not_supported_sorted) and not_supported_sorted[0][0] <= block_index - 10:
                tx_h = not_supported_sorted.popleft()[1]
                del not_supported[tx_h]

            logger.info('Block: %s (%ss)' % (str(block_index), "{:.2f}".format(
                time.time() - starttime, 3)))
            # Increment block index.
            block_count = backend.getblockcount()
            block_index += 1

        else:
            # First mempool fill for session?
            if mempool_initialised:
                logger.debug('Updating mempool.')
            else:
                logger.debug('Initialising mempool.')

            # Get old counterpartyd mempool.
            old_mempool = list(cursor.execute('''SELECT * FROM mempool'''))
            old_mempool_hashes = [
                message['tx_hash'] for message in old_mempool
            ]

            # Fake values for fake block.
            curr_time = int(time.time())
            mempool_tx_index = tx_index

            # For each transaction in Bitcoin Core mempool, if it’s new, create
            # a fake block, a fake transaction, capture the generated messages,
            # and then save those messages.
            # Every transaction in mempool is parsed independently. (DB is rolled back after each one.)
            mempool = []
            for tx_hash in backend.getrawmempool():

                # If already in counterpartyd mempool, copy to new one.
                if tx_hash in old_mempool_hashes:
                    for message in old_mempool:
                        if message['tx_hash'] == tx_hash:
                            mempool.append((tx_hash, message))

                # If already skipped, skip it again.
                elif tx_hash not in not_supported:

                    # Else: list, parse and save it.
                    try:
                        with db:
                            # List the fake block.
                            cursor.execute(
                                '''INSERT INTO blocks(
                                                block_index,
                                                block_hash,
                                                block_time) VALUES(?,?,?)''',
                                (config.MEMPOOL_BLOCK_INDEX,
                                 config.MEMPOOL_BLOCK_HASH, curr_time))

                            # List transaction.
                            try:  # Sometimes the transactions can’t be found: `{'code': -5, 'message': 'No information available about transaction'} Is txindex enabled in Bitcoind?`
                                mempool_tx_index = list_tx(
                                    db, None, block_index, curr_time, tx_hash,
                                    mempool_tx_index)
                            except backend.addrindex.BackendRPCError:
                                raise MempoolError

                            # Parse transaction.
                            cursor.execute(
                                '''SELECT * FROM transactions \
                                              WHERE tx_hash = ?''',
                                (tx_hash, ))
                            transactions = list(cursor)
                            if transactions:
                                assert len(transactions) == 1
                                transaction = transactions[0]
                                supported = parse_tx(db, transaction)
                                if not supported:
                                    not_supported[tx_hash] = ''
                                    not_supported_sorted.append(
                                        (block_index, tx_hash))
                            else:
                                # If a transaction hasn’t been added to the
                                # table `transactions`, then it’s not a
                                # Counterparty transaction.
                                not_supported[tx_hash] = ''
                                not_supported_sorted.append(
                                    (block_index, tx_hash))
                                raise MempoolError

                            # Save transaction and side‐effects in memory.
                            cursor.execute(
                                '''SELECT * FROM messages WHERE block_index = ?''',
                                (config.MEMPOOL_BLOCK_INDEX, ))
                            for message in list(cursor):
                                mempool.append((tx_hash, message))

                            # Rollback.
                            raise MempoolError
                    except MempoolError:
                        pass

            # Re‐write mempool messages to database.
            with db:
                cursor.execute('''DELETE FROM mempool''')
                for message in mempool:
                    tx_hash, new_message = message
                    new_message['tx_hash'] = tx_hash
                    cursor.execute(
                        '''INSERT INTO mempool VALUES(:tx_hash, :command, :category, :bindings, :timestamp)''',
                        (new_message))

            # Wait
            mempool_initialised = True
            db.wal_checkpoint(mode=apsw.SQLITE_CHECKPOINT_PASSIVE)
            time.sleep(config.BACKEND_POLL_INTERVAL)

    cursor.close()
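# A minimal sketch of how the follower loop above is typically started; it
# assumes `follow` lives alongside the other excerpts and that the database has
# already been initialised (e.g. via the `init_database` example earlier).
db = database.get_connection(read_only=False)
database.update_version(db)
follow(db)  # runs indefinitely: ingests new blocks, then polls the mempool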