Example no. 1
def test_full_pipeline(monkeypatch, user_pk):
    import random
    from bigchaindb import Bigchain, config_utils
    from bigchaindb.backend import query
    from bigchaindb.models import Transaction
    from bigchaindb.pipelines import stale
    from multipipes import Pipe
    CONFIG = {
        'keyring': ['aaa', 'bbb'],
        'backlog_reassign_delay': 0.01
    }
    config_utils.update_config(CONFIG)
    b = Bigchain()

    monkeypatch.setattr('time.time', lambda: 1)

    for i in range(100):
        tx = Transaction.create([b.me], [([user_pk], 1)],
                                metadata={'msg': random.random()})
        tx = tx.sign([b.me_private])
        b.write_transaction(tx)
    original_txs = list(query.get_stale_transactions(b.connection, 0))
    original_txs = {tx['id']: tx for tx in original_txs}

    assert len(original_txs) == 100

    monkeypatch.undo()

    inpipe = Pipe()
    # Each time the StaleTransactionMonitor pipeline runs, it reassigns
    # all eligible transactions. Passing this inpipe prevents that from
    # taking place more than once.
    inpipe.put(())
    outpipe = Pipe()
    pipeline = stale.create_pipeline(backlog_reassign_delay=1,
                                     timeout=1)
    pipeline.setup(indata=inpipe, outdata=outpipe)
    pipeline.start()

    # wait for all 100 reassigned transactions to come out of the
    # pipeline before terminating it
    for _ in range(100):
        outpipe.get()

    pipeline.terminate()

    reassigned_txs = list(query.get_stale_transactions(b.connection, 0))
    assert len(reassigned_txs) == 100

    # check that every assignment timestamp has increased and that every
    # transaction has a new assignee
    for reassigned_tx in reassigned_txs:
        original_tx = original_txs[reassigned_tx['id']]
        assert (reassigned_tx['assignment_timestamp'] >
                original_tx['assignment_timestamp'])
        assert reassigned_tx['assignee'] != original_tx['assignee']
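
The Pipe wiring above is a handy test pattern: pre-fill the input pipe with a single trigger so the monitor runs exactly once, drain a known number of results from the output pipe, then terminate. A minimal sketch of that pattern as a reusable helper (the helper itself is hypothetical, not part of bigchaindb; it only uses the put/get and setup/start/terminate calls already shown above):

from multipipes import Pipe

def run_pipeline_once(pipeline, expected_outputs):
    # Hypothetical test helper: trigger the pipeline a single time and
    # collect a fixed number of results before shutting it down.
    inpipe, outpipe = Pipe(), Pipe()
    inpipe.put(())  # one trigger -> one pass over the stale backlog
    pipeline.setup(indata=inpipe, outdata=outpipe)
    pipeline.start()
    results = [outpipe.get() for _ in range(expected_outputs)]
    pipeline.terminate()
    return results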
Example no. 2
def test_reassign_transactions(b, user_pk):
    from bigchaindb.backend import query
    from bigchaindb.models import Transaction
    from bigchaindb.pipelines import stale
    # test with single node
    tx = Transaction.create([b.me], [([user_pk], 1)])
    tx = tx.sign([b.me_private])
    b.write_transaction(tx)

    stm = stale.StaleTransactionMonitor(timeout=0.001,
                                        backlog_reassign_delay=0.001)
    # This worked previously because transaction['assignee'] was only used if
    # bigchain.nodes_except_me was not empty.
    tx_dict = tx.to_dict()
    tx_dict['assignee'] = b.me
    stm.reassign_transactions(tx_dict)

    # test with federation
    tx = Transaction.create([b.me], [([user_pk], 1)])
    tx = tx.sign([b.me_private])
    b.write_transaction(tx)

    stm = stale.StaleTransactionMonitor(timeout=0.001,
                                        backlog_reassign_delay=0.001)
    stm.bigchain.nodes_except_me = ['aaa', 'bbb', 'ccc']
    tx = list(query.get_stale_transactions(b.connection, 0))[0]
    stm.reassign_transactions(tx)

    reassigned_tx = list(query.get_stale_transactions(b.connection, 0))[0]
    assert reassigned_tx['assignment_timestamp'] > tx['assignment_timestamp']
    assert reassigned_tx['assignee'] != tx['assignee']

    # test with node not in federation
    tx = Transaction.create([b.me], [([user_pk], 1)])
    tx = tx.sign([b.me_private])
    stm.bigchain.nodes_except_me = ['lol']
    b.write_transaction(tx)
    stm.bigchain.nodes_except_me = []

    tx = list(query.get_stale_transactions(b.connection, 0))[0]
    stm.reassign_transactions(tx)
    assert tx['assignee'] != 'lol'
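
The assertions above pin down what reassignment is expected to do: hand a stale transaction to a node other than its current assignee, fall back to the local node when the federation is empty, and refresh the assignment timestamp. A minimal sketch of that selection logic as a hypothetical standalone function (not the actual StaleTransactionMonitor code):

import random
import time

def pick_reassignment(tx, nodes_except_me, me):
    # Hypothetical illustration of the behaviour the test asserts:
    # prefer a federation node that is not the current assignee,
    # otherwise keep the transaction on the local node, and always
    # bump the assignment timestamp.
    if nodes_except_me:
        candidates = [n for n in nodes_except_me if n != tx.get('assignee')]
        assignee = random.choice(candidates or nodes_except_me)
    else:
        assignee = me
    return {'assignee': assignee, 'assignment_timestamp': time.time()}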
def test_full_pipeline(b, user_pk):
    import random
    import time
    from bigchaindb.backend import query
    from bigchaindb.models import Transaction
    from bigchaindb.pipelines import election
    from multipipes import Pipe

    outpipe = Pipe()

    # write two blocks
    txs = []
    for i in range(100):
        tx = Transaction.create([b.me], [([user_pk], 1)],
                                {'msg': random.random()})
        tx = tx.sign([b.me_private])
        txs.append(tx)

    valid_block = b.create_block(txs)
    b.write_block(valid_block)

    txs = []
    for i in range(100):
        tx = Transaction.create([b.me], [([user_pk], 1)],
                                {'msg': random.random()})
        tx = tx.sign([b.me_private])
        txs.append(tx)

    invalid_block = b.create_block(txs)
    b.write_block(invalid_block)

    pipeline = election.create_pipeline()
    pipeline.setup(indata=election.get_changefeed(), outdata=outpipe)
    pipeline.start()
    time.sleep(1)

    # vote one block valid, one invalid
    vote_valid = b.vote(valid_block.id, 'b' * 64, True)
    vote_invalid = b.vote(invalid_block.id, 'c' * 64, False)

    b.write_vote(vote_valid)
    b.write_vote(vote_invalid)

    outpipe.get()
    pipeline.terminate()

    # only transactions from the invalid block should be returned to
    # the backlog
    assert query.count_backlog(b.connection) == 100
    # compare the ids of the transactions in the invalid block with the
    # ids of the transactions now sitting in the backlog
    tx_from_block = {tx.id for tx in invalid_block.transactions}
    tx_from_backlog = {
        tx['id'] for tx in query.get_stale_transactions(b.connection, 0)
    }
    assert tx_from_block == tx_from_backlog
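
Conceptually, the assertion relies on the election step writing every transaction of a block that was voted invalid back into the backlog. A hypothetical sketch of that requeue step (not the actual election pipeline code; it reuses only write_transaction, which the tests above already call):

def requeue_invalid_block(bigchain, block):
    # Hypothetical illustration: push each transaction of an invalid
    # block back into the backlog so it can be processed again.
    for tx in block.transactions:
        bigchain.write_transaction(tx)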
def test_get_stale_transactions(signed_create_tx):
    import time
    from bigchaindb.backend import connect, query
    conn = connect()

    # create two transactions, one of them stale
    tx1 = signed_create_tx.to_dict()
    tx1.update({'id': 'notstale', 'assignment_timestamp': time.time()})
    tx2 = signed_create_tx.to_dict()
    tx2.update({'id': 'stale', 'assignment_timestamp': time.time() - 60})

    # write the transactions
    conn.db.backlog.insert_one(tx1)
    conn.db.backlog.insert_one(tx2)

    # get stale transactions
    stale_txs = list(query.get_stale_transactions(conn, 30))

    assert len(stale_txs) == 1
    assert stale_txs[0]['id'] == 'stale'
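
These fixtures assume that get_stale_transactions(conn, reassign_delay) returns exactly the backlog entries whose assignment_timestamp is older than now minus the delay. A rough, illustrative sketch of such a query against the same conn.db.backlog collection (assuming a pymongo-style connection; this is not the actual bigchaindb.backend.query implementation):

import time

def stale_backlog_query(conn, reassign_delay):
    # Illustrative only: select backlog documents whose assignment is
    # older than `reassign_delay` seconds.
    cutoff = time.time() - reassign_delay
    return conn.db.backlog.find({'assignment_timestamp': {'$lt': cutoff}})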