Example #1
 def __init__(self, testgen, datadir):
     self.test_generator = testgen
     self.connections = []
     self.test_nodes = []
     self.block_store = BlockStore(datadir)
     self.tx_store = TxStore(datadir)
     self.ping_counter = 1
Example #2
 def __init__(self, testgen, datadir):
     self.test_generator = testgen
     self.connections    = []
     self.test_nodes     = []
     self.block_store    = BlockStore(datadir)
     self.tx_store       = TxStore(datadir)
     self.ping_counter   = 1
Example #3
class TestManager(object):
    def __init__(self, testgen, datadir):
        self.test_generator = testgen
        self.connections = []
        self.test_nodes = []
        self.block_store = BlockStore(datadir)
        self.tx_store = TxStore(datadir)
        self.ping_counter = 1

    def add_all_connections(self, nodes):
        for i in range(len(nodes)):
            # Create a p2p connection to each node
            test_node = TestNode(self.block_store, self.tx_store)
            self.test_nodes.append(test_node)
            self.connections.append(
                NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
            # Make sure the TestNode (callback class) has a reference to its
            # associated NodeConn
            test_node.add_connection(self.connections[-1])

    def clear_all_connections(self):
        self.connections = []
        self.test_nodes = []

    def wait_for_disconnections(self):
        def disconnected():
            return all(node.closed for node in self.test_nodes)

        return wait_until(disconnected, timeout=10)

    def wait_for_verack(self):
        def veracked():
            return all(node.verack_received for node in self.test_nodes)

        return wait_until(veracked, timeout=10)

    def wait_for_pings(self, counter):
        def received_pongs():
            return all(
                node.received_ping_response(counter)
                for node in self.test_nodes)

        return wait_until(received_pongs)

    # sync_blocks: Wait for all connections to request the given blockhash,
    # then send getheaders to find out the tip of each node, and synchronize
    # on the responses by using a ping (and waiting for a pong with the same nonce).
    def sync_blocks(self, blockhash, num_blocks):
        def blocks_requested():
            return all(blockhash in node.block_request_map
                       and node.block_request_map[blockhash]
                       for node in self.test_nodes)

        # --> error if not requested
        if not wait_until(blocks_requested, attempts=20 * num_blocks):
            # print [ c.cb.block_request_map for c in self.connections ]
            raise AssertionError("Not all nodes requested block")

        # Send getheaders message
        [c.cb.send_getheaders() for c in self.connections]

        # Send ping and wait for response -- synchronization hack
        [c.cb.send_ping(self.ping_counter) for c in self.connections]
        self.wait_for_pings(self.ping_counter)
        self.ping_counter += 1

    # Analogous to sync_blocks (see above)
    def sync_transaction(self, txhash, num_events):
        # Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
        def transaction_requested():
            return all(
                txhash in node.tx_request_map and node.tx_request_map[txhash]
                for node in self.test_nodes)

        # --> error if not requested
        if not wait_until(transaction_requested, attempts=20 * num_events):
            # print [ c.cb.tx_request_map for c in self.connections ]
            raise AssertionError("Not all nodes requested transaction")

        # Get the mempool
        [c.cb.send_mempool() for c in self.connections]

        # Send ping and wait for response -- synchronization hack
        [c.cb.send_ping(self.ping_counter) for c in self.connections]
        self.wait_for_pings(self.ping_counter)
        self.ping_counter += 1

        # Sort inv responses from each node
        with mininode_lock:
            [c.cb.lastInv.sort() for c in self.connections]

    # Verify that the tips of all connections agree with each other, and
    # with the expected outcome (if given)
    def check_results(self, blockhash, outcome):
        with mininode_lock:
            for c in self.connections:
                if outcome is None:
                    if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
                        return False
                elif isinstance(outcome, RejectResult):  # Check that block was rejected w/ code
                    if c.cb.bestblockhash == blockhash:
                        return False
                    if blockhash not in c.cb.block_reject_map:
                        print 'Block not in reject map: %064x' % (blockhash)
                        return False
                    if not outcome.match(c.cb.block_reject_map[blockhash]):
                        print 'Block rejected with %s instead of expected %s: %064x' % (
                            c.cb.block_reject_map[blockhash], outcome,
                            blockhash)
                        return False
                elif ((c.cb.bestblockhash == blockhash) != outcome):
                    # print c.cb.bestblockhash, blockhash, outcome
                    return False
            return True

    # Either check that the mempools all agree with each other, or that
    # txhash's presence in the mempool matches the outcome specified.
    # This is somewhat of a strange comparison, in that we're either comparing
    # a particular tx to an outcome, or the entire mempools altogether;
    # perhaps it would be useful to add the ability to check explicitly that
    # a particular tx's existence in the mempool is the same across all nodes.
    def check_mempool(self, txhash, outcome):
        with mininode_lock:
            for c in self.connections:
                if outcome is None:
                    # Make sure the mempools agree with each other
                    if c.cb.lastInv != self.connections[0].cb.lastInv:
                        # print c.rpc.getrawmempool()
                        return False
                elif isinstance(outcome, RejectResult):  # Check that tx was rejected w/ code
                    if txhash in c.cb.lastInv:
                        return False
                    if txhash not in c.cb.tx_reject_map:
                        print 'Tx not in reject map: %064x' % (txhash)
                        return False
                    if not outcome.match(c.cb.tx_reject_map[txhash]):
                        print 'Tx rejected with %s instead of expected %s: %064x' % (
                            c.cb.tx_reject_map[txhash], outcome, txhash)
                        return False
                elif ((txhash in c.cb.lastInv) != outcome):
                    # print c.rpc.getrawmempool(), c.cb.lastInv
                    return False
            return True

    def run(self):
        # Wait until verack is received
        self.wait_for_verack()

        test_number = 1
        for test_instance in self.test_generator.get_tests():
            # We use these variables to keep track of the last block
            # and last transaction in the tests, which are used
            # if we're not syncing on every block or every tx.
            [block, block_outcome, tip] = [None, None, None]
            [tx, tx_outcome] = [None, None]
            invqueue = []

            for test_obj in test_instance.blocks_and_transactions:
                b_or_t = test_obj[0]
                outcome = test_obj[1]
                # Determine if we're dealing with a block or tx
                if isinstance(b_or_t, CBlock):  # Block test runner
                    block = b_or_t
                    block_outcome = outcome
                    tip = block.sha256
                    # each test_obj can have an optional third argument
                    # to specify the tip we should compare with
                    # (default is to use the block being tested)
                    if len(test_obj) >= 3:
                        tip = test_obj[2]

                    # Add to shared block_store, set as current block
                    # If there was an open getdata request for the block
                    # previously, and we didn't have an entry in the
                    # block_store, then immediately deliver, because the
                    # node wouldn't send another getdata request while
                    # the earlier one is outstanding.
                    first_block_with_hash = True
                    if self.block_store.get(block.sha256) is not None:
                        first_block_with_hash = False
                    with mininode_lock:
                        self.block_store.add_block(block)
                        for c in self.connections:
                            if (first_block_with_hash
                                    and block.sha256 in c.cb.block_request_map
                                    and c.cb.block_request_map[block.sha256]):
                                # There was a previous request for this block hash
                                # Most likely, we delivered a header for this block
                                # but never had the block to respond to the getdata
                                c.send_message(msg_block(block))
                            else:
                                c.cb.block_request_map[block.sha256] = False
                    # Either send inv's to each node and sync, or add
                    # to invqueue for later inv'ing.
                    if (test_instance.sync_every_block):
                        [c.cb.send_inv(block) for c in self.connections]
                        self.sync_blocks(block.sha256, 1)
                        if (not self.check_results(tip, outcome)):
                            raise AssertionError("Test failed at test %d" %
                                                 test_number)
                    else:
                        invqueue.append(CInv(2, block.sha256))
                elif isinstance(b_or_t, CBlockHeader):
                    block_header = b_or_t
                    self.block_store.add_header(block_header)
                else:  # Tx test runner
                    assert (isinstance(b_or_t, CTransaction))
                    tx = b_or_t
                    tx_outcome = outcome
                    # Add to shared tx store and clear map entry
                    with mininode_lock:
                        self.tx_store.add_transaction(tx)
                        for c in self.connections:
                            c.cb.tx_request_map[tx.sha256] = False
                    # Again, either inv to all nodes or save for later
                    if (test_instance.sync_every_tx):
                        [c.cb.send_inv(tx) for c in self.connections]
                        self.sync_transaction(tx.sha256, 1)
                        if (not self.check_mempool(tx.sha256, outcome)):
                            raise AssertionError("Test failed at test %d" %
                                                 test_number)
                    else:
                        invqueue.append(CInv(1, tx.sha256))
                # Ensure we're not overflowing the inv queue
                if len(invqueue) == MAX_INV_SZ:
                    [
                        c.send_message(msg_inv(invqueue))
                        for c in self.connections
                    ]
                    invqueue = []

            # Do final sync if we weren't syncing on every block or every tx.
            if (not test_instance.sync_every_block and block is not None):
                if len(invqueue) > 0:
                    [
                        c.send_message(msg_inv(invqueue))
                        for c in self.connections
                    ]
                    invqueue = []
                self.sync_blocks(block.sha256,
                                 len(test_instance.blocks_and_transactions))
                if (not self.check_results(tip, block_outcome)):
                    raise AssertionError("Block test failed at test %d" %
                                         test_number)
            if (not test_instance.sync_every_tx and tx is not None):
                if len(invqueue) > 0:
                    [
                        c.send_message(msg_inv(invqueue))
                        for c in self.connections
                    ]
                    invqueue = []
                self.sync_transaction(
                    tx.sha256, len(test_instance.blocks_and_transactions))
                if (not self.check_mempool(tx.sha256, tx_outcome)):
                    raise AssertionError("Mempool test failed at test %d" %
                                         test_number)

            print "Test %d: PASS" % test_number, [
                c.rpc.getblockcount() for c in self.connections
            ]
            test_number += 1

        [c.disconnect_node() for c in self.connections]
        self.wait_for_disconnections()
        self.block_store.close()
        self.tx_store.close()
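
A note on two helpers this example relies on but does not define: wait_until and RejectResult. The sketches below are illustrations only, written to match how the calls above use them (a predicate polled roughly every 50 ms, and an expected-rejection outcome with a match() method); the project's actual implementations may differ.

import time

def wait_until(predicate, attempts=float('inf'), timeout=float('inf')):
    # Sketch: poll `predicate` every 50 ms until it returns True, the
    # attempt budget is spent, or the timeout (in seconds) expires.
    # Returns True on success, False otherwise -- matching how
    # sync_blocks and sync_transaction treat the return value.
    attempt = 0
    elapsed = 0.0
    while attempt < attempts and elapsed < timeout:
        if predicate():
            return True
        attempt += 1
        elapsed += 0.05
        time.sleep(0.05)
    return False

class RejectResult(object):
    # Hypothetical expected-rejection outcome: check_results and
    # check_mempool compare it against the entry recorded in
    # block_reject_map / tx_reject_map via match().
    def __init__(self, code, reason=''):
        self.code = code
        self.reason = reason

    def match(self, other):
        # Accept any recorded rejection whose code matches and whose
        # reason starts with the expected reason prefix.
        return self.code == other.code and other.reason.startswith(self.reason)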
Example #4
class TestManager(object):

    def __init__(self, testgen, datadir):
        self.test_generator = testgen
        self.connections    = []
        self.test_nodes     = []
        self.block_store    = BlockStore(datadir)
        self.tx_store       = TxStore(datadir)
        self.ping_counter   = 1

    def add_all_connections(self, nodes):
        for i in range(len(nodes)):
            # Create a p2p connection to each node
            test_node = TestNode(self.block_store, self.tx_store)
            self.test_nodes.append(test_node)
            self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
            # Make sure the TestNode (callback class) has a reference to its
            # associated NodeConn
            test_node.add_connection(self.connections[-1])

    def wait_for_disconnections(self):
        def disconnected():
            return all(node.closed for node in self.test_nodes)
        return wait_until(disconnected, timeout=10)

    def wait_for_verack(self):
        def veracked():
            return all(node.verack_received for node in self.test_nodes)
        return wait_until(veracked, timeout=10)

    def wait_for_pings(self, counter):
        def received_pongs():
            return all(node.received_ping_response(counter) for node in self.test_nodes)
        return wait_until(received_pongs)

    # sync_blocks: Wait for all connections to request the given blockhash,
    # then send getheaders to find out the tip of each node, and synchronize
    # on the responses by using a ping (and waiting for a pong with the same nonce).
    def sync_blocks(self, blockhash, num_blocks):
        def blocks_requested():
            return all(
                blockhash in node.block_request_map and node.block_request_map[blockhash]
                for node in self.test_nodes
            )

        # --> error if not requested
        if not wait_until(blocks_requested, attempts=20*num_blocks):
            # print [ c.cb.block_request_map for c in self.connections ]
            raise AssertionError("Not all nodes requested block")
        # --> Answer request (we did this inline!)

        # Send getheaders message
        [ c.cb.send_getheaders() for c in self.connections ]

        # Send ping and wait for response -- synchronization hack
        [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
        self.wait_for_pings(self.ping_counter)
        self.ping_counter += 1

    # Analogous to sync_blocks (see above)
    def sync_transaction(self, txhash, num_events):
        # Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
        def transaction_requested():
            return all(
                txhash in node.tx_request_map and node.tx_request_map[txhash]
                for node in self.test_nodes
            )

        # --> error if not requested
        if not wait_until(transaction_requested, attempts=20*num_events):
            # print [ c.cb.tx_request_map for c in self.connections ]
            raise AssertionError("Not all nodes requested transaction")
        # --> Answer request (we did this inline!)

        # Get the mempool
        [ c.cb.send_mempool() for c in self.connections ]

        # Send ping and wait for response -- synchronization hack
        [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
        self.wait_for_pings(self.ping_counter)
        self.ping_counter += 1

        # Sort inv responses from each node
        with mininode_lock:
            [ c.cb.lastInv.sort() for c in self.connections ]

    # Verify that the tips of all connections agree with each other, and
    # with the expected outcome (if given)
    def check_results(self, blockhash, outcome):
        with mininode_lock:
            for c in self.connections:
                if outcome is None:
                    if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
                        return False
                elif ((c.cb.bestblockhash == blockhash) != outcome):
                    # print c.cb.bestblockhash, blockhash, outcome
                    return False
            return True

    # Either check that the mempools all agree with each other, or that
    # txhash's presence in the mempool matches the outcome specified.
    # This is somewhat of a strange comparison, in that we're either comparing
    # a particular tx to an outcome, or the entire mempools altogether;
    # perhaps it would be useful to add the ability to check explicitly that
    # a particular tx's existence in the mempool is the same across all nodes.
    def check_mempool(self, txhash, outcome):
        with mininode_lock:
            for c in self.connections:
                if outcome is None:
                    # Make sure the mempools agree with each other
                    if c.cb.lastInv != self.connections[0].cb.lastInv:
                        # print c.rpc.getrawmempool()
                        return False
                elif ((txhash in c.cb.lastInv) != outcome):
                    # print c.rpc.getrawmempool(), c.cb.lastInv
                    return False
            return True

    def run(self):
        # Wait until verack is received
        self.wait_for_verack()

        test_number = 1
        for test_instance in self.test_generator.get_tests():
            # We use these variables to keep track of the last block
            # and last transaction in the tests, which are used
            # if we're not syncing on every block or every tx.
            [ block, block_outcome ] = [ None, None ]
            [ tx, tx_outcome ] = [ None, None ]
            invqueue = []

            for b_or_t, outcome in test_instance.blocks_and_transactions:
                # Determine if we're dealing with a block or tx
                if isinstance(b_or_t, CBlock):  # Block test runner
                    block = b_or_t
                    block_outcome = outcome
                    # Add to shared block_store, set as current block
                    with mininode_lock:
                        self.block_store.add_block(block)
                        for c in self.connections:
                            c.cb.block_request_map[block.sha256] = False
                    # Either send inv's to each node and sync, or add
                    # to invqueue for later inv'ing.
                    if (test_instance.sync_every_block):
                        [ c.cb.send_inv(block) for c in self.connections ]
                        self.sync_blocks(block.sha256, 1)
                        if (not self.check_results(block.sha256, outcome)):
                            raise AssertionError("Test failed at test %d" % test_number)
                    else:
                        invqueue.append(CInv(2, block.sha256))
                else:  # Tx test runner
                    assert(isinstance(b_or_t, CTransaction))
                    tx = b_or_t
                    tx_outcome = outcome
                    # Add to shared tx store and clear map entry
                    with mininode_lock:
                        self.tx_store.add_transaction(tx)
                        for c in self.connections:
                            c.cb.tx_request_map[tx.sha256] = False
                    # Again, either inv to all nodes or save for later
                    if (test_instance.sync_every_tx):
                        [ c.cb.send_inv(tx) for c in self.connections ]
                        self.sync_transaction(tx.sha256, 1)
                        if (not self.check_mempool(tx.sha256, outcome)):
                            raise AssertionError("Test failed at test %d" % test_number)
                    else:
                        invqueue.append(CInv(1, tx.sha256))
                # Ensure we're not overflowing the inv queue
                if len(invqueue) == MAX_INV_SZ:
                    [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
                    invqueue = []

            # Do final sync if we weren't syncing on every block or every tx.
            if (not test_instance.sync_every_block and block is not None):
                if len(invqueue) > 0:
                    [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
                    invqueue = []
                self.sync_blocks(block.sha256,
                                 len(test_instance.blocks_and_transactions))
                if (not self.check_results(block.sha256, block_outcome)):
                    raise AssertionError("Block test failed at test %d" % test_number)
            if (not test_instance.sync_every_tx and tx is not None):
                if len(invqueue) > 0:
                    [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
                    invqueue = []
                self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
                if (not self.check_mempool(tx.sha256, tx_outcome)):
                    raise AssertionError("Mempool test failed at test %d" % test_number)

            print "Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ]
            test_number += 1

        [ c.disconnect_node() for c in self.connections ]
        self.wait_for_disconnections()
        self.block_store.close()
        self.tx_store.close()
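
For context, run() only assumes that the generator's get_tests() yields objects exposing blocks_and_transactions, sync_every_block, and sync_every_tx. A minimal, hypothetical test generator wired into TestManager might look like the sketch below (TestInstance and ExampleTestGen are illustrative names, not part of the framework shown above):

class TestInstance(object):
    # Container exposing exactly the attributes run() reads.
    def __init__(self, objects, sync_every_block=True, sync_every_tx=False):
        self.blocks_and_transactions = objects  # list of [block_or_tx, outcome] pairs
        self.sync_every_block = sync_every_block
        self.sync_every_tx = sync_every_tx

class ExampleTestGen(object):
    # Yield a single test expecting `block` to become the new tip
    # (outcome True) on every connected node.
    def __init__(self, block):
        self.block = block

    def get_tests(self):
        yield TestInstance([[self.block, True]])

# Wiring (block, nodes, and datadir come from the surrounding test framework):
#   test = TestManager(ExampleTestGen(block), datadir)
#   test.add_all_connections(nodes)
#   test.run()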
Example #5
class TestManager(object):
    def __init__(self, testgen, datadir):
        self.test_generator = testgen
        self.connections = []
        self.block_store = BlockStore(datadir)
        self.tx_store = TxStore(datadir)
        self.ping_counter = 1

    def add_all_connections(self, nodes):
        for i in range(len(nodes)):
            # Create a p2p connection to each node
            self.connections.append(
                NodeConn('127.0.0.1', p2p_port(i), nodes[i],
                         TestNode(self.block_store, self.tx_store)))
            # Make sure the TestNode (callback class) has a reference to its
            # associated NodeConn
            self.connections[-1].cb.add_connection(self.connections[-1])

    def clear_all_connections(self):
        self.connections = []
        self.test_nodes = []

    def wait_for_verack(self):
        sleep_time = 0.05
        max_tries = 10 / sleep_time  # Wait at most 10 seconds
        while max_tries > 0:
            done = True
            with mininode_lock:
                for c in self.connections:
                    if c.cb.verack_received is False:
                        done = False
                        break
            if done:
                break
            time.sleep(sleep_time)
            max_tries -= 1  # count down so the 10-second cap above is enforced

    def wait_for_pings(self, counter):
        received_pongs = False
        while received_pongs is not True:
            time.sleep(0.05)
            received_pongs = True
            with mininode_lock:
                for c in self.connections:
                    if c.cb.received_ping_response(counter) is not True:
                        received_pongs = False
                        break

    # sync_blocks: Wait for all connections to request the given blockhash,
    # then send getheaders to find out the tip of each node, and synchronize
    # on the responses by using a ping (and waiting for a pong with the same nonce).
    def sync_blocks(self, blockhash, num_blocks):
        # Wait for nodes to request block (50ms sleep * 20 tries * num_blocks)
        max_tries = 20 * num_blocks
        while max_tries > 0:
            with mininode_lock:
                results = [
                    blockhash in c.cb.block_request_map
                    and c.cb.block_request_map[blockhash]
                    for c in self.connections
                ]
            if False not in results:
                break
            time.sleep(0.05)
            max_tries -= 1

        # --> error if not requested
        if max_tries == 0:
            # print [ c.cb.block_request_map for c in self.connections ]
            raise AssertionError("Not all nodes requested block")
        # --> Answer request (we did this inline!)

        # Send getheaders message
        [c.cb.send_getheaders() for c in self.connections]

        # Send ping and wait for response -- synchronization hack
        [c.cb.send_ping(self.ping_counter) for c in self.connections]
        self.wait_for_pings(self.ping_counter)
        self.ping_counter += 1

    # Analogous to sync_blocks (see above)
    def sync_transaction(self, txhash, num_events):
        # Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
        max_tries = 20 * num_events
        while max_tries > 0:
            with mininode_lock:
                results = [
                    txhash in c.cb.tx_request_map
                    and c.cb.tx_request_map[txhash] for c in self.connections
                ]
            if False not in results:
                break
            time.sleep(0.05)
            max_tries -= 1

        # --> error if not requested
        if max_tries == 0:
            # print [ c.cb.tx_request_map for c in self.connections ]
            raise AssertionError("Not all nodes requested transaction")
        # --> Answer request (we did this inline!)

        # Get the mempool
        [c.cb.send_mempool() for c in self.connections]

        # Send ping and wait for response -- synchronization hack
        [c.cb.send_ping(self.ping_counter) for c in self.connections]
        self.wait_for_pings(self.ping_counter)
        self.ping_counter += 1

        # Sort inv responses from each node
        with mininode_lock:
            [c.cb.lastInv.sort() for c in self.connections]

    # Verify that the tips of all connections agree with each other, and
    # with the expected outcome (if given)
    def check_results(self, blockhash, outcome):
        with mininode_lock:
            for c in self.connections:
                if outcome is None:
                    if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
                        return False
                elif ((c.cb.bestblockhash == blockhash) != outcome):
                    # print c.cb.bestblockhash, blockhash, outcome
                    return False
            return True

    # Either check that the mempools all agree with each other, or that
    # txhash's presence in the mempool matches the outcome specified.
    # This is somewhat of a strange comparison, in that we're either comparing
    # a particular tx to an outcome, or the entire mempools altogether;
    # perhaps it would be useful to add the ability to check explicitly that
    # a particular tx's existence in the mempool is the same across all nodes.
    def check_mempool(self, txhash, outcome):
        with mininode_lock:
            for c in self.connections:
                if outcome is None:
                    # Make sure the mempools agree with each other
                    if c.cb.lastInv != self.connections[0].cb.lastInv:
                        # print c.rpc.getrawmempool()
                        return False
                elif ((txhash in c.cb.lastInv) != outcome):
                    # print c.rpc.getrawmempool(), c.cb.lastInv
                    return False
            return True

    def run(self):
        # Wait until verack is received
        self.wait_for_verack()

        test_number = 1
        for test_instance in self.test_generator.get_tests():
            # We use these variables to keep track of the last block
            # and last transaction in the tests, which are used
            # if we're not syncing on every block or every tx.
            [block, block_outcome] = [None, None]
            [tx, tx_outcome] = [None, None]
            invqueue = []

            for b_or_t, outcome in test_instance.blocks_and_transactions:
                # Determine if we're dealing with a block or tx
                if isinstance(b_or_t, CBlock):  # Block test runner
                    block = b_or_t
                    block_outcome = outcome
                    # Add to shared block_store, set as current block
                    with mininode_lock:
                        self.block_store.add_block(block)
                        for c in self.connections:
                            c.cb.block_request_map[block.sha256] = False
                    # Either send inv's to each node and sync, or add
                    # to invqueue for later inv'ing.
                    if (test_instance.sync_every_block):
                        [c.cb.send_inv(block) for c in self.connections]
                        self.sync_blocks(block.sha256, 1)
                        if (not self.check_results(block.sha256, outcome)):
                            raise AssertionError("Test failed at test %d" %
                                                 test_number)
                    else:
                        invqueue.append(CInv(2, block.sha256))
                else:  # Tx test runner
                    assert (isinstance(b_or_t, CTransaction))
                    tx = b_or_t
                    tx_outcome = outcome
                    # Add to shared tx store and clear map entry
                    with mininode_lock:
                        self.tx_store.add_transaction(tx)
                        for c in self.connections:
                            c.cb.tx_request_map[tx.sha256] = False
                    # Again, either inv to all nodes or save for later
                    if (test_instance.sync_every_tx):
                        [c.cb.send_inv(tx) for c in self.connections]
                        self.sync_transaction(tx.sha256, 1)
                        if (not self.check_mempool(tx.sha256, outcome)):
                            raise AssertionError("Test failed at test %d" %
                                                 test_number)
                    else:
                        invqueue.append(CInv(1, tx.sha256))
                # Ensure we're not overflowing the inv queue
                if len(invqueue) == MAX_INV_SZ:
                    [
                        c.send_message(msg_inv(invqueue))
                        for c in self.connections
                    ]
                    invqueue = []

            # Do final sync if we weren't syncing on every block or every tx.
            if (not test_instance.sync_every_block and block is not None):
                if len(invqueue) > 0:
                    [
                        c.send_message(msg_inv(invqueue))
                        for c in self.connections
                    ]
                    invqueue = []
                self.sync_blocks(block.sha256,
                                 len(test_instance.blocks_and_transactions))
                if (not self.check_results(block.sha256, block_outcome)):
                    raise AssertionError("Block test failed at test %d" %
                                         test_number)
            if (not test_instance.sync_every_tx and tx is not None):
                if len(invqueue) > 0:
                    [
                        c.send_message(msg_inv(invqueue))
                        for c in self.connections
                    ]
                    invqueue = []
                self.sync_transaction(
                    tx.sha256, len(test_instance.blocks_and_transactions))
                if (not self.check_mempool(tx.sha256, tx_outcome)):
                    raise AssertionError("Mempool test failed at test %d" %
                                         test_number)

            print "Test %d: PASS" % test_number, [
                c.rpc.getblockcount() for c in self.connections
            ]
            test_number += 1

        self.block_store.close()
        self.tx_store.close()
        [c.disconnect_node() for c in self.connections]
Example #6
 def __init__(self):
   self.config = config.config()  
   self.meta = BlockStore(self.config['tinyimages']['metapath'], 768)
   self.data = BlockStore(self.config['tinyimages']['datapath'], 3072) 
   self.img_count = 79302017
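
The magic numbers here come from the 80 Million Tiny Images layout: each image is stored as a flat 3072-byte record (32 x 32 pixels x 3 color channels), each metadata record is 768 bytes, and img_count = 79302017 is presumably the total number of images in the collection.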
Example #7
class TinyImage:
  def __init__(self):
    self.config = config.config()  
    self.meta = BlockStore(self.config['tinyimages']['metapath'], 768)
    self.data = BlockStore(self.config['tinyimages']['datapath'], 3072) 
    self.img_count = 79302017
 
  #public functions
  def byid(self, ids):
    if isinstance(ids, int):
      return numpy.fromstring(self.data.byid(ids), dtype='uint8')    
    elif isinstance(ids, tuple):
      o = []
      for s in self.data.slice(ids[0], ids[1]):
        o.append(numpy.fromstring(s, dtype='uint8'))
      return o 
    else:
      o = []
      for i in ids:
        o.append(numpy.fromstring(self.data.byid(i), dtype='uint8'))  
      return o

  def display(self, items):
    import cStringIO as StringIO
    import base64
    from IPython.core.display import HTML
    output_html = ""
    for i in items:
      t = i.reshape(32,32,3, order="F").copy()
      img = scipy.misc.toimage(t) 
      output = StringIO.StringIO()
      img.save(output, format="PNG")
      output_html += '<img src="data:image/png;base64,%s"/>' % base64.b64encode(output.getvalue())
    return HTML(output_html) 
    
  def search(self, keyword, limit):
    (l, h) = self._logSearch(keyword)
    found = False
    found_count = 0
    o = []
    for i in range(l, h):
      curr_word = self._keywordFromMeta(i)
      if curr_word.lower() == keyword.lower():
        found = True
        o.append(i)
        found_count += 1
        if (found_count == limit):
          break
      else:
        if (found):
          break  
    return o

  def _keywordFromMeta(self, index):
    for s in self.meta.slice(index, index):
      return s[0:80].strip()

  def _logSearch(self, term):
    low = 0
    high = self.img_count
    for i in range(0, 9):
      curr_word = self._keywordFromMeta(int((low + high) / 2))
      cmp = _strcmp(curr_word.lower(), term.lower())
      if (cmp == 0):
        return (low, high)
      if (cmp == 1):
        high = ((low + high) / 2)
      if (cmp == -1):
        low = ((low + high) / 2)
    return (low, high)

  def subsets(self):
    return None
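
_logSearch calls a _strcmp helper that the snippet does not include. From the way the binary search uses the result, it must be a three-way string comparison; a minimal sketch with that convention follows (the name is taken from the call above, the body is an assumption):

def _strcmp(a, b):
    # Three-way comparison: 0 if equal, 1 if a sorts after b, -1 if before,
    # matching how _logSearch narrows [low, high] around the search term.
    if a == b:
        return 0
    return 1 if a > b else -1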
Example #8
 def __init__(self):
     self.config = config.config()
     self.meta = BlockStore(self.config['tinyimages']['metapath'], 768)
     self.data = BlockStore(self.config['tinyimages']['datapath'], 3072)
     self.img_count = 79302017
Example #9
class TinyImage:
    def __init__(self):
        self.config = config.config()
        self.meta = BlockStore(self.config['tinyimages']['metapath'], 768)
        self.data = BlockStore(self.config['tinyimages']['datapath'], 3072)
        self.img_count = 79302017

    #public functions
    def byid(self, ids):
        if isinstance(ids, int):
            return numpy.fromstring(self.data.byid(ids), dtype='uint8')
        elif isinstance(ids, tuple):
            o = []
            for s in self.data.slice(ids[0], ids[1]):
                o.append(numpy.fromstring(s, dtype='uint8'))
            return o
        else:
            o = []
            for i in ids:
                o.append(numpy.fromstring(self.data.byid(i), dtype='uint8'))
            return o

    def display(self, items):
        import cStringIO as StringIO
        import base64
        from IPython.core.display import HTML
        output_html = ""
        for i in items:
            t = i.reshape(32, 32, 3, order="F").copy()
            img = scipy.misc.toimage(t)
            output = StringIO.StringIO()
            img.save(output, format="PNG")
            output_html += '<img src="data:image/png;base64,%s"/>' % base64.b64encode(
                output.getvalue())
        return HTML(output_html)

    def search(self, keyword, limit):
        (l, h) = self._logSearch(keyword)
        found = False
        found_count = 0
        o = []
        for i in range(l, h):
            curr_word = self._keywordFromMeta(i)
            if curr_word.lower() == keyword.lower():
                found = True
                o.append(i)
                found_count += 1
                if (found_count == limit):
                    break
            else:
                if (found):
                    break
        return o

    def _keywordFromMeta(self, index):
        for s in self.meta.slice(index, index):
            return s[0:80].strip()

    def _logSearch(self, term):
        low = 0
        high = self.img_count
        for i in range(0, 9):
            curr_word = self._keywordFromMeta(int((low + high) / 2))
            cmp = _strcmp(curr_word.lower(), term.lower())
            if (cmp == 0):
                return (low, high)
            if (cmp == 1):
                high = ((low + high) / 2)
            if (cmp == -1):
                low = ((low + high) / 2)
        return (low, high)

    def subsets(self):
        return None
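
Putting the TinyImage API together, a typical session (hypothetical, and assuming config.config() points at valid metapath/datapath files) would binary-search the sorted metadata for a keyword, fetch the matching 3072-byte pixel vectors, and render them inline in IPython:

ti = TinyImage()
# Up to 25 indices whose metadata keyword equals "cat" (case-insensitive).
indices = ti.search("cat", 25)
# Fetch the raw uint8 pixel vectors and render them as 32x32 thumbnails.
images = ti.byid(indices)
ti.display(images)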