Example #1
def test_work_local_single(local_work_plugin_factory, work_unit_factory):
    # Enqueue an easy PoW and make sure it is completed
    work_plugin = local_work_plugin_factory()
    work_plugin.add_work_units_to_solve(
        [work_unit_factory(account_id=ACCOUNT_ID)],
        network_difficulty=to_hex(10000, 16))
    work_units = work_plugin.work_units

    wait_for(lambda: count_solved_units(work_units) == 1, timeout=1)

    assert count_pending_units(work_units) == 0

    # Worker thread should stop soon after the work is finished
    wait_for(lambda: len([
        thread.name for thread in threading.enumerate()
        if thread.name.startswith("work_local_worker")
    ]) == 0,
             timeout=1)

    # Get the completed work
    completed_work_unit = next(work_unit for work_unit in work_units.values()
                               if work_unit.solved)

    assert completed_work_unit.account_id == ACCOUNT_ID
    assert completed_work_unit.difficulty == to_hex(10000, 16)
    assert get_work_value(block_hash=completed_work_unit.work_block_hash,
                          work=completed_work_unit.work) > 10000
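
The examples in this listing rely on a few helpers that are never shown: a wait_for polling function and the count_solved_units / count_pending_units counters. The sketch below is only an assumption of how they might work, with wait_for polling a condition until it returns a truthy value or the timeout expires and raising TimeoutError on expiry (the behaviour Example #3 expects). The mongo-connector style examples further below instead treat the return value as a boolean, so their wait_for presumably returns False on timeout rather than raising.

import time


def wait_for(condition, timeout=5, interval=0.05):
    """Poll `condition` until it returns a truthy value or `timeout` expires.

    Hypothetical helper: returns the truthy result on success and raises
    TimeoutError otherwise, matching the pytest.raises(TimeoutError) usage
    in Example #3.
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = condition()
        if result:
            return result
        time.sleep(interval)
    raise TimeoutError("condition not met within %s seconds" % timeout)


def count_solved_units(work_units):
    # `work_units` is assumed to map block hashes to objects exposing a
    # `solved` flag, as the assertions above read it
    return sum(1 for unit in work_units.values() if unit.solved)


def count_pending_units(work_units):
    return sum(1 for unit in work_units.values() if not unit.solved)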
Example #2
    def test_invalid_signature(self, mock_nanovault_node,
                               mock_nanovault_ws_node,
                               nanovault_ws_node_network_plugin):
        # If the node returns an invalid signature, the network plugin
        # will abort and shut itself down
        mock_nanovault_ws_node.add_replay_datasets(["invalid_signature"
                                                    ]).start()
        network_plugin = nanovault_ws_node_network_plugin

        # Start watching a single account
        network_plugin.account_sync_statuses["xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc"] \
            = AccountSyncStatus(
                account_id="xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc"
            )
        sync_status = network_plugin.account_sync_statuses[
            "xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc"]

        wait_for(lambda: network_plugin.connection_status.aborted, timeout=2)
        wait_for(lambda: not network_plugin.started, timeout=2)

        assert not sync_status.network_head
        assert not network_plugin.connected
        assert not network_plugin.started
        assert isinstance(network_plugin.connection_status.error,
                          InvalidSignature)
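
Several of the network-plugin tests register an AccountSyncStatus entry keyed by account ID and then wait on its fields. The class itself is not part of this listing; the sketch below only reconstructs the fields the assertions read, and the types and defaults are assumptions.

from dataclasses import dataclass
from typing import Optional


@dataclass
class AccountSyncStatus:
    # Hypothetical reconstruction; the field names come from the tests,
    # the defaults and types are assumptions
    account_id: str
    network_head: Optional[str] = None   # newest confirmed block hash seen on the network
    wallet_head: Optional[str] = None    # newest block hash applied to the local wallet
    sync_complete: bool = False          # True once the account blockchain has been synced
    ready_to_pocket: bool = False        # True when pending (pocketable) blocks can be received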
Example #3
def test_work_local_impossible(local_work_plugin_factory, work_unit_factory):
    # Enqueue an impossible PoW and shut down the work server while it is
    # still running
    work_plugin = local_work_plugin_factory(threads=2)
    work_plugin.add_work_units_to_solve([work_unit_factory()],
                                        network_difficulty=to_hex((2**64) - 1,
                                                                  16))
    work_units = work_plugin.work_units

    with pytest.raises(TimeoutError):
        wait_for(lambda: count_solved_units(work_units) == 1, timeout=1)

    # Ensure that there are two active worker threads
    assert len([
        thread.name for thread in threading.enumerate()
        if thread.name.startswith("work_local_worker")
    ]) == 2

    assert count_solved_units(work_units) == 0

    # Shut down the work server
    work_plugin.stop()

    # No threads should be left alive after stopping the work server
    assert len([
        thread.name for thread in threading.enumerate()
        if thread.name.startswith("work_local_worker")
    ]) == 0
Example #4
    def test_pocketable_blocks(self, mock_nanovault_node,
                               mock_nanovault_ws_node,
                               nanovault_ws_node_network_plugin):
        mock_nanovault_node.add_replay_datasets(
            ["nanovault_version", "ws_watching"]).start()
        mock_nanovault_ws_node.add_replay_datasets(["pocketable"]).start()
        network_plugin = nanovault_ws_node_network_plugin

        # Start watching a single account
        network_plugin.account_sync_statuses["xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc"] \
            = AccountSyncStatus(
                account_id="xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc"
            )

        wait_for(lambda: network_plugin.pocketable_block_queue.qsize() == 1,
                 timeout=1)

        link_block = network_plugin.pocketable_block_queue.get()

        assert link_block.block_hash == "82CDDC385108D25E520B9CB2C7CB539CDF2FD5C9C3D7F4992AB6E18D0135B85F"
        assert link_block.amount == 1000000000000000000000000000000
        assert link_block.recipient == "xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc"

        assert link_block.timestamp.source == TimestampSource.BROADCAST
        assert int(link_block.timestamp.date.timestamp()) > time.time() - 10
Example #5
    def test_filter_fields(self):
        opman, _, _ = self.get_oplog_thread()
        docman = opman.doc_managers[0]
        conn = opman.main_connection

        include_fields = ["a", "b", "c"]
        exclude_fields = ["d", "e", "f"]

        # Set fields to care about
        opman.fields = include_fields
        # Documents have more than just these fields
        doc = {
            "a": 1, "b": 2, "c": 3,
            "d": 4, "e": 5, "f": 6,
            "_id": 1
        }
        db = conn['test']['test']
        db.insert(doc)
        wait_for(lambda: db.count() == 1)
        opman.dump_collection()

        result = docman._search()[0]
        keys = result.keys()
        for inc, exc in zip(include_fields, exclude_fields):
            self.assertIn(inc, keys)
            self.assertNotIn(exc, keys)
Example #6
    def test_many_targets(self):
        """Test that one OplogThread is capable of replicating to more than
        one target.
        """

        opman, primary_conn, oplog_coll = self.get_oplog_thread()
        doc_managers = [DocManager(), DocManager(), DocManager()]
        opman.doc_managers = doc_managers

        # start replicating
        opman.start()
        primary_conn["test"]["test"].insert({
            "name": "kermit",
            "color": "green"
        })
        primary_conn["test"]["test"].insert({
            "name": "elmo",
            "color": "firetruck red"
        })

        self.assertTrue(
            wait_for(lambda: sum(len(d._search()) for d in doc_managers) == 6),
            "OplogThread should be able to replicate to multiple targets")

        primary_conn["test"]["test"].remove({"name": "elmo"})

        self.assertTrue(
            wait_for(lambda: sum(len(d._search()) for d in doc_managers) == 3),
            "OplogThread should be able to replicate to multiple targets")
        for d in doc_managers:
            self.assertEqual(d._search()[0]["name"], "kermit")

        # cleanup
        opman.join()
Example #7
    def test_sync_legacy_watching(self, mock_node, node_network_plugin):
        # Synchronize an account blockchain consisting only of legacy blocks
        mock_node.add_replay_datasets(
            ["legacy_watching", "active_difficulty", "version"]).start()
        network_plugin = node_network_plugin

        network_plugin.account_sync_statuses["xrb_3rropjiqfxpmrrkooej4qtmm1pueu36f9ghinpho4esfdor8785a455d16nf"] \
            = AccountSyncStatus(
                account_id="xrb_3rropjiqfxpmrrkooej4qtmm1pueu36f9ghinpho4esfdor8785a455d16nf"
            )

        # Wait until the account has finished syncing
        sync_status = network_plugin.account_sync_statuses[
            "xrb_3rropjiqfxpmrrkooej4qtmm1pueu36f9ghinpho4esfdor8785a455d16nf"]
        wait_for(lambda: sync_status.sync_complete, timeout=2)

        assert network_plugin.processed_block_queue.qsize() == 5

        block_results = [
            network_plugin.processed_block_queue.get_nowait()
            for _ in range(0, 5)
        ]
        blocks = [result.block for result in block_results]
        block_a, block_b, block_c, block_d, block_e = blocks

        # Ensure blocks are in the correct order and have the corresponding
        # link blocks
        assert block_a.block_hash == "088EE46429CA936F76C4EAA20B97F6D33E5D872971433EE0C1311BCB98764456"
        assert block_b.block_hash == "13552AC3928E93B5C6C215F61879358E248D4A5246B8B3D1EEC5A566EDCEE077"
        assert block_c.block_hash == "D6E1921FA6B341EE1D3EC36F31AF3B7B73EE17F82CA80B76002EDBA30B82B447"
        assert block_d.block_hash == "94EA9E9DC69B7634560B56B21EF47A04C7ADC7CF80BB911267A9D7C824EEB83C"
        assert block_e.block_hash == "BCDEF4D74B0D93231B1C6CFDBA21DC189CFF4D69BE8FAC07278968FE0BC09FFC"

        assert block_a.source == "E749404912F8C239E2F413B7C604E5732F428C9DEC4BA649AEBB54AC964EBFA4"
        assert block_a.source == block_a.link_block.block_hash

        assert not block_b.source
        assert not block_b.link_block

        assert block_c.source == "786E621F133DDC9DA97808CEF006499845D3ED660C0630BCC7B21FE313F869F8"
        assert block_c.source == block_c.link_block.block_hash

        assert block_d.source == "548E61BAF6CF07E418324D2D08DAB0FC710681837E94C30242E14C97169AB529"
        assert block_d.source == block_d.link_block.block_hash

        assert not block_e.source

        # Ensure the other queues are empty
        assert network_plugin.pocketable_block_queue.empty()
        assert network_plugin.broadcast_block_queue.empty()

        sync_status = network_plugin.account_sync_statuses[
            "xrb_3rropjiqfxpmrrkooej4qtmm1pueu36f9ghinpho4esfdor8785a455d16nf"]

        assert sync_status.sync_complete
        assert sync_status.network_head == block_e.block_hash
        assert not sync_status.wallet_head
        assert not sync_status.ready_to_pocket
Example #8
    def test_remove(self):
        """Tests remove
        """

        self.conn['test']['test'].insert({'name': 'paulie'}, safe=True)
        wait_for(lambda: sum(1 for _ in self.mongo_doc._search()) == 1)
        self.conn['test']['test'].remove({'name': 'paulie'}, safe=True)
        wait_for(lambda: sum(1 for _ in self.mongo_doc._search()) != 1)
        self.assertEqual(sum(1 for _ in self.mongo_doc._search()), 0)
Example #9
    def test_rollback(self):
        """Tests rollback. We force a rollback by adding a doc, killing the
            primary, adding another doc, killing the new primary, and then
            restarting both.
        """
        primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
        self.conn['test']['test'].insert({'name': 'paul'}, safe=True)
        condition = lambda: self.conn['test']['test'].find_one(
            {'name': 'paul'}) is not None
        wait_for(condition)
        wait_for(lambda: sum(1 for _ in self.mongo_doc._search()) == 1)

        kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

        new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))

        admin = new_primary_conn['admin']
        condition = lambda: admin.command("isMaster")['ismaster']
        wait_for(condition)

        time.sleep(5)
        count = 0
        while True:
            try:
                result_set_1 = self.conn['test']['test'].insert(
                    {'name': 'pauline'}, safe=True)
                break
            except OperationFailure:
                time.sleep(1)
                count += 1
                if count >= 60:
                    sys.exit(1)
                continue
        wait_for(lambda: sum(1 for _ in self.mongo_doc._search()) == 2)
        result_set_1 = list(self.mongo_doc._search())
        result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
        self.assertEqual(len(result_set_1), 2)
        #make sure pauline is there
        for item in result_set_1:
            if item['name'] == 'pauline':
                self.assertEqual(item['_id'], result_set_2['_id'])
        kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

        start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                         "/replset1a.log", None)
        wait_for(lambda: primary_conn['admin'].command("isMaster")['ismaster'])

        start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                         "/replset1b.log", None)

        time.sleep(2)
        result_set_1 = list(self.mongo_doc._search())
        self.assertEqual(len(result_set_1), 1)
        for item in result_set_1:
            self.assertEqual(item['name'], 'paul')
        find_cursor = retry_until_ok(self.conn['test']['test'].find)
        self.assertEqual(retry_until_ok(find_cursor.count), 1)
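
The rollback tests above call retry_until_ok to ride out transient failures while the replica set elects a new primary. A minimal sketch under the assumption that it simply retries the callable on connection-related errors; the exception types and the retry budget are guesses.

import time

from pymongo.errors import AutoReconnect, OperationFailure


def retry_until_ok(func, *args, **kwargs):
    """Hypothetical retry helper: call `func` until it succeeds, sleeping
    briefly between attempts, and give up after a fixed number of tries."""
    for _ in range(60):
        try:
            return func(*args, **kwargs)
        except (AutoReconnect, OperationFailure):
            time.sleep(1)
    raise RuntimeError("operation did not succeed after 60 attempts")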
Example #10
    def test_stressed_rollback(self):
        """Test stressed rollback with number of documents equal to specified
            in global variable. Strategy for rollback is the same as before.
        """

        for i in range(0, NUMBER_OF_DOC_DIRS):
            self.conn['test']['test'].insert({'name': 'Paul ' + str(i)},
                                             safe=True)

        search = self.mongo_doc._search
        condition = lambda: sum(1 for _ in search()) == NUMBER_OF_DOC_DIRS
        wait_for(condition)
        primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
        kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

        new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))

        admin = new_primary_conn['admin']
        wait_for(lambda: admin.command("isMaster")['ismaster'])

        time.sleep(5)
        count = -1
        while count + 1 < NUMBER_OF_DOC_DIRS:
            try:
                count += 1
                self.conn['test']['test'].insert(
                    {'name': 'Pauline ' + str(count)}, safe=True)
            except (OperationFailure, AutoReconnect):
                time.sleep(1)
        wait_for(lambda: sum(1 for _ in self.mongo_doc._search()) == self.conn[
            'test']['test'].find().count())
        result_set_1 = self.mongo_doc._search()
        for item in result_set_1:
            if 'Pauline' in item['name']:
                result_set_2 = self.conn['test']['test'].find_one(
                    {'name': item['name']})
                self.assertEqual(item['_id'], result_set_2['_id'])

        kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

        start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                         "/replset1a.log", None)
        db_admin = primary_conn['admin']
        wait_for(lambda: db_admin.command("isMaster")['ismaster'])
        start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                         "/replset1b.log", None)

        search = self.mongo_doc._search
        condition = lambda: sum(1 for _ in search()) == NUMBER_OF_DOC_DIRS
        wait_for(condition)

        result_set_1 = list(self.mongo_doc._search())
        self.assertEqual(len(result_set_1), NUMBER_OF_DOC_DIRS)
        for item in result_set_1:
            self.assertTrue('Paul' in item['name'])
        find_cursor = retry_until_ok(self.conn['test']['test'].find)
        self.assertEqual(retry_until_ok(find_cursor.count), NUMBER_OF_DOC_DIRS)
Example #11
    def test_remove(self):
        """Tests remove
        """

        self.conn['test']['test'].insert({'name': 'paulie'}, safe=True)
        wait_for(lambda : sum(1 for _ in self.mongo_doc._search()) == 1)
        self.conn['test']['test'].remove({'name': 'paulie'}, safe=True)
        wait_for(lambda : sum(1 for _ in self.mongo_doc._search()) != 1)
        self.assertEqual(sum(1 for _ in self.mongo_doc._search()), 0)
Example #12
    def test_rollback(self):
        """Tests rollback. We force a rollback by adding a doc, killing the
            primary, adding another doc, killing the new primary, and then
            restarting both.
        """
        primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
        self.conn['test']['test'].insert({'name': 'paul'}, safe=True)
        condition = lambda : self.conn['test']['test'].find_one(
            {'name': 'paul'}) is not None
        wait_for(condition)
        wait_for(lambda : sum(1 for _ in self.mongo_doc._search()) == 1)

        kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

        new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))

        admin = new_primary_conn['admin']
        condition = lambda : admin.command("isMaster")['ismaster']
        wait_for(condition)

        time.sleep(5)
        count = 0
        while True:
            try:
                result_set_1 = self.conn['test']['test'].insert(
                    {'name': 'pauline'}, safe=True)
                break
            except OperationFailure:
                time.sleep(1)
                count += 1
                if count >= 60:
                    sys.exit(1)
                continue
        wait_for(lambda : sum(1 for _ in self.mongo_doc._search()) == 2)
        result_set_1 = list(self.mongo_doc._search())
        result_set_2 = self.conn['test']['test'].find_one({'name': 'pauline'})
        self.assertEqual(len(result_set_1), 2)
        #make sure pauline is there
        for item in result_set_1:
            if item['name'] == 'pauline':
                self.assertEqual(item['_id'], result_set_2['_id'])
        kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

        start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                       "/replset1a.log", None)
        wait_for(lambda : primary_conn['admin'].command("isMaster")['ismaster'])

        start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                       "/replset1b.log", None)

        time.sleep(2)
        result_set_1 = list(self.mongo_doc._search())
        self.assertEqual(len(result_set_1), 1)
        for item in result_set_1:
            self.assertEqual(item['name'], 'paul')
        find_cursor = retry_until_ok(self.conn['test']['test'].find)
        self.assertEqual(retry_until_ok(find_cursor.count), 1)
Example #13
    def test_stressed_rollback(self):
        """Test stressed rollback with number of documents equal to specified
            in global variable. Strategy for rollback is the same as before.
        """

        for i in range(0, NUMBER_OF_DOC_DIRS):
            self.conn['test']['test'].insert({'name': 'Paul ' + str(i)},
                safe=True)

        search = self.mongo_doc._search
        condition = lambda : sum(1 for _ in search()) == NUMBER_OF_DOC_DIRS
        wait_for(condition)
        primary_conn = Connection(HOSTNAME, int(PORTS_ONE['PRIMARY']))
        kill_mongo_proc(HOSTNAME, PORTS_ONE['PRIMARY'])

        new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))

        admin = new_primary_conn['admin']
        wait_for(lambda : admin.command("isMaster")['ismaster'])

        time.sleep(5)
        count = -1
        while count + 1 < NUMBER_OF_DOC_DIRS:
            try:
                count += 1
                self.conn['test']['test'].insert({'name': 'Pauline ' +
                    str(count)}, safe=True)
            except (OperationFailure, AutoReconnect):
                time.sleep(1)
        wait_for(lambda : sum(1 for _ in self.mongo_doc._search())
                 == self.conn['test']['test'].find().count())
        result_set_1 = self.mongo_doc._search()
        for item in result_set_1:
            if 'Pauline' in item['name']:
                result_set_2 = self.conn['test']['test'].find_one(
                    {'name': item['name']})
                self.assertEqual(item['_id'], result_set_2['_id'])

        kill_mongo_proc(HOSTNAME, PORTS_ONE['SECONDARY'])

        start_mongo_proc(PORTS_ONE['PRIMARY'], "demo-repl", "/replset1a",
                       "/replset1a.log", None)
        db_admin = primary_conn['admin']
        wait_for(lambda : db_admin.command("isMaster")['ismaster'])
        start_mongo_proc(PORTS_ONE['SECONDARY'], "demo-repl", "/replset1b",
                       "/replset1b.log", None)

        search = self.mongo_doc._search
        condition = lambda : sum(1 for _ in search()) == NUMBER_OF_DOC_DIRS
        wait_for(condition)

        result_set_1 = list(self.mongo_doc._search())
        self.assertEqual(len(result_set_1), NUMBER_OF_DOC_DIRS)
        for item in result_set_1:
            self.assertTrue('Paul' in item['name'])
        find_cursor = retry_until_ok(self.conn['test']['test'].find)
        self.assertEqual(retry_until_ok(find_cursor.count), NUMBER_OF_DOC_DIRS)
Example #14
    def test_single_target(self):
        """Test with a single replication target"""

        self.opman.start()

        # Insert first document with primary up
        self.main_conn["test"]["mc"].insert({"i": 0})
        self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 1)

        # Make sure the insert is replicated
        secondary = self.secondary_conn
        self.assertTrue(wait_for(lambda: secondary["test"]["mc"].count() == 1),
                        "first write didn't replicate to secondary")

        # Kill the primary
        kill_mongo_proc(self.primary_p, destroy=False)

        # Wait for the secondary to be promoted
        while not secondary["admin"].command("isMaster")["ismaster"]:
            time.sleep(1)

        # Insert another document. This will be rolled back later
        retry_until_ok(self.main_conn["test"]["mc"].insert, {"i": 1})
        self.assertEqual(secondary["test"]["mc"].count(), 2)

        # Wait for replication to doc manager
        c = lambda: len(self.opman.doc_managers[0]._search()) == 2
        self.assertTrue(wait_for(c),
                        "not all writes were replicated to doc manager")

        # Kill the new primary
        kill_mongo_proc(self.secondary_p, destroy=False)

        # Start both servers back up
        restart_mongo_proc(self.primary_p)
        primary_admin = self.primary_conn["admin"]
        while not primary_admin.command("isMaster")["ismaster"]:
            time.sleep(1)
        restart_mongo_proc(self.secondary_p)
        while secondary["admin"].command("replSetGetStatus")["myState"] != 2:
            time.sleep(1)
        while retry_until_ok(self.main_conn["test"]["mc"].find().count) == 0:
            time.sleep(1)

        # Only first document should exist in MongoDB
        self.assertEqual(self.main_conn["test"]["mc"].count(), 1)
        self.assertEqual(self.main_conn["test"]["mc"].find_one()["i"], 0)

        # Same case should hold for the doc manager
        doc_manager = self.opman.doc_managers[0]
        self.assertEqual(len(doc_manager._search()), 1)
        self.assertEqual(doc_manager._search()[0]["i"], 0)

        # cleanup
        self.opman.join()
Example #15
    def test_old_version(self, mock_nanovault_node, nanovault_network_plugin):
        # This test is almost identical to the corresponding test in
        # test_nano_node.py.
        mock_nanovault_node.add_replay_datasets(["nanovault_old_version"
                                                 ]).start()
        network_plugin = nanovault_network_plugin

        wait_for(lambda: network_plugin.connection_status.aborted, timeout=1)

        assert isinstance(network_plugin.connection_status.error,
                          UnsupportedProtocolVersion)
Example #16
    def test_active_difficulty(self, mock_node, mock_ws_node,
                               ws_node_network_plugin):
        # HTTP server won't broadcast the difficulty
        mock_node.add_replay_datasets(["legacy_watching", "version"]).start()

        # WebSocket server will broadcast the difficulty
        mock_ws_node.add_replay_datasets(["active_difficulty"]).start()
        network_plugin = ws_node_network_plugin

        wait_for(lambda: network_plugin.work_difficulty == "0000000088664422",
                 timeout=1)
Example #17
    def test_insert(self):
        """Tests insert
        """

        self.conn['test']['test'].insert({'name': 'paulie'}, safe=True)
        wait_for(lambda : sum(1 for _ in self.mongo_doc._search()) == 1)
        result_set_1 = self.mongo_doc._search()
        self.assertEqual(sum(1 for _ in result_set_1), 1)
        result_set_2 = self.conn['test']['test'].find_one()
        for item in result_set_1:
            self.assertEqual(item['_id'], result_set_2['_id'])
            self.assertEqual(item['name'], result_set_2['name'])
Example #18
    def test_insert(self):
        """Tests insert
        """

        self.conn['test']['test'].insert({'name': 'paulie'}, safe=True)
        wait_for(lambda: sum(1 for _ in self.mongo_doc._search()) == 1)
        result_set_1 = self.mongo_doc._search()
        self.assertEqual(sum(1 for _ in result_set_1), 1)
        result_set_2 = self.conn['test']['test'].find_one()
        for item in result_set_1:
            self.assertEqual(item['_id'], result_set_2['_id'])
            self.assertEqual(item['name'], result_set_2['name'])
Example #19
    def test_insert(self):
        """Tests insert
        """

        self.conn['test']['test'].insert({'name': 'paulie'}, safe=True)
        wait_for(lambda : sum(1 for _ in self.elastic_doc._search()) > 0)
        result_set_1 = list(self.elastic_doc._search())
        self.assertEqual(len(result_set_1), 1)
        result_set_2 = self.conn['test']['test'].find_one()
        for item in result_set_1:
            self.assertEqual(item['_id'], str(result_set_2['_id']))
            self.assertEqual(item['name'], result_set_2['name'])
Example #20
    def test_insert(self):
        """Tests insert
        """

        self.conn['test']['test'].insert({'name': 'paulie'}, safe=True)
        wait_for(lambda: sum(1 for _ in self.elastic_doc._search()) > 0)
        result_set_1 = list(self.elastic_doc._search())
        self.assertEqual(len(result_set_1), 1)
        result_set_2 = self.conn['test']['test'].find_one()
        for item in result_set_1:
            self.assertEqual(item['_id'], str(result_set_2['_id']))
            self.assertEqual(item['name'], result_set_2['name'])
Example #21
 def setUp(self):
     if not self.flag:
         self.fail("Shards cannot be added to mongos")
     self.connector = Connector("%s:%s" % (HOSTNAME, PORTS_ONE["MONGOS"]),
        CONFIG, '%s:30000' % (HOSTNAME),
        ['test.test'],
        '_id', None,
        'mongo_connector/doc_managers/mongo_doc_manager.py')
     self.connector.start()
     while len(self.connector.shard_set) == 0:
         pass
     self.conn['test']['test'].remove(safe=True)
     wait_for(lambda : sum(1 for _ in self.mongo_doc._search()) == 0)
Example #22
    def test_broadcast_block_failure_rejected(self, mock_node,
                                              node_network_plugin,
                                              replay_dataset, expected_error):
        # Sync account with two blocks and broadcast the third block
        # while simulating failure
        mock_node.add_replay_datasets(
            [replay_dataset, "active_difficulty", "version"]).reload()
        network_plugin = node_network_plugin

        network_plugin.account_sync_statuses["xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc"] \
            = AccountSyncStatus(
                account_id="xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc"
            )

        # For the 'timeout' test case, trigger the timeout after 1 second to
        # keep the test short
        network_plugin.rpc_processor.CONFIRMATION_TIMEOUT_SECONDS = 1

        # Wait until the account has finished syncing
        sync_status = network_plugin.account_sync_statuses[
            "xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc"]
        wait_for(lambda: sync_status.sync_complete, timeout=2)

        assert network_plugin.processed_block_queue.qsize() == 2

        # Broadcast the third block
        block = Block(block=RawBlock.from_dict({
            "account":
            "xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc",
            "balance": "0",
            "link":
            "0000000000000000000000000000000000000000000000000000000000000000",
            "previous":
            "96970559D7257F63ACB8383ED57CB510745DE744ABCEC4DC41590CE7E32179EA",
            "representative":
            "xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc",
            "signature":
            "BBC27F177C2C2DD574AE8EB8523A1A504B5790C65C95ACE744E3033A63BB158DDF95F9B7B88B902C8D743A64EA25785662CD454044E9C3000BC79C3BF7F1E809",
            "type": "state",
            "work": "561bab16393cb3c4"
        }))

        network_plugin.broadcast_block_queue.put(block)

        wait_for(network_plugin.broadcast_block_queue.empty, timeout=2)
        wait_for(lambda: sync_status.sync_complete, timeout=2)
        wait_for(lambda: network_plugin.processed_block_queue.qsize() == 3,
                 timeout=2)

        _, _, block = [
            network_plugin.processed_block_queue.get_nowait()
            for _ in range(0, 3)
        ]

        assert block.rejected
        assert not block.confirmed
        assert block.error == expected_error

        assert block.block_hash == \
            "62EE070DA06632FE1E54BA32FD25B00A5FD4E8CF09354A9B176CBF6BC33CDBDB"
Example #23
    def test_non_standard_fields(self):
        """ Tests ObjectIds, DBrefs, etc
        """
        # This test can break if it attempts to insert before the initial dump
        # takes place; the wait below prevents that (other tests are affected too)
        while (self.connector.shard_set['demo-repl'].checkpoint is None):
            time.sleep(1)
        docs = [{
            'foo': [1, 2]
        }, {
            'bar': {
                'hello': 'world'
            }
        }, {
            'code': Code("function x() { return 1; }")
        }, {
            'dbref': {
                '_ref': DBRef('simple', ObjectId('509b8db456c02c5ab7e63c34'))
            }
        }]
        try:
            self.conn['test']['test'].insert(docs)
        except OperationFailure:
            self.fail("Cannot insert documents into Elastic!")

        search = self.elastic_doc._search
        if not wait_for(lambda: sum(1 for _ in search()) == len(docs)):
            self.fail("Did not get all expected documents")
        self.assertIn("dbref", self.elastic_doc.get_last_doc())
Example #24
 def setUp(self):
     if not self.flag:
         self.fail("Shards cannot be added to mongos")
     self.connector = Connector(
         address="%s:%s" % (HOSTNAME, PORTS_ONE["MONGOS"]),
         oplog_checkpoint=CONFIG,
         target_url='%s:30000' % (HOSTNAME),
         ns_set=['test.test'],
         u_key='_id',
         auth_key=None,
         doc_manager='mongo_connector/doc_managers/mongo_doc_manager.py')
     self.connector.start()
     while len(self.connector.shard_set) == 0:
         pass
     self.conn['test']['test'].remove(safe=True)
     wait_for(lambda: sum(1 for _ in self.mongo_doc._search()) == 0)
Example #25
    def test_stress(self):
        """Test stress by inserting and removing the number of documents
            specified in global
            variable
        """

        for i in range(0, NUMBER_OF_DOC_DIRS):
            self.conn['test']['test'].insert({'name': 'Paul ' + str(i)})
        time.sleep(5)
        search = self.mongo_doc._search
        condition = lambda: sum(1 for _ in search()) == NUMBER_OF_DOC_DIRS
        wait_for(condition)
        for i in range(0, NUMBER_OF_DOC_DIRS):
            # Every inserted document should have been replicated to the target
            matches = [item for item in self.mongo_doc._search()
                       if item['name'] == 'Paul ' + str(i)]
            self.assertEqual(len(matches), 1)
Example #26
    def test_stress(self):
        """Test stress by inserting and removing the number of documents
            specified in global
            variable
        """

        for i in range(0, NUMBER_OF_DOC_DIRS):
            self.conn['test']['test'].insert({'name': 'Paul ' + str(i)})
        time.sleep(5)
        search = self.mongo_doc._search
        condition = lambda : sum(1 for _ in search()) == NUMBER_OF_DOC_DIRS
        wait_for(condition)
        for i in range(0, NUMBER_OF_DOC_DIRS):
            # Every inserted document should have been replicated to the target
            matches = [item for item in self.mongo_doc._search()
                       if item['name'] == 'Paul ' + str(i)]
            self.assertEqual(len(matches), 1)
Example #27
 def setUp(self):
     if not self.flag:
         self.fail("Shards cannot be added to mongos")
     self.connector = Connector(
         address="%s:%s" % (HOSTNAME, PORTS_ONE["MONGOS"]),
         oplog_checkpoint=CONFIG,
         target_url='%s:30000' % (HOSTNAME),
         ns_set=['test.test'],
         u_key='_id',
         auth_key=None,
         doc_manager='mongo_connector/doc_managers/mongo_doc_manager.py'
     )
     self.connector.start()
     while len(self.connector.shard_set) == 0:
         pass
     self.conn['test']['test'].remove(safe=True)
     wait_for(lambda : sum(1 for _ in self.mongo_doc._search()) == 0)
Example #28
def test_work_local_multiple(local_work_plugin_factory, work_unit_factory):
    # Enqueue multiple PoW jobs and ensure they're all completed
    account_ids = [
        get_account_id(private_key=generate_seed()) for _ in range(0, 100)
    ]
    work_plugin = local_work_plugin_factory(threads=8)
    work_plugin.add_work_units_to_solve([
        work_unit_factory(account_id=account_id) for account_id in account_ids
    ],
                                        network_difficulty=to_hex(10000, 16))
    work_units = work_plugin.work_units

    # The underlying queue is a set, so the exact number of results
    # should be 100 even if more valid PoWs were found
    wait_for(lambda: count_solved_units(work_units) == 100, timeout=5)

    for work_unit in work_units.values():
        account_ids.remove(work_unit.account_id)
Example #29
 def setUp(self):
     """ Starts a new connector for every test
     """
     if not self.flag:
         self.fail("Shards cannot be added to mongos")
     self.connector = Connector(
         address='%s:%s' % (HOSTNAME, PORTS_ONE['MONGOS']),
         oplog_checkpoint=CONFIG,
         target_url='localhost:9200',
         ns_set=['test.test'],
         u_key='_id',
         auth_key=None,
         doc_manager='mongo_connector/doc_managers/elastic_doc_manager.py')
     self.connector.start()
     while len(self.connector.shard_set) == 0:
         pass
     self.conn['test']['test'].remove(safe=True)
     wait_for(lambda: sum(1 for _ in self.elastic_doc._search()) == 0)
Example #30
    def test_sync_state_watching_incomplete(self, mock_node,
                                            node_network_plugin):
        # Synchronize an account blockchain that consists only of state blocks
        # and whose last block is still unconfirmed
        mock_node.add_replay_datasets(
            ["state_watching_incomplete", "active_difficulty",
             "version"]).start()
        network_plugin = node_network_plugin

        network_plugin.account_sync_statuses["xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc"] \
            = AccountSyncStatus(
                account_id="xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc"
            )

        # Wait until the account has finished syncing
        sync_status = network_plugin.account_sync_statuses[
            "xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc"]
        wait_for(
            lambda: sync_status.network_head ==
            "96970559D7257F63ACB8383ED57CB510745DE744ABCEC4DC41590CE7E32179EA",
            timeout=2)

        # Sync is not complete as long as there are unconfirmed blocks in the
        # blockchain
        assert not sync_status.sync_complete
        assert network_plugin.processed_block_queue.qsize() == 2

        block_results = [
            network_plugin.processed_block_queue.get_nowait()
            for _ in range(0, 2)
        ]
        blocks = [result.block for result in block_results]
        block_a, block_b = blocks

        assert block_a.block_hash == "E82CDC903E33AC80C2ACB6F3608FE9CFBDF610F11308F10CDDD8F6347F1CE058"
        assert block_b.block_hash == "96970559D7257F63ACB8383ED57CB510745DE744ABCEC4DC41590CE7E32179EA"

        assert block_a.link == "82CDDC385108D25E520B9CB2C7CB539CDF2FD5C9C3D7F4992AB6E18D0135B85F"
        assert block_a.link == block_a.link_block.block_hash

        assert block_b.link == "7EB064709F33C1E93541D48CBCE68C833FF105D1F3F7A9EF6B34CB23A849AB53"
        # Link blocks are retrieved for receive blocks, not send blocks
        assert not block_b.link_block
Example #31
 def setUp(self):
     """ Starts a new connector for every test
     """
     if not self.flag:
         self.fail("Shards cannot be added to mongos")
     self.connector = Connector(
         address='%s:%s' % (HOSTNAME, PORTS_ONE['MONGOS']),
         oplog_checkpoint=CONFIG,
         target_url='localhost:9200',
         ns_set=['test.test'],
         u_key='_id',
         auth_key=None,
         doc_manager='mongo_connector/doc_managers/elastic_doc_manager.py'
     )
     self.connector.start()
     while len(self.connector.shard_set) == 0:
         pass
     self.conn['test']['test'].remove(safe=True)
     wait_for(lambda : sum(1 for _ in self.elastic_doc._search()) == 0)
Example #32
    def test_sync_empty_watching(self, mock_node, node_network_plugin):
        """
        Synchronize account blockchain when no blocks for the account
        exist yet
        """
        mock_node.add_replay_datasets(
            ["empty_watching", "active_difficulty", "version"]).start()
        network_plugin = node_network_plugin

        network_plugin.account_sync_statuses["xrb_15n1wthxc5ndjnoufdfe8m4z5j973o6trzwbfys4cu4gtju5mh4xc918fout"] \
            = AccountSyncStatus(
                account_id="xrb_15n1wthxc5ndjnoufdfe8m4z5j973o6trzwbfys4cu4gtju5mh4xc918fout"
            )

        # Wait until the account has finished syncing
        sync_status = network_plugin.account_sync_statuses[
            "xrb_15n1wthxc5ndjnoufdfe8m4z5j973o6trzwbfys4cu4gtju5mh4xc918fout"]
        wait_for(lambda: sync_status.sync_complete, timeout=1)

        assert not sync_status.network_head
Example #33
    def test_nested_fields(self):
        """Test indexing fields that are sub-documents in MongoDB

        The following fields are defined in the provided schema.xml:

        <field name="person.address.street" type="string" ... />
        <field name="person.address.state" type="string" ... />
        <dynamicField name="numbers.*" type="string" ... />
        <dynamicField name="characters.*" type="string" ... />

        """

        self.solr_conn.delete(q='*:*')

        # Connector is already running
        self.conn["test"]["test"].insert({
            "name": "Jeb",
            "billing": {
                "address": {
                    "street": "12345 Mariposa Street",
                    "state": "California"
                }
            }
        })
        self.conn["test"]["test"].insert({
            "numbers": ["one", "two", "three"],
            "characters": [{
                "name": "Big Bird",
                "color": "yellow"
            }, {
                "name": "Elmo",
                "color": "red"
            }, "Cookie Monster"]
        })

        self.assertTrue(
            wait_for(lambda: len(self.solr_conn.search("*:*")) > 0),
            "documents should have been replicated to Solr")

        # Search for first document
        results = self.solr_conn.search(
            "billing.address.street:12345\ Mariposa\ Street")
        self.assertEqual(len(results), 1)
        self.assertEqual(
            next(iter(results))["billing.address.state"], "California")

        # Search for second document
        results = self.solr_conn.search("characters.1.color:red")
        self.assertEqual(len(results), 1)
        self.assertEqual(next(iter(results))["numbers.2"], "three")
        results = self.solr_conn.search("characters.2:Cookie\ Monster")
        self.assertEqual(len(results), 1)
Example #34
    def test_nested_fields(self):
        """Test indexing fields that are sub-documents in MongoDB

        The following fields are defined in the provided schema.xml:

        <field name="person.address.street" type="string" ... />
        <field name="person.address.state" type="string" ... />
        <dynamicField name="numbers.*" type="string" ... />
        <dynamicField name="characters.*" type="string" ... />

        """

        self.solr_conn.delete(q='*:*')

        # Connector is already running
        self.conn["test"]["test"].insert({
            "name": "Jeb",
            "billing": {
                "address": {
                    "street": "12345 Mariposa Street",
                    "state": "California"
                }
            }
        })
        self.conn["test"]["test"].insert({
            "numbers": ["one", "two", "three"],
            "characters": [
                {"name": "Big Bird",
                 "color": "yellow"},
                {"name": "Elmo",
                 "color": "red"},
                "Cookie Monster"
            ]
        })

        self.assertTrue(wait_for(lambda: len(self.solr_conn.search("*:*")) > 0),
                        "documents should have been replicated to Solr")

        # Search for first document
        results = self.solr_conn.search(
            "billing.address.street:12345\ Mariposa\ Street")
        self.assertEqual(len(results), 1)
        self.assertEqual(next(iter(results))["billing.address.state"],
                         "California")

        # Search for second document
        results = self.solr_conn.search(
            "characters.1.color:red")
        self.assertEqual(len(results), 1)
        self.assertEqual(next(iter(results))["numbers.2"], "three")
        results = self.solr_conn.search("characters.2:Cookie\ Monster")
        self.assertEqual(len(results), 1)
Example #35
    def test_filter_fields(self):
        opman, _, _ = self.get_oplog_thread()
        docman = opman.doc_managers[0]
        conn = opman.main_connection

        include_fields = ["a", "b", "c"]
        exclude_fields = ["d", "e", "f"]

        # Set fields to care about
        opman.fields = include_fields
        # Documents have more than just these fields
        doc = {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6, "_id": 1}
        db = conn['test']['test']
        db.insert(doc)
        wait_for(lambda: db.count() == 1)
        opman.dump_collection()

        result = docman._search()[0]
        keys = result.keys()
        for inc, exc in zip(include_fields, exclude_fields):
            self.assertIn(inc, keys)
            self.assertNotIn(exc, keys)
Example #36
    def test_update_pocketable_blocks(self, mock_node, node_network_plugin):
        # Sync account blockchain with two blocks and make sure two
        # pending blocks are found
        mock_node.add_replay_datasets(
            ["state_pocketable_pending", "active_difficulty",
             "version"]).reload()
        network_plugin = node_network_plugin

        network_plugin.account_sync_statuses["xrb_1w77aapnijnm5mo16r3xtpqu7n459r61fqpcdt3kxfmz8gtqgzbozswxmduy"] \
            = AccountSyncStatus(
                account_id="xrb_1w77aapnijnm5mo16r3xtpqu7n459r61fqpcdt3kxfmz8gtqgzbozswxmduy"
            )

        # Wait until the account has finished syncing
        sync_status = network_plugin.account_sync_statuses[
            "xrb_1w77aapnijnm5mo16r3xtpqu7n459r61fqpcdt3kxfmz8gtqgzbozswxmduy"]
        wait_for(lambda: sync_status.sync_complete, timeout=2)
        wait_for(lambda: network_plugin.pocketable_block_queue.qsize() == 2,
                 timeout=2)

        blocks = [
            network_plugin.pocketable_block_queue.get_nowait()
            for _ in range(0, 2)
        ]
        block_a = next(
            block for block in blocks if block.block_hash ==
            "C4BDB10778120F6748959EF7312C443BD0BFF4FE97F02A20B874694EB03DE0D0")
        block_b = next(
            block for block in blocks if block.block_hash ==
            "D96D72919D6EEAA9B82FE8046EBE70E20886E4D6C3BA218AD5340DD63AEE09C6")

        assert block_a.account_id == "xrb_3qbettndeemurhst593izae7j6x746bdzktmr16qfh9oa9uizxnuxkxgaiqd"
        assert block_a.amount == 1000000000000000000000000000000
        assert block_a.link_as_account == "xrb_1w77aapnijnm5mo16r3xtpqu7n459r61fqpcdt3kxfmz8gtqgzbozswxmduy"

        assert block_b.account_id == "xrb_36gijoeijuazu7d9urtxm1jqgejw43bi3tcfjx4i98q9mxuqhrsjs5dk1d9i"
        assert block_b.amount == 1000000000000000000000000000
        assert block_b.link_as_account == "xrb_1w77aapnijnm5mo16r3xtpqu7n459r61fqpcdt3kxfmz8gtqgzbozswxmduy"
Example #37
    def test_sync_invalid_signature(self, mock_node, node_network_plugin):
        # If the node returns an invalid signature, the network plugin
        # will abort and shut itself down
        mock_node.add_replay_datasets([
            "legacy_watching_invalid_signature", "active_difficulty", "version"
        ]).reload()
        network_plugin = node_network_plugin

        network_plugin.account_sync_statuses["xrb_3rropjiqfxpmrrkooej4qtmm1pueu36f9ghinpho4esfdor8785a455d16nf"] \
            = AccountSyncStatus(
                account_id="xrb_3rropjiqfxpmrrkooej4qtmm1pueu36f9ghinpho4esfdor8785a455d16nf"
            )

        sync_status = network_plugin.account_sync_statuses[
            "xrb_3rropjiqfxpmrrkooej4qtmm1pueu36f9ghinpho4esfdor8785a455d16nf"]

        wait_for(lambda: network_plugin.connection_status.aborted, timeout=2)

        assert not sync_status.network_head
        assert not network_plugin.connected
        assert not network_plugin.started
        assert isinstance(network_plugin.connection_status.error,
                          InvalidSignature)
Example #38
    def test_many_targets(self):
        """Test that one OplogThread is capable of replicating to more than
        one target.
        """

        opman, primary_conn, oplog_coll = self.get_oplog_thread()
        doc_managers = [DocManager(), DocManager(), DocManager()]
        opman.doc_managers = doc_managers

        # start replicating
        opman.start()
        primary_conn["test"]["test"].insert({
            "name": "kermit",
            "color": "green"
        })
        primary_conn["test"]["test"].insert({
            "name": "elmo",
            "color": "firetruck red"
        })

        self.assertTrue(
            wait_for(lambda: sum(len(d._search()) for d in doc_managers) == 6),
            "OplogThread should be able to replicate to multiple targets"
        )

        primary_conn["test"]["test"].remove({"name": "elmo"})

        self.assertTrue(
            wait_for(lambda: sum(len(d._search()) for d in doc_managers) == 3),
            "OplogThread should be able to replicate to multiple targets"
        )
        for d in doc_managers:
            self.assertEqual(d._search()[0]["name"], "kermit")

        # cleanup
        opman.join()
Example #39
    def test_broadcast_block_duplicate(self, mock_node, node_network_plugin):
        # Try broadcasting a block that is already confirmed
        # The network plugin should silently ignore the resulting error
        mock_node.add_replay_datasets(
            ["state_broadcast_duplicate", "active_difficulty",
             "version"]).reload()
        network_plugin = node_network_plugin

        network_plugin.account_sync_statuses["xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc"] \
            = AccountSyncStatus(
                account_id="xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc"
            )

        # Wait until the account has finished syncing
        sync_status = network_plugin.account_sync_statuses[
            "xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc"]
        wait_for(lambda: sync_status.sync_complete, timeout=2)

        assert network_plugin.processed_block_queue.qsize() == 2

        # Broadcast the third already confirmed block
        block = Block(block=RawBlock.from_dict({
            "account":
            "xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc",
            "balance": "0",
            "link":
            "0000000000000000000000000000000000000000000000000000000000000000",
            "previous":
            "96970559D7257F63ACB8383ED57CB510745DE744ABCEC4DC41590CE7E32179EA",
            "representative":
            "xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc",
            "signature":
            "BBC27F177C2C2DD574AE8EB8523A1A504B5790C65C95ACE744E3033A63BB158DDF95F9B7B88B902C8D743A64EA25785662CD454044E9C3000BC79C3BF7F1E809",
            "type": "state",
            "work": "561bab16393cb3c4"
        }))

        network_plugin.broadcast_block_queue.put(block)

        wait_for(network_plugin.broadcast_block_queue.empty, timeout=2)
        wait_for(lambda: sync_status.sync_complete, timeout=2)

        assert network_plugin.processed_block_queue.qsize() == 2

        _, block = [
            network_plugin.processed_block_queue.get_nowait()
            for _ in range(0, 2)
        ]

        assert block.confirmed
        assert not block.rejected

        assert block.block_hash == \
            "96970559D7257F63ACB8383ED57CB510745DE744ABCEC4DC41590CE7E32179EA"
Example #40
    def test_dynamic_fields(self):
        """ Tests dynamic field definitions

        The following fields are supplied in the provided schema.xml:
        <dynamicField name="*_i" type="int" indexed="true" stored="true"/>
        <dynamicField name="i_*" type="int" indexed="true" stored="true"/>

        Cases:
        1. Match on first definition
        2. Match on second definition
        3. No match
        """
        self.solr_conn.delete(q='*:*')

        match_first = {"_id": 0, "foo_i": 100}
        match_second = {"_id": 1, "i_foo": 200}
        match_none = {"_id": 2, "foo": 300}

        # Connector is already running
        self.conn["test"]["test"].insert(match_first)
        self.conn["test"]["test"].insert(match_second)
        self.conn["test"]["test"].insert(match_none)

        # Should have documents in Solr now
        self.assertTrue(
            wait_for(lambda: len(self.solr_conn.search("*:*")) > 0),
            "Solr doc manager should allow dynamic fields")

        # foo_i and i_foo should be indexed, foo field should not exist
        self.assertEqual(len(self.solr_conn.search("foo_i:100")), 1)
        self.assertEqual(len(self.solr_conn.search("i_foo:200")), 1)

        # SolrError: "undefined field foo"
        logger = logging.getLogger("pysolr")
        logger.error("You should see an ERROR log message from pysolr here. "
                     "This indicates success, not an error in the test.")
        with self.assertRaises(SolrError):
            self.solr_conn.search("foo:300")
Example #41
    def test_non_standard_fields(self):
        """ Tests ObjectIds, DBrefs, etc
        """
        # This test can break if it attempts to insert before the initial dump
        # takes place; the wait below prevents that (other tests are affected too)
        while (self.connector.shard_set['demo-repl'].checkpoint is None):
            time.sleep(1)
        docs = [
            {'foo': [1, 2]},
            {'bar': {'hello': 'world'}},
            {'code': Code("function x() { return 1; }")},
            {'dbref': {'_ref': DBRef('simple',
                ObjectId('509b8db456c02c5ab7e63c34'))}}
        ]
        try:
            self.conn['test']['test'].insert(docs)
        except OperationFailure:
            self.fail("Cannot insert documents into Elastic!")

        search = self.elastic_doc._search
        if not wait_for(lambda : sum(1 for _ in search()) == len(docs)):
            self.fail("Did not get all expected documents")
        self.assertIn("dbref", self.elastic_doc.get_last_doc())
Example #42
    def test_dynamic_fields(self):
        """ Tests dynamic field definitions

        The following fields are supplied in the provided schema.xml:
        <dynamicField name="*_i" type="int" indexed="true" stored="true"/>
        <dynamicField name="i_*" type="int" indexed="true" stored="true"/>

        Cases:
        1. Match on first definition
        2. Match on second definition
        3. No match
        """
        self.solr_conn.delete(q='*:*')

        match_first = {"_id": 0, "foo_i": 100}
        match_second = {"_id": 1, "i_foo": 200}
        match_none = {"_id": 2, "foo": 300}

        # Connector is already running
        self.conn["test"]["test"].insert(match_first)
        self.conn["test"]["test"].insert(match_second)
        self.conn["test"]["test"].insert(match_none)

        # Should have documents in Solr now
        self.assertTrue(wait_for(lambda: len(self.solr_conn.search("*:*")) > 0),
                        "Solr doc manager should allow dynamic fields")

        # foo_i and i_foo should be indexed, foo field should not exist
        self.assertEqual(len(self.solr_conn.search("foo_i:100")), 1)
        self.assertEqual(len(self.solr_conn.search("i_foo:200")), 1)

        # SolrError: "undefined field foo"
        logger = logging.getLogger("pysolr")
        logger.error("You should see an ERROR log message from pysolr here. "
                     "This indicates success, not an error in the test.")
        with self.assertRaises(SolrError):
            self.solr_conn.search("foo:300")
Example #43
    def test_namespace_mapping(self):
        """Test mapping of namespaces
        Cases:

        upsert/delete/update of documents:
        1. in namespace set, mapping provided
        2. outside of namespace set, mapping provided
        """

        source_ns = ["test.test1", "test.test2"]
        phony_ns = ["test.phony1", "test.phony2"]
        dest_mapping = {"test.test1": "test.test1_dest",
                        "test.test2": "test.test2_dest"}
        test_oplog, primary_conn, oplog_coll = self.get_oplog_thread()
        docman = test_oplog.doc_managers[0]
        test_oplog.dest_mapping = dest_mapping
        test_oplog.namespace_set = source_ns
        # start replicating
        test_oplog.start()

        base_doc = {"_id": 1, "name": "superman"}

        # doc in namespace set
        for ns in source_ns:
            db, coll = ns.split(".", 1)

            # test insert
            primary_conn[db][coll].insert(base_doc)

            wait_for(lambda: len(docman._search()) == 1)
            self.assertEqual(docman._search()[0]["ns"], dest_mapping[ns])
            bad = [d for d in docman._search() if d["ns"] == ns]
            self.assertEqual(len(bad), 0)

            # test update
            primary_conn[db][coll].update(
                {"_id": 1},
                {"$set": {"weakness": "kryptonite"}}
            )

            def update_complete():
                docs = docman._search()
                for d in docs:
                    if d.get("weakness") == "kryptonite":
                        return True
                # Only report failure after checking every document
                return False
            wait_for(update_complete)
            self.assertEqual(docman._search()[0]["ns"], dest_mapping[ns])
            bad = [d for d in docman._search() if d["ns"] == ns]
            self.assertEqual(len(bad), 0)

            # test delete
            primary_conn[db][coll].remove({"_id": 1})
            wait_for(lambda: len(docman._search()) == 0)
            bad = [d for d in docman._search() if d["ns"] == dest_mapping[ns]]
            self.assertEqual(len(bad), 0)

            # cleanup
            primary_conn[db][coll].remove()
            test_oplog.doc_managers[0]._delete()

        # doc not in namespace set
        for ns in phony_ns:
            db, coll = ns.split(".", 1)

            # test insert
            primary_conn[db][coll].insert(base_doc)
            time.sleep(1)
            self.assertEqual(len(docman._search()), 0)
            # test update
            primary_conn[db][coll].update(
                {"_id": 1},
                {"$set": {"weakness": "kryptonite"}}
            )
            time.sleep(1)
            self.assertEqual(len(docman._search()), 0)
            # note: nothing to test for delete

        # cleanup
        test_oplog.join()
Example #44
    def test_namespace_mapping(self):
        """Test mapping of namespaces
        Cases:

        upsert/delete/update of documents:
        1. in namespace set, mapping provided
        2. outside of namespace set, mapping provided
        """

        source_ns = ["test.test1", "test.test2"]
        phony_ns = ["test.phony1", "test.phony2"]
        dest_mapping = {
            "test.test1": "test.test1_dest",
            "test.test2": "test.test2_dest"
        }
        test_oplog, primary_conn, oplog_coll = self.get_oplog_thread()
        docman = test_oplog.doc_managers[0]
        test_oplog.dest_mapping = dest_mapping
        test_oplog.namespace_set = source_ns
        # start replicating
        test_oplog.start()

        base_doc = {"_id": 1, "name": "superman"}

        # doc in namespace set
        for ns in source_ns:
            db, coll = ns.split(".", 1)

            # test insert
            primary_conn[db][coll].insert(base_doc)

            wait_for(lambda: len(docman._search()) == 1)
            self.assertEqual(docman._search()[0]["ns"], dest_mapping[ns])
            bad = [d for d in docman._search() if d["ns"] == ns]
            self.assertEqual(len(bad), 0)

            # test update
            primary_conn[db][coll].update({"_id": 1},
                                          {"$set": {
                                              "weakness": "kryptonite"
                                          }})

            def update_complete():
                # True once at least one replicated document carries the update
                return any(
                    d.get("weakness") == "kryptonite"
                    for d in docman._search()
                )

            wait_for(update_complete)
            self.assertEqual(docman._search()[0]["ns"], dest_mapping[ns])
            bad = [d for d in docman._search() if d["ns"] == ns]
            self.assertEqual(len(bad), 0)

            # test delete
            primary_conn[db][coll].remove({"_id": 1})
            wait_for(lambda: len(docman._search()) == 0)
            bad = [d for d in docman._search() if d["ns"] == dest_mapping[ns]]
            self.assertEqual(len(bad), 0)

            # cleanup
            primary_conn[db][coll].remove()
            test_oplog.doc_managers[0]._delete()

        # doc not in namespace set
        for ns in phony_ns:
            db, coll = ns.split(".", 1)

            # test insert
            primary_conn[db][coll].insert(base_doc)
            time.sleep(1)
            self.assertEqual(len(docman._search()), 0)
            # test update
            primary_conn[db][coll].update({"_id": 1},
                                          {"$set": {
                                              "weakness": "kryptonite"
                                          }})
            time.sleep(1)
            self.assertEqual(len(docman._search()), 0)
            # note: nothing to test for delete

        # cleanup
        test_oplog.join()
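
The assertions above reduce to one rule: a document from a namespace in the set is replicated
under its mapped name (falling back to the source name when no mapping entry exists), while
documents outside the set are skipped entirely. The mapping lookup itself can be sketched as a
hypothetical one-liner; map_namespace is not a name from the original code.

def map_namespace(ns, dest_mapping):
    # Return the destination namespace for `ns`, falling back to `ns` itself
    # when no mapping is configured. Hypothetical helper mirroring the renaming
    # behaviour the test asserts on.
    return dest_mapping.get(ns, ns)

For the dest_mapping used above, map_namespace("test.test1", dest_mapping) yields
"test.test1_dest", while an unmapped namespace such as "test.phony1" falls through unchanged.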
Exemplo n.º 45
    def test_confirmed_blocks(self, mock_nanovault_node,
                              mock_nanovault_ws_node,
                              nanovault_ws_node_network_plugin):
        mock_nanovault_node.add_replay_datasets(
            ["nanovault_version", "ws_watching"]).start()
        mock_nanovault_ws_node.add_replay_datasets(["watching"]).start()
        network_plugin = nanovault_ws_node_network_plugin

        # Start watching a single account
        network_plugin.account_sync_statuses["xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc"] \
            = AccountSyncStatus(
                account_id="xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc"
            )
        sync_status_a = network_plugin.account_sync_statuses[
            "xrb_1nanoftwk6741wmdznzangwm8prq95spu3zntb5gwpjdk8qd3p8eu5bxoehc"]

        wait_for(
            lambda: sync_status_a.network_head ==
            "E82CDC903E33AC80C2ACB6F3608FE9CFBDF610F11308F10CDDD8F6347F1CE058",
            timeout=1)

        # Add another account to watch. Network server should update
        # the subscription options accordingly.
        network_plugin.account_sync_statuses["xrb_3sjmihcotq4ofuufmw6tr6pmgy698gg8wggsrzpyebkrp1i6g6igfrzfhpkw"] \
            = AccountSyncStatus(
                account_id="xrb_3sjmihcotq4ofuufmw6tr6pmgy698gg8wggsrzpyebkrp1i6g6igfrzfhpkw"
            )
        sync_status_b = network_plugin.account_sync_statuses[
            "xrb_3sjmihcotq4ofuufmw6tr6pmgy698gg8wggsrzpyebkrp1i6g6igfrzfhpkw"]

        wait_for(
            lambda: sync_status_a.network_head ==
            "96970559D7257F63ACB8383ED57CB510745DE744ABCEC4DC41590CE7E32179EA",
            timeout=1)
        wait_for(
            lambda: sync_status_b.network_head ==
            "19A49CD5E8AA7C84E0C656ADA7FDF16FEE340A1D815825939F972F1BBB3358FF",
            timeout=1)

        # Check that the blocks have correct timestamps
        block_results = [
            network_plugin.processed_block_queue.get_nowait()
            for _ in range(0, 3)
        ]
        blocks = [result.block for result in block_results]
        block_a, block_b, block_c = blocks

        assert block_a.block_hash == "E82CDC903E33AC80C2ACB6F3608FE9CFBDF610F11308F10CDDD8F6347F1CE058"
        assert block_b.block_hash == "19A49CD5E8AA7C84E0C656ADA7FDF16FEE340A1D815825939F972F1BBB3358FF"
        assert block_c.block_hash == "96970559D7257F63ACB8383ED57CB510745DE744ABCEC4DC41590CE7E32179EA"

        assert block_a.timestamp.source == TimestampSource.BROADCAST
        assert int(block_a.timestamp.date.timestamp()) > time.time() - 10
        assert block_a.link_block.block_hash == "82CDDC385108D25E520B9CB2C7CB539CDF2FD5C9C3D7F4992AB6E18D0135B85F"

        assert block_b.timestamp.source == TimestampSource.BROADCAST
        assert int(block_b.timestamp.date.timestamp()) > time.time() - 10
        assert block_b.link_block.block_hash == "B92DC6098D6105CDBCC3A6DE45A31451578CF4322759A4A9DD5C54043090F3B1"

        assert block_c.timestamp.source == TimestampSource.BROADCAST
        assert int(block_c.timestamp.date.timestamp()) > time.time() - 10
        assert not block_c.link_block
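
The three timestamp checks repeat the same freshness rule. A hypothetical helper capturing it
(TimestampSource is the enum already referenced by the test; the 10-second window matches the
assertions above):

import time


def is_recent_broadcast(block, max_age=10):
    # True when the block's timestamp was recorded at broadcast time and is no
    # older than `max_age` seconds. Hypothetical helper mirroring the
    # assertions in the test above.
    return (
        block.timestamp.source == TimestampSource.BROADCAST
        and int(block.timestamp.date.timestamp()) > time.time() - max_age
    )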
Exemplo n.º 46
    def test_single_target(self):
        """Test with a single replication target"""

        self.opman.start()

        # Insert first document with primary up
        self.main_conn["test"]["mc"].insert({"i": 0})
        self.assertEqual(self.primary_conn["test"]["mc"].find().count(), 1)

        # Make sure the insert is replicated
        secondary = self.secondary_conn
        self.assertTrue(wait_for(lambda: secondary["test"]["mc"].count() == 1),
                        "first write didn't replicate to secondary")

        # Kill the primary
        kill_mongo_proc("localhost", PORTS_ONE["PRIMARY"])

        # Wait for the secondary to be promoted
        while not secondary["admin"].command("isMaster")["ismaster"]:
            time.sleep(1)

        # Insert another document. This will be rolled back later
        retry_until_ok(self.main_conn["test"]["mc"].insert, {"i": 1})
        self.assertEqual(secondary["test"]["mc"].count(), 2)

        # Wait for replication to doc manager
        c = lambda: len(self.opman.doc_managers[0]._search()) == 2
        self.assertTrue(wait_for(c),
                        "not all writes were replicated to doc manager")

        # Kill the new primary
        kill_mongo_proc("localhost", PORTS_ONE["SECONDARY"])

        # Start both servers back up
        start_mongo_proc(port=PORTS_ONE['PRIMARY'],
                         repl_set_name="demo-repl",
                         data="/replset1a",
                         log="/replset1a.log",
                         key_file=None)
        primary_admin = self.primary_conn["admin"]
        while not primary_admin.command("isMaster")["ismaster"]:
            time.sleep(1)
        start_mongo_proc(port=PORTS_ONE['SECONDARY'],
                         repl_set_name="demo-repl",
                         data="/replset1b",
                         log="/replset1b.log",
                         key_file=None)
        while secondary["admin"].command("replSetGetStatus")["myState"] != 2:
            time.sleep(1)
        while retry_until_ok(self.main_conn["test"]["mc"].find().count) == 0:
            time.sleep(1)

        # Only first document should exist in MongoDB
        self.assertEqual(self.main_conn["test"]["mc"].count(), 1)
        self.assertEqual(self.main_conn["test"]["mc"].find_one()["i"], 0)

        # Same case should hold for the doc manager
        doc_manager = self.opman.doc_managers[0]
        self.assertEqual(len(doc_manager._search()), 1)
        self.assertEqual(doc_manager._search()[0]["i"], 0)

        # cleanup
        self.opman.join()
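
The failover sequence above repeats a bare while/sleep loop whenever it waits for a node to win
(or rejoin) the replica-set election. A bounded sketch of that loop follows; the timeout and the
raised TimeoutError are additions, since the original loops simply spin until the condition holds.

import time


def wait_for_primary(conn, timeout=60, interval=1):
    # Poll the node behind `conn` until it reports itself as primary, or give
    # up after `timeout` seconds. Sketch only; the test above uses unbounded
    # while loops with time.sleep(1).
    deadline = time.time() + timeout
    while time.time() < deadline:
        if conn["admin"].command("isMaster")["ismaster"]:
            return True
        time.sleep(interval)
    raise TimeoutError("node did not become primary within %d seconds" % timeout)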
Exemplo n.º 47
    def test_many_targets(self):
        """Test with several replication targets"""

        # OplogThread has multiple doc managers
        doc_managers = [DocManager(), DocManager(), DocManager()]
        self.opman.doc_managers = doc_managers

        self.opman.start()

        # Insert the first document while the primary is up
        self.main_conn["test"]["mc"].insert({"i": 0})
        self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)

        # Make sure the insert is replicated
        secondary = self.secondary_conn
        self.assertTrue(wait_for(lambda: secondary["test"]["mc"].count() == 1),
                        "first write didn't replicate to secondary")

        # Kill the primary
        kill_mongo_proc(self.primary_p, destroy=False)

        # Wait for the secondary to be promoted
        while not secondary["admin"].command("isMaster")["ismaster"]:
            time.sleep(1)

        # Insert more documents. This will be rolled back later
        # Some of these documents will be manually removed from
        # certain doc managers, to emulate the effect of certain
        # target systems being ahead/behind others
        secondary_ids = []
        for i in range(1, 10):
            secondary_ids.append(
                retry_until_ok(self.main_conn["test"]["mc"].insert,
                               {"i": i}))
        self.assertEqual(self.secondary_conn["test"]["mc"].count(), 10)

        # Wait for replication to the doc managers
        def docmans_done():
            for dm in self.opman.doc_managers:
                if len(dm._search()) != 10:
                    return False
            return True
        self.assertTrue(wait_for(docmans_done),
                        "not all writes were replicated to doc managers")

        # Remove some documents from the doc managers to simulate
        # uneven replication
        for doc_id in secondary_ids[8:]:
            self.opman.doc_managers[1].remove({"_id": doc_id})
        for doc_id in secondary_ids[2:]:
            self.opman.doc_managers[2].remove({"_id": doc_id})

        # Kill the new primary
        kill_mongo_proc(self.secondary_p, destroy=False)

        # Start both servers back up
        restart_mongo_proc(self.primary_p)
        primary_admin = self.primary_conn["admin"]
        while not primary_admin.command("isMaster")["ismaster"]:
            time.sleep(1)
        restart_mongo_proc(self.secondary_p)
        while retry_until_ok(secondary["admin"].command,
                             "replSetGetStatus")["myState"] != 2:
            time.sleep(1)
        while retry_until_ok(self.primary_conn["test"]["mc"].find().count) == 0:
            time.sleep(1)

        # Only first document should exist in MongoDB
        self.assertEqual(self.primary_conn["test"]["mc"].count(), 1)
        self.assertEqual(self.primary_conn["test"]["mc"].find_one()["i"], 0)

        # Give OplogThread some time to catch up
        time.sleep(10)

        # Same case should hold for the doc managers
        for dm in self.opman.doc_managers:
            self.assertEqual(len(dm._search()), 1)
            self.assertEqual(dm._search()[0]["i"], 0)

        self.opman.join()
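
The docmans_done predicate above generalizes naturally to "wait until every doc manager reports
the same document count". A hypothetical equivalent using all(); the helper name is an assumption:

def all_doc_managers_have(doc_managers, count):
    # True once every doc manager reports exactly `count` documents.
    # Hypothetical condition helper equivalent to docmans_done above; it could
    # be passed to wait_for as:
    #     wait_for(lambda: all_doc_managers_have(opman.doc_managers, 10))
    return all(len(dm._search()) == count for dm in doc_managers)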