Example #1
0
    def _create_workers(self, row_event, worker_num):
        """Create ``worker_num`` fake workers watching ``row_event``.

        Registers ``row_event`` on the mech driver's NB connection and on
        each fake worker's IDL, adds one hash ring node per fake worker,
        refreshes every worker's hash ring and asserts that all of them
        (the fakes plus the mech driver's own connection) show up as
        active nodes in the ring.

        :param row_event: row event to watch on every worker's
            notify handler.
        :param worker_num: number of fake workers to create.
        :returns: list of all worker connections, mech driver's first.
        """
        self.mech_driver.nb_ovn.idl.notify_handler.watch_event(row_event)
        worker_list = [self.mech_driver.nb_ovn]

        # Create the requested number of fake workers (the original
        # comment hard-coded "10", which is only one caller's value)
        for _ in range(worker_num):
            node_uuid = uuidutils.generate_uuid()
            db_hash_ring.add_node(self.context, ovn_const.HASH_RING_ML2_GROUP,
                                  node_uuid)
            fake_driver = mock.MagicMock(
                node_uuid=node_uuid,
                hash_ring_group=ovn_const.HASH_RING_ML2_GROUP)
            _idl = ovsdb_monitor.OvnNbIdl.from_server(
                self.ovsdb_server_mgr.get_ovsdb_connection_path(),
                self.nb_api.schema_helper, fake_driver)
            worker = self.useFixture(
                fixtures.OVNIdlConnectionFixture(idl=_idl,
                                                 timeout=10)).connection
            worker.idl.notify_handler.watch_event(row_event)
            worker.start()
            worker_list.append(worker)

        # Refresh the hash rings just in case. A plain loop, not a list
        # comprehension: we only want the side effect, not a list.
        for worker in worker_list:
            worker.idl._hash_ring.refresh()

        # Assert all workers (fakes + mech driver) are active in the ring
        self.assertEqual(
            worker_num + 1,
            len(
                db_hash_ring.get_active_nodes(
                    self.context,
                    interval=ovn_const.HASH_RING_NODES_TIMEOUT,
                    group_name=ovn_const.HASH_RING_ML2_GROUP)))

        return worker_list
Example #2
0
    def test_ring_rebalance(self):
        """Verify the ring re-balances when nodes expire and come back."""
        # Use pre-defined UUIDs to make the hashes predictable
        node_1_uuid = db_hash_ring.add_node(self.admin_ctx,
                                            HASH_RING_TEST_GROUP, 'node-1')
        node_2_uuid = db_hash_ring.add_node(self.admin_ctx,
                                            HASH_RING_TEST_GROUP, 'node-2')

        # Add another node from a different host
        with mock.patch.object(db_hash_ring, 'CONF') as mock_conf:
            mock_conf.host = 'another-host-52359446-c366'
            another_host_node = db_hash_ring.add_node(self.admin_ctx,
                                                      HASH_RING_TEST_GROUP,
                                                      'another-host')

        # Assert all nodes are alive in the ring
        self.hash_ring_manager.refresh()
        self.assertEqual(3, len(self.hash_ring_manager._hash_ring.nodes))

        # Hash certain values against the nodes
        hash_dict_before = {
            node_1_uuid: 'fake-uuid',
            node_2_uuid: 'fake-uuid-0',
            another_host_node: 'fake-uuid-ABCDE'
        }
        self._verify_hashes(hash_dict_before)

        # Mock utcnow() as the HASH_RING_NODES_TIMEOUT have expired
        # already and touch the nodes from our host
        fake_utcnow = timeutils.utcnow() - datetime.timedelta(
            seconds=constants.HASH_RING_NODES_TIMEOUT)
        with mock.patch.object(timeutils, 'utcnow') as mock_utcnow:
            mock_utcnow.return_value = fake_utcnow
            db_hash_ring.touch_nodes_from_host(self.admin_ctx,
                                               HASH_RING_TEST_GROUP)

        # Now assert that the ring was re-balanced and only the node from
        # another host is marked as alive
        self.hash_ring_manager.refresh()
        self.assertEqual([another_host_node],
                         list(self.hash_ring_manager._hash_ring.nodes.keys()))

        # Now only "another_host_node" is alive; ALL values should hash to
        # it. The original built a dict literal repeating the same key three
        # times, which silently collapses to a single entry and only checked
        # the last value -- verify each value explicitly instead.
        for fake_uuid in ('fake-uuid', 'fake-uuid-0', 'fake-uuid-ABCDE'):
            self._verify_hashes({another_host_node: fake_uuid})

        # Now touch the nodes so they appear active again
        db_hash_ring.touch_nodes_from_host(self.admin_ctx,
                                           HASH_RING_TEST_GROUP)
        self.hash_ring_manager.refresh()

        # The ring should re-balance back to how it was before
        self._verify_hashes(hash_dict_before)
Example #3
0
    def test_get_node(self):
        """Hash a couple of well-known values and check node assignment."""
        # Pre-defined UUIDs keep the resulting hashes deterministic
        first_node = db_hash_ring.add_node(self.admin_ctx,
                                           HASH_RING_TEST_GROUP, 'node-1')
        second_node = db_hash_ring.add_node(self.admin_ctx,
                                            HASH_RING_TEST_GROUP, 'node-2')

        expected_hashes = {first_node: 'fake-uuid',
                           second_node: 'fake-uuid-0'}
        self._verify_hashes(expected_hashes)
    def test_distributed_lock(self):
        """Only one of many workers must handle a watched row event.

        Spins up the mech driver's connection plus enough fake workers to
        reach ``api_workers`` total, triggers a port-create event and
        asserts exactly one worker processed it.
        """
        api_workers = 11
        cfg.CONF.set_override('api_workers', api_workers)
        row_event = DistributedLockTestEvent()
        self.mech_driver._nb_ovn.idl.notify_handler.watch_event(row_event)
        worker_list = [
            self.mech_driver._nb_ovn,
        ]

        # Create the remaining fake workers (api_workers total, including
        # the mech driver's own connection)
        for _ in range(api_workers - len(worker_list)):
            node_uuid = uuidutils.generate_uuid()
            db_hash_ring.add_node(self.context, ovn_const.HASH_RING_ML2_GROUP,
                                  node_uuid)
            fake_driver = mock.MagicMock(
                node_uuid=node_uuid,
                hash_ring_group=ovn_const.HASH_RING_ML2_GROUP)
            _idl = ovsdb_monitor.OvnNbIdl.from_server(
                self.ovsdb_server_mgr.get_ovsdb_connection_path(),
                'OVN_Northbound', fake_driver)
            worker = self.useFixture(
                fixtures.OVNIdlConnectionFixture(idl=_idl,
                                                 timeout=10)).connection
            worker.idl.notify_handler.watch_event(row_event)
            worker.start()
            worker_list.append(worker)

        # Refresh the hash rings just in case. A plain loop, not a list
        # comprehension: we only want the side effect, not a list.
        for worker in worker_list:
            worker.idl._hash_ring.refresh()

        # Assert all api_workers workers are active in the ring (the
        # original hard-coded 11 here, duplicating the value above)
        self.assertEqual(
            api_workers,
            len(
                db_hash_ring.get_active_nodes(
                    self.context,
                    interval=ovn_const.HASH_RING_NODES_TIMEOUT,
                    group_name=ovn_const.HASH_RING_ML2_GROUP)))

        # Trigger the event
        self.create_port()

        # Wait for the event to complete
        self.assertTrue(row_event.wait())

        # Assert that only one worker handled the event
        self.assertEqual(1, row_event.COUNTER)
Example #5
0
 def _add_nodes_and_assert_exists(self,
                                  count=1,
                                  group_name=HASH_RING_TEST_GROUP):
     """Register *count* ring nodes and assert each row exists in the DB.

     :param count: how many nodes to add.
     :param group_name: hash ring group to register the nodes under.
     :returns: list of the UUIDs of the nodes that were added.
     """
     added = []
     for _ in range(count):
         uuid_ = ovn_hash_ring_db.add_node(self.admin_ctx, group_name)
         self.assertIsNotNone(self._get_node_row(uuid_))
         added.append(uuid_)
     return added
    def test__wait_startup_before_caching(self, api_workers):
        """Caching waits until the ring has as many nodes as API workers.

        NOTE(review): ``api_workers`` is not referenced in the body; it is
        presumably injected by a mock.patch decorator outside this view --
        TODO confirm.
        """
        db_hash_ring.add_node(self.admin_ctx, HASH_RING_TEST_GROUP, 'node-1')

        # Assert it will return True until the node count equals api_workers
        self.assertTrue(self.hash_ring_manager._wait_startup_before_caching)
        self.assertTrue(self.hash_ring_manager._check_hashring_startup)

        db_hash_ring.add_node(self.admin_ctx, HASH_RING_TEST_GROUP, 'node-2')

        # Assert it's now False. Waiting is not needed anymore
        self.assertFalse(self.hash_ring_manager._wait_startup_before_caching)
        self.assertFalse(self.hash_ring_manager._check_hashring_startup)

        # Now assert that since the _check_hashring_startup has been
        # flipped, we no longer will read from the database
        with mock.patch.object(hash_ring_manager.db_hash_ring,
                               'get_active_nodes') as get_nodes_mock:
            self.assertFalse(
                self.hash_ring_manager._wait_startup_before_caching)
            self.assertFalse(get_nodes_mock.called)
Example #7
0
    def test__wait_startup_before_caching(self):
        """Caching waits until the ring nodes have been touched once.

        The startup condition here is ``created_at != updated_at`` on the
        node rows, flipped by ``touch_nodes_from_host``.
        """
        db_hash_ring.add_node(self.admin_ctx, HASH_RING_TEST_GROUP, 'node-1')
        db_hash_ring.add_node(self.admin_ctx, HASH_RING_TEST_GROUP, 'node-2')

        # Assert it will return True until created_at != updated_at
        self.assertTrue(self.hash_ring_manager._wait_startup_before_caching)
        self.assertTrue(self.hash_ring_manager._cache_startup_timeout)

        # Touch the nodes (== update the updated_at column)
        db_hash_ring.touch_nodes_from_host(self.admin_ctx,
                                           HASH_RING_TEST_GROUP)

        # Assert it's now False. Waiting is not needed anymore
        self.assertFalse(self.hash_ring_manager._wait_startup_before_caching)
        self.assertFalse(self.hash_ring_manager._cache_startup_timeout)

        # Now assert that since the _cache_startup_timeout has been
        # flipped, we no longer will read from the database
        with mock.patch.object(hash_ring_manager.db_hash_ring,
                               'get_active_nodes') as get_nodes_mock:
            self.assertFalse(
                self.hash_ring_manager._wait_startup_before_caching)
            self.assertFalse(get_nodes_mock.called)