Example #1
    def test_active_nodes_different_groups(self):
        another_group = 'another_test_group'
        self._add_nodes_and_assert_exists(count=3)
        self._add_nodes_and_assert_exists(count=2, group_name=another_group)

        active_nodes = ovn_hash_ring_db.get_active_nodes(
            self.admin_ctx, interval=60, group_name=HASH_RING_TEST_GROUP)
        self.assertEqual(3, len(active_nodes))
        for node in active_nodes:
            self.assertEqual(HASH_RING_TEST_GROUP, node.group_name)

        active_nodes = ovn_hash_ring_db.get_active_nodes(
            self.admin_ctx, interval=60, group_name=another_group)
        self.assertEqual(2, len(active_nodes))
        for node in active_nodes:
            self.assertEqual(another_group, node.group_name)
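This example (and Example #5 below) leans on an _add_nodes_and_assert_exists helper that is not reproduced here. A minimal sketch of what it could look like, assuming ovn_hash_ring_db.add_node and ovn_hash_ring_db.get_node take a context, a group name and a node UUID; the helper name and arguments come from the calls above, the body is an assumption:

    def _add_nodes_and_assert_exists(self, count=1,
                                     group_name=HASH_RING_TEST_GROUP):
        # Sketch: register `count` nodes in the given group and check
        # that each one can be read back from the database
        node_uuids = []
        for _ in range(count):
            node_uuid = uuidutils.generate_uuid()
            ovn_hash_ring_db.add_node(self.admin_ctx, group_name, node_uuid)
            self.assertIsNotNone(ovn_hash_ring_db.get_node(
                self.admin_ctx, group_name, node_uuid))
            node_uuids.append(node_uuid)
        return node_uuids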
Example #2
    def _create_workers(self, row_event, worker_num):
        self.mech_driver.nb_ovn.idl.notify_handler.watch_event(row_event)
        worker_list = [self.mech_driver.nb_ovn]

        # Create worker_num fake workers
        for _ in range(worker_num):
            node_uuid = uuidutils.generate_uuid()
            db_hash_ring.add_node(self.context, ovn_const.HASH_RING_ML2_GROUP,
                                  node_uuid)
            fake_driver = mock.MagicMock(
                node_uuid=node_uuid,
                hash_ring_group=ovn_const.HASH_RING_ML2_GROUP)
            _idl = ovsdb_monitor.OvnNbIdl.from_server(
                self.ovsdb_server_mgr.get_ovsdb_connection_path(),
                self.nb_api.schema_helper, fake_driver)
            worker = self.useFixture(
                fixtures.OVNIdlConnectionFixture(idl=_idl,
                                                 timeout=10)).connection
            worker.idl.notify_handler.watch_event(row_event)
            worker.start()
            worker_list.append(worker)

        # Refresh the hash rings just in case
        for worker in worker_list:
            worker.idl._hash_ring.refresh()

        # Assert all the fake workers plus the driver are active in the ring
        self.assertEqual(
            worker_num + 1,
            len(
                db_hash_ring.get_active_nodes(
                    self.context,
                    interval=ovn_const.HASH_RING_NODES_TIMEOUT,
                    group_name=ovn_const.HASH_RING_ML2_GROUP)))

        return worker_list
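A test would then call this helper with the event to watch and the number of fakes to spawn, for instance worker_list = self._create_workers(row_event, worker_num=10), yielding ten fake workers plus the real driver connection (hence the worker_num + 1 assertion above).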
Example #3
    @property
    def _wait_startup_before_caching(self):
        # NOTE(lucasagomes): Some events are processed at the service's
        # startup time and since many services may be started concurrently
        # we do not want to use a cached hash ring at that point. This
        # method checks whether the created_at and updated_at columns of
        # this host's nodes in the ring are equal; if so, the service has
        # just started.

        # If the startup timeout already expired, there's no reason to
        # keep reading from the DB. At this point this will always
        # return False
        if not self._cache_startup_timeout:
            return False

        nodes = db_hash_ring.get_active_nodes(
            self.admin_ctx,
            constants.HASH_RING_CACHE_TIMEOUT, self._group, from_host=True)
        # created_at and updated_at differ in microseconds so we check that
        # their difference is less than a second to be safe on slow machines
        dont_cache = nodes and (
            nodes[0].updated_at - nodes[0].created_at < datetime.timedelta(
                seconds=1))
        if not dont_cache:
            self._cache_startup_timeout = False

        return dont_cache
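The heuristic works because adding a node sets created_at and updated_at to essentially the same timestamp, while the periodic touch of the node only advances updated_at. A standalone illustration of the comparison, with made-up timestamps:

import datetime

created_at = datetime.datetime(2023, 1, 1, 12, 0, 0, 100000)
updated_at = datetime.datetime(2023, 1, 1, 12, 0, 0, 450000)

# Freshly added node: the two columns differ only by microseconds,
# so caching is still disallowed
assert updated_at - created_at < datetime.timedelta(seconds=1)

# After a heartbeat touches the row, updated_at moves forward and
# the "just started" window is over
updated_at += datetime.timedelta(seconds=30)
assert updated_at - created_at >= datetime.timedelta(seconds=1)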
Example #4
    def test_distributed_lock(self):
        api_workers = 11
        cfg.CONF.set_override('api_workers', api_workers)
        row_event = DistributedLockTestEvent()
        self.mech_driver._nb_ovn.idl.notify_handler.watch_event(row_event)
        worker_list = [
            self.mech_driver._nb_ovn,
        ]

        # Create 10 fake workers
        for _ in range(api_workers - len(worker_list)):
            node_uuid = uuidutils.generate_uuid()
            db_hash_ring.add_node(self.context, ovn_const.HASH_RING_ML2_GROUP,
                                  node_uuid)
            fake_driver = mock.MagicMock(
                node_uuid=node_uuid,
                hash_ring_group=ovn_const.HASH_RING_ML2_GROUP)
            _idl = ovsdb_monitor.OvnNbIdl.from_server(
                self.ovsdb_server_mgr.get_ovsdb_connection_path(),
                'OVN_Northbound', fake_driver)
            worker = self.useFixture(
                fixtures.OVNIdlConnectionFixture(idl=_idl,
                                                 timeout=10)).connection
            worker.idl.notify_handler.watch_event(row_event)
            worker.start()
            worker_list.append(worker)

        # Refresh the hash rings just in case
        for worker in worker_list:
            worker.idl._hash_ring.refresh()

        # Assert we have 11 active workers in the ring
        self.assertEqual(
            api_workers,
            len(
                db_hash_ring.get_active_nodes(
                    self.context,
                    interval=ovn_const.HASH_RING_NODES_TIMEOUT,
                    group_name=ovn_const.HASH_RING_ML2_GROUP)))

        # Trigger the event
        self.create_port()

        # Wait for the event to complete
        self.assertTrue(row_event.wait())

        # Assert that only one worker handled the event
        self.assertEqual(1, row_event.COUNTER)
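DistributedLockTestEvent is defined elsewhere in the test module. A plausible shape for it, assuming ovsdbapp's event.WaitEvent base class; the watched table, timeout and event name are illustrative assumptions:

class DistributedLockTestEvent(event.WaitEvent):
    COUNTER = 0
    ONETIME = False

    def __init__(self):
        # Assumption: watch row creations on Logical_Switch_Port so
        # that create_port() in the test fires the event
        super().__init__(
            (self.ROW_CREATE,), 'Logical_Switch_Port', (), timeout=15)
        self.event_name = 'DistributedLockTestEvent'

    def run(self, event, row, old):
        # Each worker that handles the event bumps the counter; the
        # test asserts it ends at 1, i.e. the hash ring routed the
        # event to exactly one worker
        self.COUNTER += 1
        self.event.set()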
Example #5
    def test_active_nodes_from_host(self):
        self._add_nodes_and_assert_exists(count=3)

        # Add another node from a different host
        another_host_id = 'another-host-52359446-c366'
        with mock.patch.object(ovn_hash_ring_db, 'CONF') as mock_conf:
            mock_conf.host = another_host_id
            self._add_nodes_and_assert_exists()

        # Assert only the 3 nodes from this host are returned
        active_nodes = ovn_hash_ring_db.get_active_nodes(
            self.admin_ctx,
            interval=60,
            group_name=HASH_RING_TEST_GROUP,
            from_host=True)
        self.assertEqual(3, len(active_nodes))
        self.assertNotIn(another_host_id,
                         [node.hostname for node in active_nodes])
Example #6
    def _load_hash_ring(self, refresh=False):
        cache_timeout = timeutils.utcnow() - datetime.timedelta(
            seconds=constants.HASH_RING_CACHE_TIMEOUT)

        # Refresh the cache if:
        # - Refresh is forced (refresh=True)
        # - Service just started (_wait_startup_before_caching)
        # - Hash Ring is not yet instantiated
        # - Cache has timed out
        if (refresh or self._wait_startup_before_caching
                or self._hash_ring is None or not self._hash_ring.nodes
                or cache_timeout >= self._last_time_loaded):
            nodes = db_hash_ring.get_active_nodes(
                self.admin_ctx, constants.HASH_RING_NODES_TIMEOUT, self._group)
            self._hash_ring = hashring.HashRing(
                {node.node_uuid for node in nodes})
            self._last_time_loaded = timeutils.utcnow()
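Once loaded, the tooz ring maps arbitrary keys onto the registered node UUIDs. A minimal standalone illustration of that lookup (node names made up):

from tooz import hashring

ring = hashring.HashRing({'node-uuid-1', 'node-uuid-2', 'node-uuid-3'})

# tooz hashes raw bytes, so keys must be encoded; get_nodes() returns
# the set of nodes responsible for the key (a single node with the
# default replica count)
owners = ring.get_nodes(b'some-resource-id')
print(owners)  # e.g. {'node-uuid-2'}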
Example #7
    @property
    def _wait_startup_before_caching(self):
        # NOTE(lucasagomes): Some events are processed at the service's
        # startup time and since many services may be started concurrently
        # we do not want to use a cached hash ring at that point. This
        # method ensures that we start allowing the use of cached HashRings
        # once the number of HashRing nodes >= the number of api workers.

        # If the startup timeout already expired, there's no reason to
        # keep reading from the DB. At this point this will always
        # return False
        if not self._check_hashring_startup:
            return False

        api_workers = service._get_api_workers()
        nodes = db_hash_ring.get_active_nodes(
            self.admin_ctx,
            constants.HASH_RING_CACHE_TIMEOUT, self._group, from_host=True)

        if len(nodes) >= api_workers:
            LOG.debug("Allow caching, nodes %s>=%s", len(nodes), api_workers)
            self._check_hashring_startup = False
            return False
        LOG.debug("Disallow caching, nodes %s<%s", len(nodes), api_workers)
        return True
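For completeness, a manager built around _load_hash_ring() would typically expose a per-key lookup for consumers. A hedged sketch; the method name, the byte-encoding of the key and the error handling are assumptions, not shown in the examples above:

    def get_node(self, key):
        # _load_hash_ring() is cheap when the cache is warm and
        # startup is over; otherwise it re-reads the active nodes
        self._load_hash_ring()
        if isinstance(key, str):
            # tooz hashes raw bytes
            key = key.encode('utf-8')
        try:
            # the ring returns the set of nodes owning the hashed key;
            # pop() yields the single owner
            return self._hash_ring[key].pop()
        except KeyError:
            raise RuntimeError('Hash Ring is empty for group %s' %
                               self._group)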