Example no. 1
0
    def test_active_nodes(self):
        # Register three nodes for the local host.
        self._add_nodes_and_assert_exists(count=3)

        # Register one more node that pretends to come from a different
        # host by patching the CONF.host value seen by db_hash_ring.
        with mock.patch.object(db_hash_ring, 'CONF') as fake_conf:
            fake_conf.host = 'another-host-52359446-c366'
            remote_node = self._add_nodes_and_assert_exists()[0]

        # All four nodes were touched within the last 60 seconds, so
        # every one of them must be reported as active.
        all_active = db_hash_ring.get_active_nodes(
            interval=60, group_name=HASH_RING_TEST_GROUP)
        self.assertEqual(4, len(all_active))

        # Back-date the heartbeat of this host's nodes by 60 seconds so
        # they fall outside the activity window.
        stale_time = timeutils.utcnow() - datetime.timedelta(seconds=60)
        with mock.patch.object(timeutils, 'utcnow') as fake_utcnow:
            fake_utcnow.return_value = stale_time
            db_hash_ring.touch_nodes_from_host(HASH_RING_TEST_GROUP)

        # The local-host nodes now look offline; only the node that was
        # registered from the other host should remain active.
        active_nodes = db_hash_ring.get_active_nodes(
            interval=60, group_name=HASH_RING_TEST_GROUP)
        self.assertEqual(1, len(active_nodes))
        self.assertEqual(remote_node, active_nodes[0].node_uuid)
    def test_distributed_lock(self):
        """Assert only one worker in the hash ring handles a watched event.

        Spawns 10 extra IDL workers (11 total, including the mech driver's
        own connection), triggers a single port creation and verifies that
        exactly one worker processed the resulting event.
        """
        row_event = DistributedLockTestEvent()
        self.mech_driver._nb_ovn.idl.notify_handler.watch_event(row_event)
        worker_list = [self.mech_driver._nb_ovn, ]

        # Create 10 fake workers
        for _ in range(10):
            node_uuid = uuidutils.generate_uuid()
            db_hash_ring.add_node(node_uuid)
            fake_driver = mock.MagicMock(node_uuid=node_uuid)
            _idl = ovsdb_monitor.OvnNbIdl.from_server(
                self.ovsdb_server_mgr.get_ovsdb_connection_path(),
                'OVN_Northbound', fake_driver)
            worker = self.useFixture(
                base.ConnectionFixture(idl=_idl, timeout=10)).connection
            worker.idl.notify_handler.watch_event(row_event)
            worker.start()
            worker_list.append(worker)

        # Refresh the hash rings just in case. A plain loop (not a list
        # comprehension) since only the side effect matters.
        for worker in worker_list:
            worker.idl._hash_ring.refresh()

        # Assert we have 11 active workers in the ring
        self.assertEqual(
            11, len(db_hash_ring.get_active_nodes(
                    interval=ovn_const.HASH_RING_NODES_TIMEOUT)))

        # Trigger the event
        self.create_port()

        # Wait for the event to complete
        self.assertTrue(row_event.wait())

        # Assert that only one worker handled the event
        self.assertEqual(1, row_event.COUNTER)
Example no. 3
0
    def test_active_nodes_different_groups(self):
        # Populate two separate groups: three nodes in the default test
        # group and two nodes in a second, independent group.
        another_group = 'another_test_group'
        self._add_nodes_and_assert_exists(count=3)
        self._add_nodes_and_assert_exists(count=2, group_name=another_group)

        # Querying the default group must return exactly its own 3 nodes.
        default_group_nodes = db_hash_ring.get_active_nodes(
            interval=60, group_name=HASH_RING_TEST_GROUP)
        self.assertEqual(3, len(default_group_nodes))
        for node in default_group_nodes:
            self.assertEqual(HASH_RING_TEST_GROUP, node.group_name)

        # Likewise the second group must expose only its own 2 nodes.
        other_group_nodes = db_hash_ring.get_active_nodes(
            interval=60, group_name=another_group)
        self.assertEqual(2, len(other_group_nodes))
        for node in other_group_nodes:
            self.assertEqual(another_group, node.group_name)
Example no. 4
0
    def test_active_nodes_from_host(self):
        """Assert get_active_nodes(from_host=True) filters by this host."""
        self._add_nodes_and_assert_exists(count=3)

        # Add another node from a different host
        another_host_id = 'another-host-52359446-c366'
        with mock.patch.object(db_hash_ring, 'CONF') as mock_conf:
            mock_conf.host = another_host_id
            another_host_node = self._add_nodes_and_assert_exists()[0]

        # Assert only the 3 nodes from this host are returned
        active_nodes = db_hash_ring.get_active_nodes(interval=60,
                                                     from_host=True)
        self.assertEqual(3, len(active_nodes))
        # BUG FIX: the original asserted that the host-id *string* was not
        # present in a list of node *objects*, which is vacuously true.
        # Assert the other host's node UUID is absent from the results.
        self.assertNotIn(another_host_node,
                         [node.node_uuid for node in active_nodes])
Example no. 5
0
    def _load_hash_ring(self, refresh=False):
        """Populate ``self._hash_ring`` from the DB, reusing a cached copy.

        :param refresh: when True, force a reload regardless of the cache
                        state.
        """
        # Oldest acceptable load time: anything loaded before this point
        # is considered stale.
        cache_timeout = timeutils.utcnow() - datetime.timedelta(
            seconds=constants.HASH_RING_CACHE_TIMEOUT)

        # Refresh the cache if:
        # - Refreshed is forced (refresh=True)
        # - Service just started (_wait_startup_before_caching)
        # - Hash Ring is not yet instantiated
        # - Cache has timed out
        # NOTE(review): ``self._wait_startup_before_caching`` is referenced
        # without being called; if it is a plain method (not a @property)
        # the bound-method object is always truthy and this condition would
        # always force a reload — confirm it is defined as a property.
        if (refresh or self._wait_startup_before_caching
                or self._hash_ring is None or not self._hash_ring.nodes
                or cache_timeout >= self._last_time_loaded):
            # Rebuild the ring from the UUIDs of the currently active
            # nodes and record when the load happened.
            nodes = db_hash_ring.get_active_nodes(
                constants.HASH_RING_NODES_TIMEOUT)
            self._hash_ring = hashring.HashRing(
                {node.node_uuid
                 for node in nodes})
            self._last_time_loaded = timeutils.utcnow()
Example no. 6
0
    def _wait_startup_before_caching(self):
        # NOTE(lucasagomes): Some events are processed at the service's
        # startup time and since many services may be started concurrently
        # we do not want to use a cached hash ring at that point. A node
        # whose created_at column still equals its updated_at column has
        # never been touched since registration, meaning the service on
        # this host has only just started.

        # Once the startup window has closed there is no reason to keep
        # reading from the DB; from then on this always reports False.
        if not self._cache_startup_timeout:
            return False

        active = db_hash_ring.get_active_nodes(
            constants.HASH_RING_CACHE_TIMEOUT, from_host=True)
        starting_up = active and active[0].created_at == active[0].updated_at
        if starting_up:
            return starting_up

        # The service is past startup: latch the flag so future calls can
        # short-circuit without a DB lookup, and report that caching is OK.
        self._cache_startup_timeout = False
        return starting_up