    def test_distributed_lock(self):
        row_event = DistributedLockTestEvent()
        self.mech_driver._nb_ovn.idl.notify_handler.watch_event(row_event)
        worker_list = [self.mech_driver._nb_ovn, ]

        # Create 10 fake workers
        for _ in range(10):
            node_uuid = uuidutils.generate_uuid()
            db_hash_ring.add_node(node_uuid)
            fake_driver = mock.MagicMock(node_uuid=node_uuid)
            _idl = ovsdb_monitor.OvnNbIdl.from_server(
                self.ovsdb_server_mgr.get_ovsdb_connection_path(),
                'OVN_Northbound', fake_driver)
            worker = self.useFixture(
                base.ConnectionFixture(idl=_idl, timeout=10)).connection
            worker.idl.notify_handler.watch_event(row_event)
            worker.start()
            worker_list.append(worker)

        # Refresh the hash rings just in case
        for worker in worker_list:
            worker.idl._hash_ring.refresh()

        # Assert we have 11 active workers in the ring
        self.assertEqual(
            11, len(db_hash_ring.get_active_nodes(
                    interval=ovn_const.HASH_RING_NODES_TIMEOUT)))

        # Trigger the event
        self.create_port()

        # Wait for the event to complete
        self.assertTrue(row_event.wait())

        # Assert that only one worker handled the event
        self.assertEqual(1, row_event.COUNTER)
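
For context, the behaviour this test exercises is the per-worker filtering done on top of the OVSDB notifications: every worker sees the row event, but each one asks the shared hash ring which node owns the row's UUID and silently drops the event if it is not the owner, so exactly one worker increments COUNTER. A minimal sketch of that check, with attribute names inferred from how the test wires things up (worker.idl._hash_ring, node_uuid on the driver), not a verbatim copy of the driver code:

    def notify(self, event, row, updates=None):
        # Ask the ring which registered node should handle this row.
        target_node = self._hash_ring.get_node(str(row.uuid))
        if target_node != self._node_uuid:
            # Another worker owns this key; ignore the event here.
            return
        self.notify_handler.notify(event, row, updates)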

    def test_get_node(self):
        # Use pre-defined UUIDs to make the hashes predictable
        node_1_uuid = db_hash_ring.add_node('node-1')
        node_2_uuid = db_hash_ring.add_node('node-2')

        hash_dict_before = {node_1_uuid: 'fake-uuid',
                            node_2_uuid: 'fake-uuid-0'}
        self._verify_hashes(hash_dict_before)

    def test_get_node(self):
        # Use pre-defined UUIDs to make the hashes predictable
        node_1_uuid = db_hash_ring.add_node(HASH_RING_TEST_GROUP, 'node-1')
        node_2_uuid = db_hash_ring.add_node(HASH_RING_TEST_GROUP, 'node-2')

        hash_dict_before = {
            'fake-uuid': node_1_uuid,
            'fake-uuid-0': node_2_uuid
        }
        self._verify_hashes(hash_dict_before)
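
The two test_get_node variants above come from different versions of the helper API: the older one registers nodes without a hash ring group and builds the dict as node -> key, while the newer one passes HASH_RING_TEST_GROUP and builds it as key -> expected node. The _verify_hashes helper itself is not shown; a plausible sketch matching the newer mapping (treat the exact body as an assumption):

    def _verify_hashes(self, hash_dict):
        # Every key should consistently hash to the node expected to own it.
        for key, expected_node in hash_dict.items():
            self.assertEqual(expected_node,
                             self.hash_ring_manager.get_node(key))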

    def test_ring_rebalance(self):
        # Use pre-defined UUIDs to make the hashes predictable
        node_1_uuid = db_hash_ring.add_node(HASH_RING_TEST_GROUP, 'node-1')
        node_2_uuid = db_hash_ring.add_node(HASH_RING_TEST_GROUP, 'node-2')

        # Add another node from a different host
        with mock.patch.object(db_hash_ring, 'CONF') as mock_conf:
            mock_conf.host = 'another-host-52359446-c366'
            another_host_node = db_hash_ring.add_node(HASH_RING_TEST_GROUP,
                                                      'another-host')

        # Assert all nodes are alive in the ring
        self.hash_ring_manager.refresh()
        self.assertEqual(3, len(self.hash_ring_manager._hash_ring.nodes))

        # Hash certain values against the nodes
        hash_dict_before = {
            'fake-uuid': node_1_uuid,
            'fake-uuid-0': node_2_uuid,
            'fake-uuid-ABCDE': another_host_node
        }
        self._verify_hashes(hash_dict_before)

        # Mock utcnow() so that HASH_RING_NODES_TIMEOUT has already
        # expired, then touch the nodes from our host
        fake_utcnow = timeutils.utcnow() - datetime.timedelta(
            seconds=constants.HASH_RING_NODES_TIMEOUT)
        with mock.patch.object(timeutils, 'utcnow') as mock_utcnow:
            mock_utcnow.return_value = fake_utcnow
            db_hash_ring.touch_nodes_from_host(HASH_RING_TEST_GROUP)

        # Now assert that the ring was re-balanced and only the node from
        # another host is marked as alive
        self.hash_ring_manager.refresh()
        self.assertEqual([another_host_node],
                         list(self.hash_ring_manager._hash_ring.nodes.keys()))

        # Now only "another_host_node" is alive, all values should hash to it
        hash_dict_after_rebalance = {
            'fake-uuid': another_host_node,
            'fake-uuid-0': another_host_node,
            'fake-uuid-ABCDE': another_host_node
        }
        self._verify_hashes(hash_dict_after_rebalance)

        # Now touch the nodes so they appear active again
        db_hash_ring.touch_nodes_from_host(HASH_RING_TEST_GROUP)
        self.hash_ring_manager.refresh()

        # The ring should re-balance back to how it was before
        self._verify_hashes(hash_dict_before)
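
The rebalance above relies on a simple liveness rule: a node stays in the ring only while its updated_at timestamp is newer than HASH_RING_NODES_TIMEOUT seconds ago, and touch_nodes_from_host() refreshes that timestamp for the nodes registered by the local host. A conceptual sketch of the check (the real code does this with a database query; the helper name here is illustrative):

    def _is_node_active(node):
        # A node counts as alive only if it was touched recently enough.
        limit = timeutils.utcnow() - datetime.timedelta(
            seconds=constants.HASH_RING_NODES_TIMEOUT)
        return node.updated_at >= limit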

Example 5
    def _add_nodes_and_assert_exists(self, count=1):
        nodes = []
        for i in range(count):
            node_uuid = db_hash_ring.add_node()
            self.assertIsNotNone(self._get_node_row(node_uuid))
            nodes.append(node_uuid)
        return nodes

Example 6
    def _add_nodes_and_assert_exists(self, count=1,
                                     group_name=HASH_RING_TEST_GROUP):
        nodes = []
        for i in range(count):
            node_uuid = db_hash_ring.add_node(group_name)
            self.assertIsNotNone(self._get_node_row(node_uuid))
            nodes.append(node_uuid)
        return nodes
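
Both helper variants call a _get_node_row() lookup that is not shown here. Assuming the ring nodes are stored in a SQLAlchemy table keyed by node_uuid, it might look roughly like the following (the model and session names are assumptions for illustration):

    def _get_node_row(self, node_uuid):
        # Return the hash ring row for this node, or None if it is absent.
        try:
            return self.admin_ctx.session.query(
                ovn_models.OVNHashRing).filter_by(node_uuid=node_uuid).one()
        except exc.NoResultFound:
            return None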

    def test__wait_startup_before_caching(self):
        db_hash_ring.add_node('node-1')
        db_hash_ring.add_node('node-2')

        # Assert it will return True until created_at != updated_at
        self.assertTrue(self.hash_ring_manager._wait_startup_before_caching)
        self.assertTrue(self.hash_ring_manager._cache_startup_timeout)

        # Touch the nodes (== update the updated_at column)
        db_hash_ring.touch_nodes_from_host()

        # Assert it's now False. Waiting is not needed anymore
        self.assertFalse(self.hash_ring_manager._wait_startup_before_caching)
        self.assertFalse(self.hash_ring_manager._cache_startup_timeout)

        # Now assert that since _cache_startup_timeout has been
        # flipped, we will no longer read from the database
        with mock.patch.object(hash_ring_manager.db_hash_ring,
                               'get_active_nodes') as get_nodes_mock:
            self.assertFalse(
                self.hash_ring_manager._wait_startup_before_caching)
            self.assertFalse(get_nodes_mock.called)
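
What this test pins down is the caching behaviour of _wait_startup_before_caching: it keeps returning True (and keeps reading the database) only while every node still has created_at == updated_at, and once the nodes have been touched it flips _cache_startup_timeout so later calls return False without querying again. A sketch of a property with that behaviour, using the attribute and function names seen in the test (the real implementation may differ in detail):

    @property
    def _wait_startup_before_caching(self):
        if not self._cache_startup_timeout:
            # Startup is over; answer from the cached flag, no DB reads.
            return False
        nodes = db_hash_ring.get_active_nodes(
            interval=constants.HASH_RING_NODES_TIMEOUT)
        waiting = bool(nodes) and all(
            n.created_at == n.updated_at for n in nodes)
        if not waiting:
            self._cache_startup_timeout = False
        return waiting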

Example 8
    def post_fork_initialize(self, resource, event, trigger, payload=None):
        # NOTE(rtheis): This will initialize all workers (API, RPC,
        # plugin service and OVN) with OVN IDL connections.
        self._post_fork_event.clear()
        self._ovn_client_inst = None

        is_maintenance = (
            utils.get_method_class(trigger) == worker.MaintenanceWorker)
        if not is_maintenance:
            self.node_uuid = db_hash_ring.add_node(self.hash_ring_group)

        self._nb_ovn, self._sb_ovn = impl_idl_ovn.get_ovn_idls(
            self, trigger, binding_events=not is_maintenance)

        # Override agents API methods
        self.patch_plugin_merge("get_agents", get_agents)
        self.patch_plugin_choose("get_agent", get_agent)
        self.patch_plugin_choose("update_agent", update_agent)
        self.patch_plugin_choose("delete_agent", delete_agent)

        # Now IDL connections can be safely used.
        self._post_fork_event.set()

        if is_maintenance:
            # Call the synchronization task if it's the maintenance worker.
            # This syncs the Neutron DB to the OVN-NB DB only when they
            # are inconsistent.
            self.nb_synchronizer = ovn_db_sync.OvnNbSynchronizer(
                self._plugin, self._nb_ovn, self._sb_ovn,
                config.get_ovn_neutron_sync_mode(), self)
            self.nb_synchronizer.sync()

            # This syncs the Neutron DB to the OVN-SB DB only when they
            # are inconsistent.
            self.sb_synchronizer = ovn_db_sync.OvnSbSynchronizer(
                self._plugin, self._sb_ovn, self)
            self.sb_synchronizer.sync()

            self._maintenance_thread = maintenance.MaintenanceThread()
            self._maintenance_thread.add_periodics(
                maintenance.DBInconsistenciesPeriodics(self._ovn_client))
            self._maintenance_thread.add_periodics(
                maintenance.HashRingHealthCheckPeriodics(self.hash_ring_group))
            self._maintenance_thread.start()
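
The HashRingHealthCheckPeriodics task registered at the end is what keeps the node added by db_hash_ring.add_node() looking alive to the other workers. A plausible sketch, assuming a futurist periodic task that simply re-touches this host's nodes at a fixed interval (the spacing constant is an assumption):

    class HashRingHealthCheckPeriodics(object):

        def __init__(self, group):
            self._group = group

        @periodics.periodic(spacing=ovn_const.HASH_RING_TOUCH_INTERVAL)
        def touch_hash_ring_nodes(self):
            # Refresh updated_at for every node this host registered so they
            # remain active members of the ring.
            db_hash_ring.touch_nodes_from_host(self._group)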