def test_deferred_update_resource_provider_traits(self):
    """The trait update for a device carries both its physnet trait and
    the vnic-type trait; the order of traits is not part of the contract.
    """
    self.kwargs.update({
        'device_mappings': {'physnet0': ['eth0']},
        'rp_bandwidths': {'eth0': {'egress': 1, 'ingress': 1}},
        'supported_vnic_types': ['normal'],
    })
    state = placement_report.PlacementState(**self.kwargs)

    for update in state.deferred_update_resource_provider_traits():
        update.execute()

    traits_mock = self.client_mock.update_resource_provider_traits
    traits_mock.assert_called()
    called_kwargs = traits_mock.call_args[1]
    # uuid below generated by the following command:
    # uuid -v5 '00000000-0000-0000-0000-000000000001' 'fakehost:eth0'
    self.assertEqual(
        uuid.UUID('1ea6f823-bcf2-5dc5-9bee-4ee6177a6451'),
        called_kwargs['resource_provider_uuid'])
    # NOTE(bence romsics): To avoid testing the _order_ of traits.
    self.assertEqual(
        {'CUSTOM_PHYSNET_PHYSNET0', 'CUSTOM_VNIC_TYPE_NORMAL'},
        set(called_kwargs['traits']))
def test_deferred_update_resource_provider_inventories(self):
    """Inventory updates merge the per-direction bandwidth totals with
    the configured inventory defaults; the direction set to None
    (ingress here) produces no inventory entry.
    """
    self.kwargs.update({
        'device_mappings': {'physnet0': ['eth0']},
        'rp_bandwidths': {'eth0': {'egress': 100, 'ingress': None}},
        'rp_inventory_defaults': {'step_size': 10, 'max_unit': 50},
    })
    state = placement_report.PlacementState(**self.kwargs)

    for update in state.deferred_update_resource_provider_inventories():
        update.execute()

    # uuid below generated by the following command:
    # uuid -v5 '00000000-0000-0000-0000-000000000001' 'fakehost:eth0'
    expected_rp_uuid = uuid.UUID('1ea6f823-bcf2-5dc5-9bee-4ee6177a6451')
    expected_inventories = {
        'NET_BW_EGR_KILOBIT_PER_SEC': {
            'total': 100,
            'step_size': 10,
            'max_unit': 50,
        },
    }
    self.client_mock.update_resource_provider_inventories.\
        assert_called_with(
            resource_provider_uuid=expected_rp_uuid,
            inventories=expected_inventories)
def test__deferred_create_agent_rps_multiple_hypervisors(self):
    """With devices spread over two hypervisors, one agent RP must be
    created under each hypervisor RP.
    """
    self.kwargs['hypervisor_rps']['eth1'] = {
        'name': 'fakehost2',
        'uuid': self.hypervisor2_rp_uuid,
    }
    state = placement_report.PlacementState(**self.kwargs)

    for create in state._deferred_create_agent_rps():
        create.execute()

    # uuids below generated by the following commands:
    # uuid -v5 '00000000-0000-0000-0000-000000000001' 'fakehost'
    # uuid -v5 '00000000-0000-0000-0000-000000000001' 'fakehost2'
    expected_calls = [
        mock.call(resource_provider={
            'name': 'fakehost:fake agent type',
            'uuid': uuid.UUID('c0b4abe5-516f-54b8-b965-ff94060dcbcc'),
            'parent_provider_uuid': self.hypervisor1_rp_uuid}),
        mock.call(resource_provider={
            'name': 'fakehost2:fake agent type',
            'uuid': uuid.UUID('544155b7-1295-5f10-b5f0-eadc50abc6d4'),
            'parent_provider_uuid': self.hypervisor2_rp_uuid}),
    ]
    self.client_mock.ensure_resource_provider.assert_has_calls(
        expected_calls, any_order=True)
def build_placement_state(self, chassis, name2uuid):
    """Translate the chassis' CMS options into a PlacementState.

    Returns None (after logging a warning) when the hypervisor
    resource providers cannot be resolved: either the RP_HYPERVISORS
    option is absent or a hypervisor name is missing from name2uuid.
    """
    bridge_mappings = _parse_bridge_mappings(chassis)
    cms_options = _parse_ovn_cms_options(chassis)
    LOG.debug('Building placement options for chassis %s: %s',
              chassis.name, cms_options)

    hypervisor_rps = {}
    try:
        # Both lookups below may raise KeyError: the option itself,
        # or an unresolved hypervisor name.
        rp_hypervisors = cms_options[ovn_const.RP_HYPERVISORS]
        for device, hyperv in rp_hypervisors.items():
            hypervisor_rps[device] = {'name': hyperv,
                                      'uuid': name2uuid[hyperv]}
    except KeyError:
        LOG.warning(
            'Error updating BW information from chassis '
            '%(chassis)s, CMS options: %(cms_options)s',
            {'chassis': chassis.name, 'cms_options': cms_options})
        return

    return placement_report.PlacementState(
        rp_bandwidths=cms_options[ovn_const.RP_BANDWIDTHS],
        rp_inventory_defaults=cms_options[ovn_const.RP_INVENTORY_DEFAULTS],
        driver_uuid_namespace=self.uuid_ns,
        agent_type=ovn_const.OVN_CONTROLLER_AGENT,
        hypervisor_rps=hypervisor_rps,
        device_mappings=bridge_mappings,
        supported_vnic_types=self.supported_vnic_types,
        client=self.placement_plugin._placement_client)
def test__deferred_update_vnic_type_traits(self):
    """Each supported vnic type must produce a trait update call."""
    self.kwargs.update({'supported_vnic_types': ['direct']})
    state = placement_report.PlacementState(**self.kwargs)

    for update in state._deferred_update_vnic_type_traits():
        update.execute()

    self.client_mock.update_trait.assert_any_call(
        name='CUSTOM_VNIC_TYPE_DIRECT')
def test__deferred_create_agent_rps(self):
    """The agent RP is named 'host:agent type' and parented to the
    hypervisor's RP.
    """
    state = placement_report.PlacementState(**self.kwargs)

    for create in state._deferred_create_agent_rps():
        create.execute()

    # uuid below generated by the following command:
    # uuid -v5 '00000000-0000-0000-0000-000000000001' 'fakehost'
    expected_rp = {
        'name': 'fakehost:fake agent type',
        'uuid': uuid.UUID('c0b4abe5-516f-54b8-b965-ff94060dcbcc'),
        'parent_provider_uuid': self.hypervisor1_rp_uuid,
    }
    self.client_mock.ensure_resource_provider.assert_called_with(
        resource_provider=expected_rp)
def test__deferred_update_physnet_traits(self):
    """Physnet traits are updated for devices listed in rp_bandwidths
    (only eth0/physnet0 here; eth1/physnet1 has no bandwidth entry).
    """
    self.kwargs.update({
        'device_mappings': {
            'physnet0': ['eth0'],
            'physnet1': ['eth1'],
        },
        'rp_bandwidths': {'eth0': {'egress': 1, 'ingress': 1}},
    })
    state = placement_report.PlacementState(**self.kwargs)

    for update in state._deferred_update_physnet_traits():
        update.execute()

    self.client_mock.update_trait.assert_called_with(
        name='CUSTOM_PHYSNET_PHYSNET0')
def build_placement_state(self, chassis, name2uuid):
    """Translate the chassis' CMS options into a PlacementState.

    Unresolvable hypervisor RPs are skipped per device instead of
    aborting the whole build.  Bandwidth entries are kept only when the
    device can be matched both to a host (hypervisor_rps) and to an
    external network (bridge_mappings); anything else is irrelevant.
    """
    bridge_mappings = _parse_bridge_mappings(chassis)
    cms_options = _parse_ovn_cms_options(chassis)
    LOG.debug('Building placement options for chassis %s: %s',
              chassis.name, cms_options)

    hypervisor_rps = {}
    for device, hyperv in cms_options[ovn_const.RP_HYPERVISORS].items():
        try:
            hypervisor_rps[device] = {'name': hyperv,
                                      'uuid': name2uuid[hyperv]}
        except (KeyError, AttributeError):
            # This hypervisor cannot be resolved; leave its device out.
            continue

    bridges = set(itertools.chain.from_iterable(bridge_mappings.values()))
    rp_bw = cms_options[n_const.RP_BANDWIDTHS]
    if rp_bw:
        usable_devices = set(hypervisor_rps) & bridges
        cms_options[n_const.RP_BANDWIDTHS] = {
            device: bw for device, bw in rp_bw.items()
            if device in usable_devices}

    # NOTE(ralonsoh): OVN only reports min BW RPs; packet processing RPs
    # will be added in a future implementation.
    # NOTE(review): the original comment stated that no "PlacementState"
    # is returned when RP_BANDWIDTHS ends up empty, but this function
    # returns one unconditionally -- confirm against the caller whether
    # the empty case is filtered there.
    return placement_report.PlacementState(
        rp_bandwidths=cms_options[n_const.RP_BANDWIDTHS],
        rp_inventory_defaults=cms_options[n_const.RP_INVENTORY_DEFAULTS],
        rp_pkt_processing={},
        rp_pkt_processing_inventory_defaults=None,
        driver_uuid_namespace=self.uuid_ns,
        agent_type=ovn_const.OVN_CONTROLLER_AGENT,
        hypervisor_rps=hypervisor_rps,
        device_mappings=bridge_mappings,
        supported_vnic_types=self.supported_vnic_types,
        client=self.placement_plugin._placement_client)
def test_deferred_create_resource_providers(self):
    """The device RP is named 'host:agent type:device' and its parent
    is the uuid5 of the host name.
    """
    self.kwargs.update({
        'rp_bandwidths': {'eth0': {'egress': 1, 'ingress': 1}},
    })
    state = placement_report.PlacementState(**self.kwargs)

    for create in state.deferred_create_resource_providers():
        create.execute()

    # uuids below generated by the following commands:
    # uuid -v5 '00000000-0000-0000-0000-000000000001' 'fakehost:eth0'
    # uuid -v5 '00000000-0000-0000-0000-000000000001' 'fakehost'
    expected_rp = {
        'name': 'fakehost:fake agent type:eth0',
        'uuid': uuid.UUID('1ea6f823-bcf2-5dc5-9bee-4ee6177a6451'),
        'parent_provider_uuid': uuid.UUID(
            'c0b4abe5-516f-54b8-b965-ff94060dcbcc'),
    }
    self.client_mock.ensure_resource_provider.assert_called_with(
        expected_rp)
def _sync_placement_state(self, agent, agent_db):
    """Queue a deferred synchronization of this agent's resource
    providers to placement and record the outcome on the agent DB row.

    Resolves the hypervisor RPs up front; any resolution or placement
    client failure marks ``agent_db.resources_synced = False`` and
    aborts.  Otherwise the placement client calls are wrapped in a
    single batch and handed to ``self._batch_notifier``.
    """
    configurations = agent['configurations']
    mech_driver = self._agents.mechanism_driver_by_agent_type(
        agent['agent_type'])
    uuid_ns = mech_driver.resource_provider_uuid5_namespace
    supported_vnic_types = mech_driver.supported_vnic_types
    device_mappings = mech_driver.get_standard_device_mappings(agent)
    if 'resource_provider_hypervisors' in configurations:
        # When the agent has the fix for
        # https://bugs.launchpad.net/neutron/+bug/1853840
        # it sends us hypervisor names (compute nodes in nova
        # terminology).
        hypervisors = configurations['resource_provider_hypervisors']
    else:
        # For older agents without the fix we have to assume the old
        # buggy behavior. There we assumed DEFAULT.host is the same as
        # the hypervisor name, which is true in many deployments, but
        # not always. (In nova terminology: The compute host's
        # DEFAULT.host is not neccessarily the same as the compute node
        # name. We may even have multiple compute nodes behind a
        # compute host.)
        # TODO(bence romsics): This else branch can be removed when we
        # no longer want to support pre-Ussuri agents.
        hypervisors = {
            device: agent['host']
            for device
            in configurations['resource_provider_bandwidths'].keys()
        }

    # Shared template for both the success and the failure log line.
    log_msg = ('Synchronization of resources '
               'of agent type %(type)s '
               'at host %(host)s '
               'to placement %(result)s.')

    try:
        # Resolve each hypervisor name to its RP uuid, then build the
        # per-device RP mapping consumed by PlacementState.
        name2uuid = {}
        for name in hypervisors.values():
            name2uuid[name] = self._get_rp_by_name(name=name)['uuid']

        hypervisor_rps = {}
        for device, hypervisor in hypervisors.items():
            hypervisor_rps[device] = {
                'name': hypervisor,
                'uuid': name2uuid[hypervisor],
            }
    except (IndexError, ks_exc.HttpError, ks_exc.ClientException):
        # NOTE(review): IndexError presumably originates from
        # _get_rp_by_name() when no RP matches -- confirm there.
        agent_db.resources_synced = False
        agent_db.update()
        LOG.warning(
            log_msg,
            {'type': agent['agent_type'],
             'host': agent['host'],
             'result': 'failed'})
        return

    state = placement_report.PlacementState(
        rp_bandwidths=configurations['resource_provider_bandwidths'],
        rp_inventory_defaults=configurations[
            'resource_provider_inventory_defaults'],
        driver_uuid_namespace=uuid_ns,
        agent_type=agent['agent_type'],
        hypervisor_rps=hypervisor_rps,
        device_mappings=device_mappings,
        supported_vnic_types=supported_vnic_types,
        client=self._placement_client)

    deferred_batch = state.deferred_sync()

    # NOTE(bence romsics): Some client calls depend on earlier
    # ones, but not all. There are calls in a batch that can succeed
    # independently of earlier calls. Therefore even if a call fails
    # we have to suppress its failure so the later independent calls
    # have a chance to succeed. If we queue up the deferred client
    # calls one by one then we cannot handle errors at the end of
    # a batch. So instead we should wrap the deferred client calls
    # in a single deferred batch which executes the client calls,
    # continuing to the next client call even if there was an error
    # but remembering if an error happened. Then at the end of the
    # batch (also having access to the agent object) set the agent's
    # resources_synced attribute according to the success/failure
    # of the batch. Since each client call does monkey patched I/O
    # we'll yield to other eventlet threads in each call therefore
    # the performance should not be affected by the wrapping.
    def batch():
        # Execute every deferred call; suppress (but remember)
        # individual failures so independent later calls still run.
        errors = False
        for deferred in deferred_batch:
            try:
                LOG.debug('placement client: {}'.format(deferred))
                deferred.execute()
            except Exception:
                errors = True
                LOG.exception(
                    'placement client call failed: %s', str(deferred))

        # Persist the overall outcome on the agent DB row.
        resources_synced = not errors
        agent_db.resources_synced = resources_synced
        agent_db.update()
        if resources_synced:
            LOG.debug(
                log_msg,
                {'type': agent['agent_type'],
                 'host': agent['host'],
                 'result': 'succeeded'})
        else:
            LOG.warning(
                log_msg,
                {'type': agent['agent_type'],
                 'host': agent['host'],
                 'result': 'failed'})

    self._batch_notifier.queue_event(batch)
def _sync_placement_state(self, agent, agent_db):
    """Queue a deferred synchronization of this agent's resource
    providers to placement and record the outcome on the agent DB row.

    The placement client calls are wrapped in a single batch handed to
    ``self._batch_notifier``; the batch sets
    ``agent_db.resources_synced`` according to its overall result.
    """
    configurations = agent['configurations']
    mech_driver = self._agents.mechanism_driver_by_agent_type(
        agent['agent_type'])
    uuid_ns = mech_driver.resource_provider_uuid5_namespace
    supported_vnic_types = mech_driver.supported_vnic_types
    device_mappings = mech_driver.get_standard_device_mappings(agent)
    try:
        agent_host_rp_uuid = self._get_rp_by_name(
            name=agent['host'])['uuid']
    except ks_exc.HttpError:
        # Delay the error for the same reason as in _get_rp_by_name().
        agent_host_rp_uuid = None

    state = placement_report.PlacementState(
        rp_bandwidths=configurations['resource_provider_bandwidths'],
        rp_inventory_defaults=configurations[
            'resource_provider_inventory_defaults'],
        driver_uuid_namespace=uuid_ns,
        agent_type=agent['agent_type'],
        agent_host=agent['host'],
        agent_host_rp_uuid=agent_host_rp_uuid,
        device_mappings=device_mappings,
        supported_vnic_types=supported_vnic_types,
        client=self._placement_client)

    deferred_batch = state.deferred_sync()

    # NOTE(bence romsics): Some client calls depend on earlier
    # ones, but not all. There are calls in a batch that can succeed
    # independently of earlier calls. Therefore even if a call fails
    # we have to suppress its failure so the later independent calls
    # have a chance to succeed. If we queue up the deferred client
    # calls one by one then we cannot handle errors at the end of
    # a batch. So instead we should wrap the deferred client calls
    # in a single deferred batch which executes the client calls,
    # continuing to the next client call even if there was an error
    # but remembering if an error happened. Then at the end of the
    # batch (also having access to the agent object) set the agent's
    # resources_synced attribute according to the success/failure
    # of the batch. Since each client call does monkey patched I/O
    # we'll yield to other eventlet threads in each call therefore
    # the performance should not be affected by the wrapping.
    def batch():
        # Execute every deferred call; suppress (but remember)
        # individual failures so independent later calls still run.
        errors = False
        for deferred in deferred_batch:
            try:
                LOG.debug('placement client: {}'.format(deferred))
                deferred.execute()
            except Exception:
                errors = True
                LOG.exception(
                    'placement client call failed: %s', str(deferred))

        # Persist the overall outcome on the agent DB row and log it.
        resources_synced = not errors
        agent_db.resources_synced = resources_synced
        agent_db.update()
        LOG.debug(
            'Synchronization of resources'
            ' of agent type %(type)s'
            ' at host %(host)s'
            ' to placement %(result)s.',
            {'type': agent['agent_type'],
             'host': agent['host'],
             'result': 'succeeded' if resources_synced else 'failed'})

    self._batch_notifier.queue_event(batch)