def setUp(self, service_plugins=None):
    """Set up the N1kv trunking plugging driver test case.

    :param service_plugins: optional dict of service plugins to load;
        when omitted the default N1kv policy-profile, net-profile and
        L3 plugins are used.
    """
    # Honor a caller-supplied service plugin set; previously the
    # parameter was unconditionally overwritten, making it dead.
    if service_plugins is None:
        service_plugins = {
            constants.CISCO_N1KV: POLICY_PROFILE_PLUGIN,
            constants.CISCO_N1KV_NET_PROFILE: NETWORK_PROFILE_PLUGIN,
            bc.constants.L3: L3_PLUGIN_KLASS}
    ml2_cisco_opts = {
        'n1kv_vsm_ips': ['127.0.0.1'],
        'username': '******',
        'password': '******',
        'default_policy_profile': DEFAULT_PP
    }
    for opt, val in ml2_cisco_opts.items():
        ml2_n1kv_config.cfg.CONF.set_override(opt, val, 'ml2_cisco_n1kv')
    super(TestN1kvTrunkingPluggingDriver, self).setUp(
        service_plugins=service_plugins)
    # save possible test_lib.test_config 'config_files' dict entry so we
    # can restore it after tests since we will change its value
    self._old_config_files = copy.copy(
        test_lib.test_config.get('config_files'))
    # include config files for device manager service plugin and router
    # service plugin since we define a number of hosting device templates,
    # hosting devices and routertypes there
    self._add_device_manager_plugin_ini_file()
    self._add_router_plugin_ini_file()
    #TODO(bobmel): Fix bug in test_extensions.py and we can remove the
    # below call to setup_config()
    self.setup_config()
    self.net_plugin = bc.get_plugin(constants.CISCO_N1KV_NET_PROFILE)
    self.policy_plugin = bc.get_plugin(constants.CISCO_N1KV)
def update_subports(self, port):
    """Set port attributes for trunk subports.

    For baremetal deployments only, set the neutron port attributes
    during the bind_port event.
    """
    trunk_details = port.get('trunk_details')
    # NOTE(review): host id is taken from the DNSNAME attribute of the
    # parent port — confirm against the baremetal binding workflow.
    host_id = port.get(bc.dns.DNSNAME)
    el_context = bc.get_context().elevated()
    core_plugin = bc.get_plugin()
    for subport in trunk_details['sub_ports']:
        port_attrs = {
            bc.portbindings.HOST_ID: host_id,
            'device_owner': bc.trunk_consts.TRUNK_SUBPORT_OWNER}
        core_plugin.update_port(el_context, subport['port_id'],
                                {'port': port_attrs})
    # Set trunk to ACTIVE status.
    trunk_obj = bc.trunk_objects.Trunk.get_object(
        el_context, id=trunk_details['trunk_id'])
    trunk_obj.update(status=bc.trunk_consts.ACTIVE_STATUS)
def mgmt_sec_grp_id(cls):
    """Returns id of security group used by the management network."""
    core_plugin = bc.get_plugin()
    if not utils.is_extension_supported(core_plugin, "security-group"):
        return
    if cls._mgmt_sec_grp_id is None:
        # Look up the default security group of the L3 admin tenant
        # and cache its id on the class.
        tenant_id = cls.l3_tenant_id()
        filters = {'tenant_id': [tenant_id],
                   'name': [cfg.CONF.general.default_security_group]}
        res = core_plugin.get_security_groups(
            bc.context.get_admin_context(), filters, ['id'])
        num_matches = len(res)
        if num_matches == 1:
            cls._mgmt_sec_grp_id = res[0].get('id', None)
        elif num_matches > 1:
            # the mgmt sec group must be unique.
            LOG.error('The security group for the management network '
                      'does not have unique name. Please ensure that '
                      'it is.')
        else:
            # Service VM Mgmt security group is not present.
            LOG.error('There is no security group for the management '
                      'network. Please create one.')
    return cls._mgmt_sec_grp_id
def _get_profile_id(cls, p_type, resource, name):
    """Return the uuid of the profile *name* of type *p_type*.

    Logs an error and returns None when the profile is missing or its
    name is ambiguous.
    """
    try:
        tenant_id = bc.get_plugin(
            cisco_constants.DEVICE_MANAGER).l3_tenant_id()
    except AttributeError:
        # Device manager plugin not loaded.
        return
    if tenant_id is None:
        return
    admin_ctx = bc.context.get_admin_context()
    filters = {'tenant_id': [tenant_id], 'name': [name]}
    if p_type == 'net_profile':
        plugin = bc.get_plugin(constants.CISCO_N1KV_NET_PROFILE)
        profiles = plugin.get_network_profiles(admin_ctx, filters, ['id'])
    else:
        plugin = bc.get_plugin(constants.CISCO_N1KV)
        profiles = plugin.get_policy_profiles(admin_ctx, filters, ['id'])
    num_matches = len(profiles)
    if num_matches == 1:
        return profiles[0]['id']
    elif num_matches > 1:
        # Profile must have a unique name.
        LOG.error(_LE('The %(resource)s %(name)s does not have unique '
                      'name. Please refer to admin guide and create one.'),
                  {'resource': resource, 'name': name})
    else:
        # Profile has not been created.
        LOG.error(_LE('There is no %(resource)s %(name)s. Please refer to '
                      'admin guide and create one.'),
                  {'resource': resource, 'name': name})
def apic_driver(self):
    """Get APIC driver

    There are different drivers for the GBP workflow
    and Neutron workflow for APIC. First see if the GBP
    workflow is active, and if so get the APIC driver for it.
    If the GBP service isn't installed, try to get the driver
    from the Neutron (APIC ML2) workflow.
    """
    # Lazily resolve and cache the driver on first access.
    if not self._apic_driver:
        try:
            # GBP workflow: an AttributeError anywhere in this chain
            # (plugin not registered, or attribute layout different)
            # means the GBP service plugin is unavailable.
            self._apic_driver = (
                bc.get_plugin('GROUP_POLICY').policy_driver_manager.
                policy_drivers['apic'].obj)
            self._get_ext_net_name = self._get_ext_net_name_gbp
            self._get_vrf_context = self._get_vrf_context_gbp
        except AttributeError:
            LOG.info("GBP service plugin not present -- will "
                     "try APIC ML2 plugin.")
        if not self._apic_driver:
            # Fall back to the Neutron (APIC ML2) workflow driver;
            # a KeyError means no 'cisco_apic_ml2' mech driver loaded.
            try:
                self._apic_driver = (self._core_plugin.mechanism_manager.
                                     mech_drivers['cisco_apic_ml2'].obj)
                self._get_ext_net_name = self._get_ext_net_name_neutron
                self._get_vrf_context = self._get_vrf_context_neutron
            except KeyError:
                LOG.error("APIC ML2 plugin not present: "
                          "no APIC ML2 driver could be found.")
                raise AciDriverNoAciDriverInstalledOrConfigured()
    return self._apic_driver
def setUp(self):
    """Create the trunk handler under test with mocked collaborators."""
    super(TestNexusTrunkHandler, self).setUp()
    self.handler = nexus_trunk.NexusTrunkHandler()
    self.plugin = bc.get_plugin()
    self.plugin.update_port = mock.Mock()
    trunk_patcher = mock.patch.object(bc.trunk_objects.Trunk,
                                      'get_object')
    self.mock_trunk_get_object = trunk_patcher.start()
def _agent_notification_bulk(self, context, method, routers,
                             hosting_device, operation):
    """Notify the Cisco cfg agent handling a particular hosting_device.

    A single notification can contain multiple routers.
    """
    # Use a conditional expression rather than the legacy
    # 'cond and a or b' idiom, which breaks when 'a' is falsy.
    admin_context = context if context.is_admin else context.elevated()
    dmplugin = bc.get_plugin(cisco_constants.DEVICE_MANAGER)
    if (hosting_device is not None and
            utils.is_extension_supported(dmplugin, CFGAGENT_SCHED)):
        agents = dmplugin.get_cfg_agents_for_hosting_devices(
            admin_context, [hosting_device['id']], admin_state_up=True,
            schedule=True)
        if agents:
            # Only the first (scheduled) agent is notified.
            agent = agents[0]
            LOG.debug(
                'Notify %(agent_type)s at %(topic)s.%(host)s the '
                'message %(method)s [BULK]', {
                    'agent_type': agent.agent_type,
                    'topic': CFG_AGENT_L3_ROUTING,
                    'host': agent.host,
                    'method': method
                })
            cctxt = self.client.prepare(server=agent.host, version='1.1')
            cctxt.cast(context, method, routers=routers)
def _agent_notification(self, context, method, routers, operation,
                        shuffle_agents):
    """Notify individual Cisco cfg agents."""
    # Use a conditional expression rather than the legacy
    # 'cond and a or b' idiom, which breaks when 'a' is falsy.
    admin_context = context if context.is_admin else context.elevated()
    dmplugin = bc.get_plugin(cisco_constants.DEVICE_MANAGER)
    for router in routers:
        if (router['hosting_device'] is not None and
                utils.is_extension_supported(dmplugin, CFGAGENT_SCHED)):
            agents = dmplugin.get_cfg_agents_for_hosting_devices(
                admin_context, [router['hosting_device']['id']],
                admin_state_up=True, schedule=True)
        else:
            # Router is not hosted (or scheduling unsupported); skip it.
            continue
        for agent in agents:
            LOG.debug(
                'Notify %(agent_type)s at %(topic)s.%(host)s the '
                'message %(method)s', {
                    'agent_type': agent.agent_type,
                    'topic': CFG_AGENT_L3_ROUTING,
                    'host': agent.host,
                    'method': method
                })
            cctxt = self.client.prepare(server=agent.host)
            cctxt.cast(context, method, routers=[router['id']])
def handle_non_responding_hosting_devices(self, context, cfg_agent,
                                          hosting_device_ids):
    """Process hosting devices that stopped responding.

    Collects affected logical resources per removed hosting device and
    notifies the cfg agent about the removals.
    """
    e_context = context.elevated()
    hosting_devices = self.get_hosting_devices_qry(
        e_context, hosting_device_ids).all()
    # 'hosting_info' is dictionary with ids of removed hosting
    # devices and the affected logical resources for each
    # removed hosting device:
    #    {'hd_id1': {'routers': [id1, id2, ...],
    #                'fw': [id1, ...],
    #                ...},
    #     'hd_id2': {'routers': [id3, id4, ...]},
    #                'fw': [id1, ...],
    #                ...},
    #     ...}
    # Dict comprehension; the old dict((id, {}) ...) form shadowed the
    # builtin 'id'.
    hosting_info = {hd_id: {} for hd_id in hosting_device_ids}
    #TODO(bobmel): Modify so service plugins register themselves
    try:
        l3plugin = bc.get_plugin(bc.constants.L3)
        l3plugin.handle_non_responding_hosting_devices(
            context, hosting_devices, hosting_info)
    except AttributeError:
        # No L3 plugin (or one without this hook) is loaded; best-effort.
        pass
    notifier = self.agent_notifiers.get(c_constants.AGENT_TYPE_CFG)
    for hd in hosting_devices:
        if (self._process_non_responsive_hosting_device(e_context, hd) and
                notifier):
            notifier.hosting_devices_removed(context, hosting_info, False,
                                             cfg_agent)
def mgmt_nw_id(cls):
    """Returns id of the management network."""
    if cls._mgmt_nw_uuid is None:
        tenant_id = cls.l3_tenant_id()
        if not tenant_id:
            return
        filters = {'tenant_id': [tenant_id],
                   'name': [cfg.CONF.general.management_network]}
        nets = bc.get_plugin().get_networks(
            bc.context.get_admin_context(), filters, ['id', 'subnets'])
        if len(nets) == 1:
            subnets = nets[0]['subnets']
            if not subnets:
                LOG.error('The management network has no subnet. '
                          'Please assign one.')
                return
            elif len(subnets) > 1:
                LOG.info(
                    'The management network has %d subnets. The '
                    'first one will be used.', len(subnets))
            # Cache both the network uuid and its first subnet uuid.
            cls._mgmt_nw_uuid = nets[0].get('id')
            cls._mgmt_subnet_uuid = subnets[0]
        elif len(nets) > 1:
            # Management network must have a unique name.
            LOG.error('The management network for does not have '
                      'unique name. Please ensure that it is.')
        else:
            # Management network has not been created.
            LOG.error('There is no virtual management network. Please '
                      'create one.')
    return cls._mgmt_nw_uuid
def get_plugin(self):
    """Return the L3 router service plugin, or raise HTTP 404."""
    plugin = bc.get_plugin(bc.constants.L3)
    if plugin:
        return plugin
    LOG.error('No L3 router service plugin registered to '
              'handle routertype-aware scheduling')
    msg = _('The resource could not be found.')
    raise webob.exc.HTTPNotFound(msg)
def get_plugin(self):
    """Return the device manager service plugin, or raise HTTP 404."""
    plugin = bc.get_plugin(cisco_constants.DEVICE_MANAGER)
    if plugin:
        return plugin
    LOG.error('No Device manager service plugin registered to '
              'handle hosting device scheduling')
    msg = _('The resource could not be found.')
    raise webob.exc.HTTPNotFound(msg)
def get_resources(cls):
    """Returns Extended Resources."""
    plugin = bc.get_plugin(constants.CISCO_N1KV)
    exts = []
    # Collection names are the pluralized resource names.
    for resource_name in (POLICY_PROFILE, 'policy_profile_binding'):
        collection_name = resource_name + 's'
        controller = base.create_resource(
            collection_name, resource_name, plugin,
            RESOURCE_ATTRIBUTE_MAP.get(collection_name))
        exts.append(
            extensions.ResourceExtension(collection_name, controller))
    return exts
def _register_cfg_agent_states(self, host_a_active=True,
                               host_b_active=False,
                               host_c_active=False):
    """Register zero, one, two, or three L3 config agents."""
    l3_cfg_host_a = {
        'binary': 'neutron-cisco-cfg-agent',
        'host': L3_CFG_HOST_A,
        'topic': cisco_constants.CFG_AGENT,
        'configurations': {
            'service_agents': [cisco_constants.AGENT_TYPE_L3_CFG],
            'total routers': 0,
            'total ex_gw_ports': 0,
            'total interfaces': 0,
            'total floating_ips': 0,
            'hosting_devices': 0,
            'non_responding_hosting_devices': {}
        },
        'local_time': str(timeutils.utcnow()),
        'agent_type': cisco_constants.AGENT_TYPE_CFG
    }
    agent_callback = agents_db.AgentExtRpcCallback()
    dev_mgr_callback = devices_cfgagent_rpc_cb.DeviceMgrCfgRpcCallback(
        bc.get_plugin(cisco_constants.DEVICE_MANAGER))
    if host_a_active is True:
        agent_callback.report_state(
            self.adminContext,
            agent_state={'agent_state': l3_cfg_host_a},
            time=datetime.utcnow().strftime(ISO8601_TIME_FORMAT))
        dev_mgr_callback.register_for_duty(self.adminContext,
                                           L3_CFG_HOST_A)
    if host_b_active is True:
        l3_cfg_host_b = copy.deepcopy(l3_cfg_host_a)
        l3_cfg_host_b['host'] = L3_CFG_HOST_B
        # Bug fix: a trailing comma previously made local_time a 1-tuple.
        l3_cfg_host_b['local_time'] = str(timeutils.utcnow())
        agent_callback.report_state(
            self.adminContext,
            agent_state={'agent_state': l3_cfg_host_b},
            time=datetime.utcnow().strftime(ISO8601_TIME_FORMAT))
        dev_mgr_callback.register_for_duty(self.adminContext,
                                           L3_CFG_HOST_B)
    if host_c_active is True:
        l3_cfg_host_c = copy.deepcopy(l3_cfg_host_a)
        l3_cfg_host_c['host'] = L3_CFG_HOST_C
        # Bug fix: a trailing comma previously made local_time a 1-tuple.
        l3_cfg_host_c['local_time'] = str(timeutils.utcnow())
        agent_callback.report_state(
            self.adminContext,
            agent_state={'agent_state': l3_cfg_host_c},
            time=datetime.utcnow().strftime(ISO8601_TIME_FORMAT))
        # Bug fix: host C previously registered for duty as
        # L3_CFG_HOST_B (copy-paste error).
        dev_mgr_callback.register_for_duty(self.adminContext,
                                           L3_CFG_HOST_C)
def is_trunk_subport_baremetal(self, port):
    """Return True when *port* is a subport of a baremetal trunk."""
    el_context = bc.get_context().elevated()
    subport_obj = bc.trunk_objects.SubPort.get_object(
        el_context, port_id=port['id'])
    # Not a trunk subport at all.
    if not subport_obj:
        return False
    trunk_obj = bc.trunk_objects.Trunk.get_object(
        el_context, id=subport_obj.trunk_id)
    trunk_port = bc.get_plugin().get_port(el_context, trunk_obj.port_id)
    return nexus_help.is_baremetal(trunk_port)
def get_resources(cls):
    """Returns Extended Resources."""
    plugin = bc.get_plugin(constants.CISCO_N1KV_NET_PROFILE)
    # (resource, collection) pairs served by this extension.
    pairs = ((NETWORK_PROFILE, NETWORK_PROFILES),
             (NETWORK_PROFILE_BINDING, NETWORK_PROFILE_BINDINGS))
    exts = []
    for resource_name, collection_name in pairs:
        controller = base.create_resource(
            collection_name, resource_name, plugin,
            RESOURCE_ATTRIBUTE_MAP.get(collection_name))
        exts.append(
            extensions.ResourceExtension(collection_name, controller))
    return exts
def setUp(self):
    """Create the MD trunk handler under test with mocked collaborators."""
    super(TestNexusTrunkHandler, self).setUp()
    self.handler = trunk.NexusMDTrunkHandler()
    self.plugin = bc.get_plugin()
    self.plugin.get_port = mock.Mock()
    self.plugin.update_port = mock.Mock()
    self.mock_subport_get_object = mock.patch.object(
        bc.trunk_objects.SubPort, 'get_object',
        return_value=TestSubPort).start()
    # Bug fix: Trunk.get_object was patched twice; the first patch
    # (return_value=TestTrunk) was immediately shadowed by the second,
    # plain patch, so it was dead code. Keep only the effective patch.
    # NOTE(review): if tests actually expect TestTrunk to be returned,
    # the intended fix may instead be to keep return_value=TestTrunk —
    # confirm against the test assertions.
    self.mock_trunk_get_object = mock.patch.object(
        bc.trunk_objects.Trunk, 'get_object').start()
def setUp(self, core_plugin=None, l3_plugin=None, dm_plugin=None,
          ext_mgr=None):
    """Set up core, L3 and (optionally) device manager plugins."""
    if not core_plugin:
        core_plugin = CORE_PLUGIN_KLASS
    if l3_plugin is None:
        l3_plugin = L3_PLUGIN_KLASS
    service_plugins = {'l3_plugin_name': l3_plugin}
    if dm_plugin is not None:
        service_plugins['dm_plugin_name'] = dm_plugin
    cfg.CONF.set_override('api_extensions_path',
                          l3_router_test_support.extensions_path)
    ext_mgr = ext_mgr or L3TestRoutertypeExtensionManager()
    super(TestRoutertypeDBPlugin, self).setUp(
        plugin=core_plugin, service_plugins=service_plugins,
        ext_mgr=ext_mgr)
    self.l3_plugin = bc.get_plugin(bc.constants.L3)
    # Ensure we use policy definitions from our repo
    cfg.CONF.set_override('policy_file', policy_path, 'oslo_policy')
def setUp(self, core_plugin=None, dm_plugin=None, ext_mgr=None):
    """Set up the hosting-device cfg-agent scheduler test fixture.

    Loads the core plugin and device manager extensions, mocks out
    keystone/service-VM/file interactions, and creates the management
    network used by the tests.
    """
    # Save the global RESOURCE_ATTRIBUTE_MAP
    self.saved_attr_map = {}
    for resource, attrs in six.iteritems(
            attributes.RESOURCE_ATTRIBUTE_MAP):
        self.saved_attr_map[resource] = attrs.copy()
    if not core_plugin:
        core_plugin = CORE_PLUGIN_KLASS
    service_plugins = {}
    cfg.CONF.set_override('api_extensions_path',
                          dev_mgr_test_support.extensions_path)
    if ext_mgr is None:
        ext_mgr = dev_mgr_test_support.TestDeviceManagerExtensionManager()
    super(HostingDeviceConfigAgentSchedulerTestCaseBase,
          self).setUp(plugin=core_plugin,
                      service_plugins=service_plugins,
                      ext_mgr=ext_mgr)
    # Ensure we use policy definitions from our repo
    cfg.CONF.set_override('policy_file', policy_path, 'oslo_policy')
    self.core_plugin = bc.get_plugin()
    self.plugin = self.core_plugin
    self.setup_notification_driver()
    # Older Neutron releases require sorting to be enabled explicitly.
    if NEUTRON_VERSION.version[0] <= NEUTRON_NEWTON_VERSION.version[0]:
        cfg.CONF.set_override('allow_sorting', True)
    self._define_keystone_authtoken()
    self._mock_l3_admin_tenant()
    self._create_mgmt_nw_for_tests(self.fmt)
    # in unit tests we don't use keystone so we mock that session
    self.core_plugin._svc_vm_mgr_obj = service_vm_lib.ServiceVMManager(
        True, None, None, None, '', keystone_session=mock.MagicMock())
    self._mock_svc_vm_create_delete(self.core_plugin)
    self._mock_io_file_ops()
    if self.mock_cfg_agent_notifiers is True:
        self._mock_cfg_agent_notifier(self.plugin)
    self._other_tenant_id = dev_mgr_test_support._uuid()
    self.adminContext = n_context.get_admin_context()
def _test_remove_all_hosting_devices(self):
    """Removes all hosting devices created during a test."""
    admin_ctx = n_context.get_admin_context()
    dev_mgr = bc.get_plugin(cisco_constants.DEVICE_MANAGER)
    dev_mgr.delete_all_hosting_devices(admin_ctx, True)
def setUp(self, core_plugin=None, l3_plugin=None, dm_plugin=None,
          ext_mgr=None, create_mgmt_nw=True, service_plugins=None):
    """Set up the L3 router appliance test fixture.

    Loads core/L3 plugins and extensions, configures routing options,
    mocks keystone/service-VM/notifier/backlog machinery, and
    optionally creates the management network and routertypes.
    """
    # Save the global RESOURCE_ATTRIBUTE_MAP
    self.saved_attr_map = {}
    for resource, attrs in six.iteritems(
            attributes.RESOURCE_ATTRIBUTE_MAP):
        self.saved_attr_map[resource] = attrs.copy()
    if not core_plugin:
        core_plugin = CORE_PLUGIN_KLASS
    if l3_plugin is None:
        l3_plugin = L3_PLUGIN_KLASS
    if not service_plugins:
        service_plugins = {'l3_plugin_name': l3_plugin}
    cfg.CONF.set_override('api_extensions_path',
                          l3_router_test_support.extensions_path)
    # for these tests we need to enable overlapping ips
    cfg.CONF.set_default('allow_overlapping_ips', True)
    cfg.CONF.set_default('max_routes', 3)
    if ext_mgr is None:
        ext_mgr = TestL3RouterApplianceExtensionManager()
    super(L3RouterApplianceTestCaseBase,
          self).setUp(plugin=core_plugin,
                      service_plugins=service_plugins, ext_mgr=ext_mgr)
    # Ensure we use policy definitions from our repo
    cfg.CONF.set_override('policy_file', policy_path, 'oslo_policy')
    self.core_plugin = bc.get_plugin()
    self.l3_plugin = bc.get_plugin(bc.constants.L3)
    self.setup_notification_driver()
    # Older Neutron releases require sorting to be enabled explicitly.
    if NEUTRON_VERSION.version[0] <= NEUTRON_NEWTON_VERSION.version[0]:
        cfg.CONF.set_override('allow_sorting', True)
    self._define_keystone_authtoken()
    cfg.CONF.register_opt(
        cfg.BoolOpt('router_auto_schedule', default=True,
                    help=_('Allow auto scheduling of routers to '
                           'L3 agent.')))
    # Subclasses may pin a specific router type for their scenarios.
    if self.router_type is not None:
        cfg.CONF.set_override('default_router_type', self.router_type,
                              group='routing')
    self._mock_l3_admin_tenant()
    self._created_mgmt_nw = create_mgmt_nw
    if create_mgmt_nw is True:
        self._create_mgmt_nw_for_tests(self.fmt)
    if self.configure_routertypes is True:
        templates = self._test_create_hosting_device_templates()
        self._test_create_routertypes(templates.values())
    # in unit tests we don't use keystone so we mock that session
    self.core_plugin._svc_vm_mgr_obj = service_vm_lib.ServiceVMManager(
        True, None, None, None, '', keystone_session=mock.MagicMock())
    self._mock_svc_vm_create_delete(self.core_plugin)
    self._mock_io_file_ops()
    if self.mock_cfg_agent_notifiers is True:
        self._mock_cfg_agent_notifier(self.l3_plugin)
    # mock the periodic router backlog processing in the tests
    self._mock_backlog_processing(self.l3_plugin)
def _dev_mgr(self):
    """Accessor for the device manager service plugin."""
    plugin = bc.get_plugin(cisco_constants.DEVICE_MANAGER)
    return plugin
def _core_plugin(self):
    """Accessor for the core plugin."""
    plugin = bc.get_plugin()
    return plugin
def _core_plugin(self):
    """Lazily look up and cache the core plugin on this instance."""
    # hasattr only swallows AttributeError, matching the original
    # try/except form.
    if not hasattr(self, '_plugin'):
        self._plugin = bc.get_plugin()
    return self._plugin
def __init__(self):
    """Cache the core plugin reference at construction time."""
    core_plugin = bc.get_plugin()
    self.plugin = core_plugin
def l3_plugin(self):
    """Lazily resolve and cache the L3 router/NAT service plugin."""
    if self._l3_plugin:
        return self._l3_plugin
    self._l3_plugin = bc.get_plugin(svc_constants.L3_ROUTER_NAT)
    return self._l3_plugin
def _l3_plugin(self):
    """Accessor for the L3 service plugin."""
    plugin = bc.get_plugin(bc.constants.L3)
    return plugin
def l3_plugin(self):
    """Lazily resolve and cache the L3 service plugin."""
    if self._l3_plugin:
        return self._l3_plugin
    self._l3_plugin = bc.get_plugin(bc.constants.L3)
    return self._l3_plugin
def __init__(self, driver, interval=None):
    """Store the driver, polling interval, and core plugin reference.

    :param driver: backing driver instance
    :param interval: optional polling interval
    """
    self.driver = driver
    self.interval = interval
    self.core_plugin = bc.get_plugin()
def _l3_plugin(self):
    """Accessor for the L3 router/NAT service plugin."""
    plugin = bc.get_plugin(constants.L3_ROUTER_NAT)
    return plugin