def get_connected_nsxlib(nsx_username=None, nsx_password=None,
                         use_basic_auth=False):
    """Return an NSX-v3 library wrapper, reusing a shared instance.

    A call that supplies explicit credentials (or requests basic auth)
    always gets a dedicated, uncached wrapper.  All other callers share
    a single lazily-created module-level instance.
    """
    global _NSXLIB
    # Explicit credentials / basic auth -> dedicated wrapper, never cached.
    if nsx_username or use_basic_auth:
        return v3_utils.get_nsxlib_wrapper(
            nsx_username, nsx_password, use_basic_auth)
    # Lazily build the shared wrapper on first use and keep reusing it.
    _NSXLIB = _NSXLIB or v3_utils.get_nsxlib_wrapper()
    return _NSXLIB
def get_connected_nsxlib(nsx_username=None, nsx_password=None,
                         use_basic_auth=False, plugin_conf=None):
    """Return an NSX-v3 library wrapper, reusing a shared instance.

    A call that supplies explicit credentials (or requests basic auth)
    always gets a dedicated, uncached wrapper built with the given
    plugin configuration.  All other callers share a single
    lazily-created module-level instance.
    """
    global _NSXLIB
    # Explicit credentials / basic auth -> dedicated wrapper, never cached.
    if nsx_username or use_basic_auth:
        return v3_utils.get_nsxlib_wrapper(
            nsx_username, nsx_password, use_basic_auth, plugin_conf)
    # Lazily build the shared wrapper on first use and keep reusing it.
    _NSXLIB = _NSXLIB or v3_utils.get_nsxlib_wrapper(plugin_conf=plugin_conf)
    return _NSXLIB
def setUp(self):
    """Build the QoS plugin, a fake context, policy/rule fixtures and
    the DB/NSX mocks used by the notification tests.

    NOTE: statement order matters — config overrides and DB patches
    must be in place before the plugin and fixture objects are built.
    """
    super(TestQosNsxV3Notification, self).setUp()
    self.setup_coreplugin(PLUGIN_NAME)
    # Add a dummy notification driver that calls our handler directly
    # (to skip the message queue)
    cfg.CONF.set_override("notification_drivers", [
        'vmware_nsx.tests.unit.services.qos.fake_notifier.'
        'DummyNotificationDriver'
    ], "qos")
    self.qos_plugin = qos_plugin.QoSPlugin()
    self.ctxt = context.Context('fake_user', 'fake_tenant')
    # Neutralize session refresh/expunge so the fake context needs no
    # real DB session behind it.
    mock.patch.object(self.ctxt.session, 'refresh').start()
    mock.patch.object(self.ctxt.session, 'expunge').start()
    # Raw API-style payloads for a QoS policy and its rules.
    self.policy_data = {
        'policy': {
            'id': uuidutils.generate_uuid(),
            'tenant_id': uuidutils.generate_uuid(),
            'name': 'test-policy',
            'description': 'Test policy description',
            'shared': True
        }
    }
    self.rule_data = {
        'bandwidth_limit_rule': {
            'id': uuidutils.generate_uuid(),
            'max_kbps': 2000,
            'max_burst_kbps': 150
        }
    }
    self.dscp_rule_data = {
        'dscp_marking_rule': {
            'id': uuidutils.generate_uuid(),
            'dscp_mark': 22
        }
    }
    # Neutron object counterparts of the payloads above.
    self.policy = policy_object.QosPolicy(self.ctxt,
                                          **self.policy_data['policy'])
    self.rule = rule_object.QosBandwidthLimitRule(
        self.ctxt, **self.rule_data['bandwidth_limit_rule'])
    self.dscp_rule = rule_object.QosDscpMarkingRule(
        self.ctxt, **self.dscp_rule_data['dscp_marking_rule'])
    # Fake NSX switching profile the mocked DB lookup will return.
    self.fake_profile_id = 'fake_profile'
    self.fake_profile = {'id': self.fake_profile_id}
    # Stub out all neutron object DB operations so no real DB is hit.
    mock.patch('neutron.objects.db.api.create_object').start()
    mock.patch('neutron.objects.db.api.update_object').start()
    mock.patch('neutron.objects.db.api.delete_object').start()
    mock.patch(
        'neutron.objects.qos.policy.QosPolicy.obj_load_attr').start()
    # Map any policy to the fake NSX profile above.
    mock.patch.object(nsx_db, 'get_switch_profile_by_qos_policy',
                      return_value=self.fake_profile_id).start()
    self.peak_bw_multiplier = cfg.CONF.NSX.qos_peak_bw_multiplier
    self.nsxlib = v3_utils.get_nsxlib_wrapper()
def setUp(self):
    """Build the QoS plugin, a fake context, policy/rule fixtures and
    the DB/NSX mocks used by the notification tests.

    NOTE: statement order matters — the driver reset and DB patches
    must be in place before the plugin and fixture objects are built.
    """
    # Reset the driver to re-create it (the module keeps a singleton).
    qos_driver.DRIVER = None
    super(TestQosNsxV3Notification, self).setUp()
    self.setup_coreplugin(PLUGIN_NAME)
    self.qos_plugin = qos_plugin.QoSPlugin()
    self.ctxt = context.Context('fake_user', 'fake_tenant')
    # Neutralize session refresh/expunge so the fake context needs no
    # real DB session behind it.
    mock.patch.object(self.ctxt.session, 'refresh').start()
    mock.patch.object(self.ctxt.session, 'expunge').start()
    # Raw API-style payloads for a QoS policy and its rules.
    self.policy_data = {
        'policy': {
            'id': uuidutils.generate_uuid(),
            'project_id': uuidutils.generate_uuid(),
            'name': 'test-policy',
            'description': 'Test policy description',
            'shared': True
        }
    }
    self.rule_data = {
        'bandwidth_limit_rule': {
            'id': uuidutils.generate_uuid(),
            'max_kbps': 2000,
            'max_burst_kbps': 150
        }
    }
    self.dscp_rule_data = {
        'dscp_marking_rule': {
            'id': uuidutils.generate_uuid(),
            'dscp_mark': 22
        }
    }
    # Neutron object counterparts of the payloads above.
    self.policy = policy_object.QosPolicy(self.ctxt,
                                          **self.policy_data['policy'])
    self.rule = rule_object.QosBandwidthLimitRule(
        self.ctxt, **self.rule_data['bandwidth_limit_rule'])
    self.dscp_rule = rule_object.QosDscpMarkingRule(
        self.ctxt, **self.dscp_rule_data['dscp_marking_rule'])
    # Fake NSX switching profile the mocked DB lookup will return.
    self.fake_profile_id = 'fake_profile'
    self.fake_profile = {'id': self.fake_profile_id}
    # Stub out all neutron object DB operations so no real DB is hit.
    mock.patch('neutron.objects.db.api.create_object').start()
    mock.patch('neutron.objects.db.api.update_object').start()
    mock.patch('neutron.objects.db.api.delete_object').start()
    # Map any policy to the fake NSX profile above.
    mock.patch.object(nsx_db, 'get_switch_profile_by_qos_policy',
                      return_value=self.fake_profile_id).start()
    self.peak_bw_multiplier = cfg.CONF.NSX.qos_peak_bw_multiplier
    self.nsxlib = v3_utils.get_nsxlib_wrapper()
def setUp(self):
    """Build the QoS plugin, a fake context, policy/rule fixtures
    (including an ingress bandwidth-limit rule) and the DB/NSX mocks
    used by the notification tests.

    NOTE: statement order matters — the driver reset and DB patches
    must be in place before the plugin and fixture objects are built.
    """
    # Reset the driver to re-create it (the module keeps a singleton).
    qos_driver.DRIVER = None
    super(TestQosNsxV3Notification, self).setUp()
    self.setup_coreplugin(PLUGIN_NAME)
    self.qos_plugin = qos_plugin.QoSPlugin()
    self.ctxt = context.Context('fake_user', 'fake_tenant')
    # Neutralize session refresh/expunge so the fake context needs no
    # real DB session behind it.
    mock.patch.object(self.ctxt.session, 'refresh').start()
    mock.patch.object(self.ctxt.session, 'expunge').start()
    # Raw API-style payloads for a QoS policy and its rules.
    self.policy_data = {
        'policy': {'id': uuidutils.generate_uuid(),
                   'project_id': uuidutils.generate_uuid(),
                   'name': 'test-policy',
                   'description': 'Test policy description',
                   'shared': True}}
    self.rule_data = {
        'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(),
                                 'max_kbps': 2000,
                                 'max_burst_kbps': 150}}
    self.ingress_rule_data = {
        'bandwidth_limit_rule': {'id': uuidutils.generate_uuid(),
                                 'max_kbps': 3000,
                                 'max_burst_kbps': 350,
                                 'direction': 'ingress'}}
    self.dscp_rule_data = {
        'dscp_marking_rule': {'id': uuidutils.generate_uuid(),
                              'dscp_mark': 22}}
    # Neutron object counterparts of the payloads above.
    self.policy = QoSPolicy(
        self.ctxt, **self.policy_data['policy'])
    # egress BW limit rule
    self.rule = QosBandwidthLimitRule(
        self.ctxt, **self.rule_data['bandwidth_limit_rule'])
    # ingress bw limit rule
    self.ingress_rule = QosBandwidthLimitRule(
        self.ctxt, **self.ingress_rule_data['bandwidth_limit_rule'])
    self.dscp_rule = QosDscpMarkingRule(
        self.ctxt, **self.dscp_rule_data['dscp_marking_rule'])
    # Fake NSX switching profile the mocked DB lookup will return.
    self.fake_profile_id = 'fake_profile'
    self.fake_profile = {'id': self.fake_profile_id}
    # Stub out all neutron object DB operations so no real DB is hit.
    mock.patch('neutron.objects.db.api.create_object').start()
    mock.patch('neutron.objects.db.api.update_object').start()
    mock.patch('neutron.objects.db.api.delete_object').start()
    # Map any policy to the fake NSX profile above.
    mock.patch.object(nsx_db, 'get_switch_profile_by_qos_policy',
                      return_value=self.fake_profile_id).start()
    self.peak_bw_multiplier = cfg.CONF.NSX.qos_peak_bw_multiplier
    self.nsxlib = v3_utils.get_nsxlib_wrapper()
def _create_local_span(self, context, src_port_id, dest_port_id,
                       direction, tags):
    """Create a PortMirroring session on the backend for local SPAN.

    Creates the mirror session on the NSX backend first, then records
    the tap-flow -> session mapping in the Neutron DB; if the DB write
    fails, the backend session is deleted again (manual rollback).
    """
    tf = context.tap_flow
    # Backend expects a list of source ports and destination ports.
    # Due to TaaS API requirements, we are only able to add one port
    # as a source port and one port as a destination port in a single
    # request. Hence we send a list of one port for source_ports
    # and dest_ports.
    nsx_src_ports = self._convert_to_backend_source_port(
        context._plugin_context.session, src_port_id)
    nsx_dest_ports = self._convert_to_backend_dest_port(
        context._plugin_context.session, dest_port_id)
    # Create port mirror session on the backend
    try:
        nsxlib = v3_utils.get_nsxlib_wrapper()
        pm_session = nsxlib.port_mirror.create_session(
            source_ports=nsx_src_ports,
            dest_ports=nsx_dest_ports,
            direction=direction,
            description=tf.get('description'),
            name=tf.get('name'),
            tags=tags)
    except nsxlib_exc.ManagerError:
        # Backend refused the session: log and re-raise so the plugin
        # layer rolls back the neutron-side tap flow.
        with excutils.save_and_reraise_exception():
            LOG.error(
                _LE("Unable to create port mirror session %s "
                    "on NSX backend, rolling back "
                    "changes on neutron."), tf['id'])
    # Create internal mappings between tap flow and port mirror session.
    # Ideally DB transactions must take place in precommit, but since we
    # rely on the NSX backend to retrieve the port session UUID, we perform
    # the create action in postcommit.
    try:
        nsx_db.add_port_mirror_session_mapping(
            session=context._plugin_context.session,
            tf_id=tf['id'],
            pm_session_id=pm_session['id'])
    except db_exc.DBError:
        # DB mapping failed: delete the backend session created above
        # so backend and neutron stay consistent, then re-raise.
        with excutils.save_and_reraise_exception():
            LOG.error(
                _LE("Unable to create port mirror session db "
                    "mappings for tap flow %s. Rolling back "
                    "changes in Neutron."), tf['id'])
            nsxlib.port_mirror.delete_session(pm_session['id'])
def create_tap_flow_postcommit(self, context):
    """Create tap flow and port mirror session on NSX backend."""
    tap_flow = context.tap_flow
    # The destination side comes from the tap service the flow targets.
    tap_service = self._get_tap_service(context._plugin_context,
                                        tap_flow.get('tap_service_id'))
    source_port = tap_flow.get('source_port')
    dest_port = tap_service.get('port_id')
    nsxlib = v3_utils.get_nsxlib_wrapper()
    tags = nsxlib.build_v3_tags_payload(
        tap_flow, resource_type='os-neutron-mirror-id',
        project_name=context._plugin_context.tenant_name)
    direction = self._convert_to_backend_direction(
        tap_flow.get('direction'))
    # Local SPAN gets a port mirroring session object; L3SPAN gets a
    # port mirroring switching profile instead.
    if self._is_local_span(context, source_port, dest_port):
        handler = self._create_local_span
    else:
        handler = self._create_l3span
    handler(context, source_port, dest_port, direction, tags)
def get_nsxmanager_lib(self):
    """Prepare agent for NSX Manager API calls.

    Returns the NSX Manager library wrapper.
    """
    nsxlib_wrapper = nsx_utils.get_nsxlib_wrapper()
    return nsxlib_wrapper
def get_nsxmanager_client(self):
    """Prepare agent for NSX Manager API calls.

    Returns the REST client of the NSX Manager library wrapper.
    """
    return nsx_utils.get_nsxlib_wrapper().client
def get_connected_nsxlib():
    """Return a freshly built NSX-v3 library wrapper."""
    nsxlib = v3_utils.get_nsxlib_wrapper()
    return nsxlib