Example 1
def test_create_delete_l3_policy_with_routers(self):
    with self.router() as router1:
        with self.router() as router2:
            routers = [router1['router']['id'], router2['router']['id']]
            l3p = self.create_l3_policy(routers=routers)
            l3p_id = l3p['l3_policy']['id']
            test_l3p_id = self._gbp_plugin.get_l3p_id_from_router_id(
                nctx.get_admin_context(),
                router1['router']['id'])
            self.assertEqual(l3p_id, test_l3p_id)
            test_l3p_id = self._gbp_plugin.get_l3p_id_from_router_id(
                nctx.get_admin_context(),
                router2['router']['id'])
            self.assertEqual(l3p_id, test_l3p_id)
            self.assertEqual(sorted(routers),
                             sorted(l3p['l3_policy']['routers']))
            req = self.new_show_request('l3_policies', l3p_id,
                                        fmt=self.fmt)
            res = self.deserialize(self.fmt,
                                   req.get_response(self.ext_api))
            self.assertEqual(sorted(routers),
                             sorted(res['l3_policy']['routers']))
            req = self.new_delete_request('l3_policies', l3p_id)
            res = req.get_response(self.ext_api)
            self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
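
Since every example on this page revolves around the same call, here is a minimal sketch (assuming neutron-lib semantics) of what get_admin_context() hands back compared with a tenant-scoped context: the admin context has is_admin=True and no project scoping, so plugin and DB calls made with it are not filtered by tenant.

from neutron_lib import context

# Admin context: elevated, not bound to a user or project.
admin_ctx = context.get_admin_context()
assert admin_ctx.is_admin

# A tenant-scoped context, by contrast, carries explicit user/project ids.
user_ctx = context.Context('user_id', 'tenant_id')
assert not user_ctx.is_admin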
Example 2
    def _pull_missed_statuses(self):
        LOG.debug("starting to pull pending statuses...")
        plugin = directory.get_plugin()
        filter = {"status": [n_const.PORT_STATUS_DOWN],
                  "vif_type": ["unbound"]}
        ports = plugin.get_ports(context.get_admin_context(), filter)

        if not ports:
            LOG.debug("no down ports found, done")
            return

        port_fetch_url = utils.get_odl_url(self.PORT_PATH)
        client = odl_client.OpenDaylightRestClient.create_client(
            url=port_fetch_url)

        for port in ports:
            port_id = port["id"]
            response = client.get(port_id)
            if response.status_code != 200:
                LOG.warning("Non-200 response code %s", str(response))
                continue
            odl_status = response.json()['port'][0]['status']
            if odl_status == n_const.PORT_STATUS_ACTIVE:
                # for now we only support transition from DOWN->ACTIVE
                # See https://bugs.launchpad.net/networking-odl/+bug/1686023
                provisioning_blocks.provisioning_complete(
                    context.get_admin_context(),
                    port_id, resources.PORT,
                    provisioning_blocks.L2_AGENT_ENTITY)
        LOG.debug("done pulling pending statuses")
Example 3
def test_delete_listener(self):
    with self.loadbalancer(no_delete=True) as loadbalancer:
        lb_id = loadbalancer['loadbalancer']['id']
        self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id)
        with self.listener(loadbalancer_id=lb_id,
                           no_delete=True) as listener:
            listener_id = listener['listener']['id']
            self._update_status(models.LoadBalancer, constants.ACTIVE,
                                lb_id)
            ctx = context.get_admin_context()
            self.plugin_instance.delete_listener(
                ctx, listener['listener']['id'])
            calls = self.mock_api.delete_listener.call_args_list
            _, called_listener, called_host = calls[0][0]
            self.assertEqual(listener_id, called_listener.id)
            self.assertEqual('host', called_host)
            self.assertEqual(constants.PENDING_DELETE,
                             called_listener.provisioning_status)
            ctx = context.get_admin_context()
            lb = self.plugin_instance.db.get_loadbalancer(ctx, lb_id)
            self.assertEqual(constants.ACTIVE,
                             lb.provisioning_status)
            self.assertRaises(
                loadbalancerv2.EntityNotFound,
                self.plugin_instance.db.get_listener, ctx, listener_id)
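
Several of these tests unpack call_args_list the same way; here is the pattern in isolation (plain unittest.mock, independent of neutron):

from unittest import mock

api = mock.Mock()
api.delete_listener('ctx', 'listener-obj', 'host')

# Each entry in call_args_list is an (args, kwargs) pair.
args, kwargs = api.delete_listener.call_args_list[0]
_, called_listener, called_host = args
assert called_host == 'host'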
Example 4
    def _extend_port_resource_request(port_res, port_db):
        """Add resource request to a port."""
        port_res['resource_request'] = None
        qos_policy = policy_object.QosPolicy.get_port_policy(
            context.get_admin_context(), port_res['id'])
        # Note(lajoskatona): QosPolicyPortBinding is not ready for some
        # reason, so try to fetch the QoS policy directly if there is a
        # qos_policy_id in port_res.
        if (not qos_policy and 'qos_policy_id' in port_res and
                port_res['qos_policy_id']):
            qos_policy = policy_object.QosPolicy.get_policy_obj(
                context.get_admin_context(), port_res['qos_policy_id']
            )

        # Note(lajoskatona): handle the case when the port inherits qos-policy
        # from the network.
        if not qos_policy:
            net = network_object.Network.get_object(
                context.get_admin_context(), id=port_res['network_id'])
            if net and net.qos_policy_id:
                qos_policy = policy_object.QosPolicy.get_network_policy(
                    context.get_admin_context(), net.id)

        if not qos_policy:
            return port_res

        resources = {}
        rule_direction_class = {
            nl_constants.INGRESS_DIRECTION:
                pl_constants.CLASS_NET_BW_INGRESS_KBPS,
            nl_constants.EGRESS_DIRECTION:
                pl_constants.CLASS_NET_BW_EGRESS_KBPS
        }
        for rule in qos_policy.rules:
            if rule.rule_type == qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH:
                resources[rule_direction_class[rule.direction]] = rule.min_kbps
        if not resources:
            return port_res

        vnic_trait = pl_utils.vnic_type_trait(
            port_res[portbindings.VNIC_TYPE])

        # TODO(lajoskatona): Change to handle all segments when any traits
        # support will be available. See Placement spec:
        # https://review.openstack.org/565730
        first_segment = network_object.NetworkSegment.get_objects(
            context.get_admin_context(),
            network_id=port_res['network_id'])[0]

        if not first_segment or not first_segment.physical_network:
            return port_res
        physnet_trait = pl_utils.physnet_trait(
            first_segment.physical_network)

        resource_request = {
            'required': [physnet_trait, vnic_trait],
            'resources': resources
        }
        port_res['resource_request'] = resource_request
        return port_res
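
For illustration, the kind of value this extension ends up attaching; the trait and resource-class strings below follow Placement naming conventions and are assumed, not taken from this code:

# Hypothetical resource_request for a port with one egress
# minimum-bandwidth rule of 1000 kbps on physnet0, vnic_type 'normal':
resource_request = {
    'required': ['CUSTOM_PHYSNET_PHYSNET0', 'CUSTOM_VNIC_TYPE_NORMAL'],
    'resources': {
        'NET_BW_EGRESS_KILOBITS_PER_SEC': 1000,
    },
}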
Example 5
    def reschedule_resources_from_down_agents(self, agent_type,
                                              get_down_bindings,
                                              agent_id_attr,
                                              resource_id_attr,
                                              resource_name,
                                              reschedule_resource,
                                              rescheduling_failed):
        """Reschedule resources from down neutron agents
        if admin state is up.
        """
        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents(agent_type, agent_dead_limit)

        context = ncontext.get_admin_context()
        try:
            down_bindings = get_down_bindings(context, agent_dead_limit)

            agents_back_online = set()
            for binding in down_bindings:
                binding_agent_id = getattr(binding, agent_id_attr)
                binding_resource_id = getattr(binding, resource_id_attr)
                if binding_agent_id in agents_back_online:
                    continue
                else:
                    # we need new context to make sure we use different DB
                    # transaction - otherwise we may fetch same agent record
                    # each time due to REPEATABLE_READ isolation level
                    context = ncontext.get_admin_context()
                    agent = self._get_agent(context, binding_agent_id)
                    if agent.is_active:
                        agents_back_online.add(binding_agent_id)
                        continue

                LOG.warning(
                    "Rescheduling %(resource_name)s %(resource)s from agent "
                    "%(agent)s because the agent did not report to the server "
                    "in the last %(dead_time)s seconds.",
                    {'resource_name': resource_name,
                     'resource': binding_resource_id,
                     'agent': binding_agent_id,
                     'dead_time': agent_dead_limit})
                try:
                    reschedule_resource(context, binding_resource_id)
                except (rescheduling_failed, oslo_messaging.RemoteError):
                    # Catch individual rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception("Failed to reschedule %(resource_name)s "
                                  "%(resource)s",
                                  {'resource_name': resource_name,
                                   'resource': binding_resource_id})
        except Exception:
            # we want to be thorough and catch whatever is raised
            # to avoid aborting the loop
            LOG.exception("Exception encountered during %(resource_name)s "
                          "rescheduling.",
                          {'resource_name': resource_name})
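
To show how the callable and attribute-name parameters fit together, a hypothetical caller modeled on neutron's L3 scheduling mixin (every name below is an assumption, not the actual API):

def reschedule_routers_from_down_agents(self):
    # Hypothetical wrapper: reschedule routers away from dead L3 agents.
    self.reschedule_resources_from_down_agents(
        agent_type='L3',
        get_down_bindings=self._get_down_l3_bindings,
        agent_id_attr='l3_agent_id',
        resource_id_attr='router_id',
        resource_name='router',
        reschedule_resource=self.reschedule_router,
        rescheduling_failed=RouterReschedulingFailed)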
Example 6
def _gen_port(self):
    network_id = self.plugin.create_network(context.get_admin_context(), {
        'network':
        {'name': 'pecannet', 'tenant_id': 'tenid', 'shared': False,
         'admin_state_up': True, 'status': 'ACTIVE'}})['id']
    self.port = self.plugin.create_port(context.get_admin_context(), {
        'port':
        {'tenant_id': 'tenid', 'network_id': network_id,
         'fixed_ips': n_const.ATTR_NOT_SPECIFIED,
         'mac_address': '00:11:22:33:44:55',
         'admin_state_up': True, 'device_id': 'FF',
         'device_owner': 'pecan', 'name': 'pecan'}})
Example 7
    def test_member_crud(self):
        with self.subnet(cidr='10.0.0.0/24') as s:
            with self.loadbalancer(subnet=s) as lb:
                lb_id = lb['loadbalancer']['id']
                with self.listener(loadbalancer_id=lb_id) as listener:
                    listener_id = listener['listener']['id']
                    with self.pool(
                        protocol=lb_con.PROTOCOL_HTTP,
                        listener_id=listener_id) as p:
                        pool_id = p['pool']['id']
                        with self.member(
                            no_delete=True, address='10.0.1.10',
                            pool_id=pool_id, subnet=s) as m1:
                            member1_id = m1['member']['id']

                            self.driver_rest_call_mock.reset_mock()
                            rest_call_function_mock.__dict__.update(
                                {'WORKFLOW_MISSING': False})

                            with self.member(
                                no_delete=True, pool_id=pool_id,
                                subnet=s, address='10.0.1.20') as m2:
                                member2_id = m2['member']['id']
                                self.compare_apply_call()

                                self.driver_rest_call_mock.reset_mock()
                                m = self.plugin_instance.db.get_pool_member(
                                    context.get_admin_context(),
                                    m1['member']['id']).to_dict(pool=False)

                                m['weight'] = 2
                                self.plugin_instance.update_pool_member(
                                    context.get_admin_context(),
                                    m1['member']['id'], p['pool']['id'],
                                    {'member': m})
                                self.update_member(pool_id, id=member1_id,
                                                   weight=2)
                                self.compare_apply_call()

                                self.driver_rest_call_mock.reset_mock()

                                self.plugin_instance.delete_pool_member(
                                    context.get_admin_context(),
                                    member2_id, pool_id)
                                self.delete_member(member2_id, pool_id)
                                self.compare_apply_call()

                                lb = self.plugin_instance.db.get_loadbalancer(
                                    context.get_admin_context(),
                                    lb_id).to_dict(listener=False)
                                self.assertEqual('ACTIVE',
                                                 lb['provisioning_status'])
Example 8
    def _flood_cache_for_query(self, rtype, **filter_kwargs):
        """Load info from server for first query.

        Queries the server if this is the first time a given query for
        rtype has been issued.
        """
        query_ids = self._get_query_ids(rtype, filter_kwargs)
        if query_ids.issubset(self._satisfied_server_queries):
            # we've already asked the server this question so we don't
            # ask directly again because any updates will have been
            # pushed to us
            return
        context = n_ctx.get_admin_context()
        resources = self._puller.bulk_pull(context, rtype,
                                           filter_kwargs=filter_kwargs)
        for resource in resources:
            if self._is_stale(rtype, resource):
                # if the server was slow to respond, the object may have
                # been updated already and pushed to us in another thread.
                LOG.debug("Ignoring stale update for %s: %s", rtype, resource)
                continue
            self.record_resource_update(context, rtype, resource)
        LOG.debug("%s resources returned for queries %s", len(resources),
                  query_ids)
        self._satisfied_server_queries.update(query_ids)
Example 9
def setUp(self):
    super(TestDhcpSchedulerBaseTestCase, self).setUp()
    self.setup_coreplugin(self.CORE_PLUGIN)
    self.ctx = context.get_admin_context()
    self.network = {'id': 'foo_network_id'}
    self.network_id = 'foo_network_id'
    self._save_networks([self.network_id])
Example 10
    def test_floatingip_update_qos_policy_id(self):
        ctx = context.get_admin_context()
        policy_obj_1 = policy.QosPolicy(ctx,
                                        id=uuidutils.generate_uuid(),
                                        project_id='tenant', name='pol2',
                                        rules=[])
        policy_obj_1.create()
        policy_obj_2 = policy.QosPolicy(ctx,
                                        id=uuidutils.generate_uuid(),
                                        project_id='tenant', name='pol3',
                                        rules=[])
        policy_obj_2.create()
        with self.subnet(cidr='11.0.0.0/24') as s:
            network_id = s['subnet']['network_id']
            self._set_net_external(network_id)
            fip = self._make_floatingip(
                self.fmt,
                network_id,
                qos_policy_id=policy_obj_1.id)
            self.assertEqual(policy_obj_1.id,
                             fip['floatingip'][qos_consts.QOS_POLICY_ID])
            body = self._show('floatingips', fip['floatingip']['id'])
            self.assertEqual(policy_obj_1.id,
                             body['floatingip'][qos_consts.QOS_POLICY_ID])

            body = self._update(
                'floatingips', fip['floatingip']['id'],
                {'floatingip': {qos_consts.QOS_POLICY_ID: policy_obj_2.id}})
            self.assertEqual(policy_obj_2.id,
                             body['floatingip'][qos_consts.QOS_POLICY_ID])
Example 11
def test_create_health_monitor(self):
    with self.loadbalancer(no_delete=True) as loadbalancer:
        lb_id = loadbalancer['loadbalancer']['id']
        self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id)
        with self.listener(loadbalancer_id=lb_id,
                           no_delete=True) as listener:
            listener_id = listener['listener']['id']
            self._update_status(models.LoadBalancer, constants.ACTIVE,
                                lb_id)
            with self.pool(listener_id=listener_id, loadbalancer_id=lb_id,
                           no_delete=True) as pool:
                pool_id = pool['pool']['id']
                self._update_status(models.LoadBalancer, constants.ACTIVE,
                                    lb_id)
                with self.healthmonitor(pool_id=pool_id,
                                        no_delete=True) as monitor:
                    hm_id = monitor['healthmonitor']['id']
                    calls = (
                        self.mock_api.create_healthmonitor.call_args_list)
                    _, called_hm, called_host = calls[0][0]
                    self.assertEqual(hm_id, called_hm.id)
                    self.assertEqual('host', called_host)
                    self.assertEqual(constants.PENDING_CREATE,
                                     called_hm.provisioning_status)
                    ctx = context.get_admin_context()
                    lb = self.plugin_instance.db.get_loadbalancer(
                        ctx, lb_id)
                    self.assertEqual(constants.PENDING_UPDATE,
                                     lb.provisioning_status)
Example 12
    def setUp(self):
        flowclassifier_plugin = (
            test_flowclassifier_db.DB_FLOWCLASSIFIER_PLUGIN_CLASS)

        service_plugins = {
            flowclassifier.FLOW_CLASSIFIER_EXT: flowclassifier_plugin
        }
        fdb.FlowClassifierDbPlugin.supported_extension_aliases = [
            flowclassifier.FLOW_CLASSIFIER_EXT]
        fdb.FlowClassifierDbPlugin.path_prefix = (
            flowclassifier.FLOW_CLASSIFIER_PREFIX
        )
        super(OVSFlowClassifierDriverTestCase, self).setUp(
            ext_mgr=None,
            plugin=None,
            service_plugins=service_plugins
        )
        self.flowclassifier_plugin = importutils.import_object(
            flowclassifier_plugin)
        ext_mgr = api_ext.PluginAwareExtensionManager(
            test_flowclassifier_db.extensions_path,
            {
                flowclassifier.FLOW_CLASSIFIER_EXT: self.flowclassifier_plugin
            }
        )
        app = config.load_paste_app('extensions_test_app')
        self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr)
        self.ctx = context.get_admin_context()
        self.driver = driver.OVSFlowClassifierDriver()
        self.driver.initialize()
Example 13
    def create_port_changed_event(self, action, original_obj, returned_obj):
        port = None
        if action in ['update_port', 'delete_port']:
            port = returned_obj['port']

        elif action in ['update_floatingip', 'create_floatingip',
                        'delete_floatingip']:
            # NOTE(arosen) if we are associating a floatingip the
            # port_id is in the returned_obj. Otherwise on disassociate
            # it's in the original_obj.
            port_id = (returned_obj['floatingip'].get('port_id') or
                       original_obj.get('port_id'))

            if port_id is None:
                return

            ctx = context.get_admin_context()
            try:
                port = directory.get_plugin().get_port(ctx, port_id)
            except exc.PortNotFound:
                LOG.debug("Port %s was deleted, no need to send any "
                          "notification", port_id)
                return

        if port and self._is_compute_port(port):
            if action == 'delete_port':
                return self._get_port_delete_event(port)
            else:
                return self._get_network_changed_event(port['device_id'])
Example 14
def test_update_pool(self):
    ctx = context.get_admin_context()
    with self.loadbalancer(no_delete=True) as loadbalancer:
        lb_id = loadbalancer['loadbalancer']['id']
        self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id)
        with self.listener(loadbalancer_id=lb_id,
                           no_delete=True) as listener:
            listener_id = listener['listener']['id']
            self._update_status(models.LoadBalancer, constants.ACTIVE,
                                lb_id)
            with self.pool(loadbalancer_id=lb_id, listener_id=listener_id,
                           no_delete=True) as pool:
                pool_id = pool['pool']['id']
                old_name = pool['pool']['name']
                self._update_status(models.LoadBalancer, constants.ACTIVE,
                                    lb_id)
                new_name = 'new_name'
                pool['pool']['name'] = new_name
                self.plugin_instance.update_pool(ctx, pool_id, pool)
                calls = self.mock_api.update_pool.call_args_list
                (_, old_called_pool,
                 new_called_pool, called_host) = calls[0][0]
                self.assertEqual(pool_id, new_called_pool.id)
                self.assertEqual(pool_id, old_called_pool.id)
                self.assertEqual(old_name, old_called_pool.name)
                self.assertEqual(new_name, new_called_pool.name)
                self.assertEqual(constants.PENDING_UPDATE,
                                 new_called_pool.provisioning_status)
                lb = self.plugin_instance.db.get_loadbalancer(ctx, lb_id)
                self.assertEqual(constants.PENDING_UPDATE,
                                 lb.provisioning_status)
                self.assertEqual('host', called_host)
Example 15
def test_create_member(self):
    with self.loadbalancer(no_delete=True) as loadbalancer:
        lb_id = loadbalancer['loadbalancer']['id']
        self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id)
        with self.listener(loadbalancer_id=lb_id,
                           no_delete=True) as listener:
            listener_id = listener['listener']['id']
            self._update_status(models.LoadBalancer, constants.ACTIVE,
                                lb_id)
            with self.pool(listener_id=listener_id, loadbalancer_id=lb_id,
                           no_delete=True) as pool:
                pool_id = pool['pool']['id']
                self._update_status(models.LoadBalancer, constants.ACTIVE,
                                    lb_id)
                with self.subnet(cidr='11.0.0.0/24') as subnet:
                    with self.member(pool_id=pool_id, subnet=subnet,
                                     no_delete=True) as member:
                        member_id = member['member']['id']
                        calls = self.mock_api.create_member.call_args_list
                        _, called_member, called_host = calls[0][0]
                        self.assertEqual(member_id, called_member.id)
                        self.assertEqual('host', called_host)
                        self.assertEqual(constants.PENDING_CREATE,
                                         called_member.provisioning_status)
                        ctx = context.get_admin_context()
                        lb = self.plugin_instance.db.get_loadbalancer(
                            ctx, lb_id)
                        self.assertEqual(constants.PENDING_UPDATE,
                                         lb.provisioning_status)
Example 16
    def sync_allocations(self):
        # determine current configured allocatable tunnel ids
        tunnel_ids = set()
        ranges = self.get_network_segment_ranges()
        for tun_min, tun_max in ranges:
            tunnel_ids |= set(moves.range(tun_min, tun_max + 1))

        tunnel_id_getter = operator.attrgetter(self.segmentation_key)
        tunnel_col = getattr(self.model, self.segmentation_key)
        ctx = context.get_admin_context()
        with db_api.CONTEXT_WRITER.using(ctx):
            # remove from table unallocated tunnels not currently allocatable
            # fetch results as list via all() because we'll be iterating
            # through them twice
            allocs = ctx.session.query(self.model).all()

            # collect the vnis that need to be deleted from the db
            unallocateds = (
                tunnel_id_getter(a) for a in allocs if not a.allocated)
            to_remove = (x for x in unallocateds if x not in tunnel_ids)
            # Immediately delete tunnels in chunks. This leaves no work for
            # the flush at the end of the transaction.
            for chunk in chunks(to_remove, self.BULK_SIZE):
                (ctx.session.query(self.model).filter(tunnel_col.in_(chunk)).
                 filter_by(allocated=False).delete(synchronize_session=False))

            # collect vnis that need to be added
            existings = {tunnel_id_getter(a) for a in allocs}
            missings = list(tunnel_ids - existings)
            for chunk in chunks(missings, self.BULK_SIZE):
                bulk = [{self.segmentation_key: x, 'allocated': False}
                        for x in chunk]
                ctx.session.execute(self.model.__table__.insert(), bulk)
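
The chunked delete/insert above depends on a chunks() helper that is not shown here; a minimal sketch of such a generator (the real helper may differ):

import itertools

def chunks(iterable, chunk_size):
    """Yield successive lists of at most chunk_size items."""
    iterator = iter(iterable)
    while True:
        chunk = list(itertools.islice(iterator, chunk_size))
        if not chunk:
            return
        yield chunk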
Example 17
    def __enter__(self):
        try:
            context = q_context.get_admin_context()
            db_storage_driver = cert_utils.DbCertificateStorageDriver(
                context)
            with client_cert.ClientCertificateManager(
                cert_utils.NSX_OPENSTACK_IDENTITY,
                None,
                db_storage_driver) as cert_manager:
                if not cert_manager.exists():
                    msg = _("Unable to load from nsx-db")
                    raise nsx_exc.ClientCertificateException(err_msg=msg)

                filename = self._filename
                basedir = os.path.dirname(filename)
                if basedir and not os.path.exists(basedir):
                    fileutils.ensure_tree(basedir)
                cert_manager.export_pem(filename)

                expires_in_days = cert_manager.expires_in_days()
                self._check_expiration(expires_in_days)
        except Exception:
            self._on_exit()
            raise

        return self
Example 18
def setUp(self):
    plugin = 'neutron.tests.unit.extensions.test_agent.TestAgentPlugin'
    # for these tests we need to enable overlapping ips
    cfg.CONF.set_default('allow_overlapping_ips', True)
    ext_mgr = AgentTestExtensionManager()
    super(AgentDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
    self.adminContext = context.get_admin_context()
Example 19
def test_network_create_with_bad_provider_attrs_400(self):
    ctx = context.get_admin_context()
    ctx.tenant_id = 'an_admin'
    bad_data = {provider_net.SEGMENTATION_ID: "abc"}
    res, _1 = self._post_network_with_bad_provider_attrs(ctx, bad_data,
                                                         True)
    self.assertEqual(web_exc.HTTPBadRequest.code, res.status_int)
Example 20
def _extract(self, resource_type, resource_id, field):
    # NOTE(salv-orlando): This check currently assumes the parent
    # resource is handled by the core plugin. It might be worth
    # having a way to map resources to plugins so as to make this
    # check more general
    plugin = directory.get_plugin()
    if resource_type in service_const.EXT_PARENT_RESOURCE_MAPPING:
        plugin = directory.get_plugin(
            service_const.EXT_PARENT_RESOURCE_MAPPING[resource_type])
    f = getattr(plugin, 'get_%s' % resource_type)
    # f *must* exist; if not found, it is better to let neutron
    # explode. The check is performed with an admin context.
    try:
        data = f(context.get_admin_context(),
                 resource_id,
                 fields=[field])
    except exceptions.NotFound as e:
        # NOTE(kevinbenton): a NotFound exception can occur if a
        # list operation is happening at the same time as one of
        # the parents and its children being deleted. So we issue
        # a RetryRequest so the API will redo the lookup and the
        # problem items will be gone.
        raise db_exc.RetryRequest(e)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception('Policy check error while calling %s!', f)
    return data[field]
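
The NotFound-to-RetryRequest conversion is worth isolating; the same pattern as a standalone sketch (assuming oslo.db and neutron-lib; lookup_or_retry is a made-up name):

from neutron_lib import exceptions
from oslo_db import exception as db_exc

def lookup_or_retry(getter, ctx, resource_id, field):
    try:
        return getter(ctx, resource_id, fields=[field])[field]
    except exceptions.NotFound as e:
        # Let the API layer redo the lookup instead of failing a whole
        # list operation because of a concurrent delete.
        raise db_exc.RetryRequest(e)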
Example 21
def _ext_extend_subnet_dict(result, subnetdb):
    ctx = n_context.get_admin_context()
    # get the core plugin as this is a static method with no 'self'
    plugin = directory.get_plugin()
    with db_api.CONTEXT_WRITER.using(ctx):
        plugin._extension_manager.extend_subnet_dict(
            ctx.session, subnetdb, result)
Example 22
def setUp(self):
    super(QosCoreResourceExtensionTestCase, self).setUp()
    self.core_extension = qos_core.QosCoreResourceExtension()
    policy_p = mock.patch('neutron.objects.qos.policy.QosPolicy')
    self.policy_m = policy_p.start()
    self.context = context.get_admin_context()
    self.non_admin_context = context.Context('user_id', 'tenant_id')
Example 23
def setUp(self):
    super(TestDriverController, self).setUp()
    self.setup_coreplugin(DB_PLUGIN_KLASS)
    self.fake_l3 = mock.Mock()
    self.dc = driver_controller.DriverController(self.fake_l3)
    self.fake_l3.l3_driver_controller = self.dc
    self.ctx = context.get_admin_context()
Example 24
def tag_default_ports(resource, event, trigger, **kwargs):
    nsxlib = v3_utils.get_connected_nsxlib()
    admin_cxt = neutron_context.get_admin_context()
    filters = v3_utils.get_plugin_filters(admin_cxt)

    # the plugin creation below will create the NS group and update the
    # default OS section to have the correct applied-to group
    with v3_utils.NsxV3PluginWrapper() as _plugin:
        neutron_ports = _plugin.get_ports(admin_cxt, filters=filters)
        for port in neutron_ports:
            neutron_id = port['id']
            # get the network nsx id from the mapping table
            nsx_id = plugin_utils.get_port_nsx_id(admin_cxt.session,
                                                  neutron_id)
            if not nsx_id:
                continue
            device_owner = port['device_owner']
            if device_owner in (l3_db.DEVICE_OWNER_ROUTER_INTF,
                                const.DEVICE_OWNER_DHCP):
                continue
            ps = _plugin._get_port_security_binding(admin_cxt,
                                                    neutron_id)
            if not ps:
                continue
            try:
                nsx_port = nsxlib.logical_port.get(nsx_id)
            except nsx_exc.ResourceNotFound:
                continue
            tags_update = nsx_port['tags']
            tags_update += [{'scope': security.PORT_SG_SCOPE,
                             'tag': plugin.NSX_V3_DEFAULT_SECTION}]
            nsxlib.logical_port.update(nsx_id, None,
                                       tags_update=tags_update)
Example 25
def delete_agent_gateway_port(self, context, **kwargs):
    """Delete Floatingip agent gateway port."""
    network_id = kwargs.get('network_id')
    host = kwargs.get('host')
    admin_ctx = neutron_context.get_admin_context()
    self.l3plugin.delete_floatingip_agent_gateway_port(
        admin_ctx, host, network_id)
Example 26
def setUp(self):
    super(TestOVSDBData, self).setUp()
    self.context = context.get_admin_context()
    self.ovsdb_identifier = 'fake_ovsdb_id'
    mock.patch.object(directory, 'get_plugin').start()
    mock.patch.object(managers, 'TypeManager').start()
    self.ovsdb_data = data.OVSDBData(self.ovsdb_identifier)
Example 27
    def setUp(self):
        super(FlavorPluginTestCase, self).setUp()

        self.config_parse()
        cfg.CONF.set_override(
            'service_plugins',
            ['neutron.tests.unit.extensions.test_flavors.DummyServicePlugin'])

        self.useFixture(
            fixtures.MonkeyPatch('neutron.manager.NeutronManager._instance'))

        self.plugin = flavors_plugin.FlavorsPlugin()
        self.ctx = context.get_admin_context()

        providers = [DummyServiceDriver.get_service_type() +
                     ":" + _provider + ":" + _driver]
        self.service_manager = servicetype_db.ServiceTypeManager.get_instance()
        self.service_providers = mock.patch.object(
            provconf.NeutronModule, 'service_providers').start()
        self.service_providers.return_value = providers
        for provider in providers:
            self.service_manager.add_provider_configuration(
                provider.split(':')[0], provconf.ProviderConfiguration())

        dbapi.context_manager.writer.get_engine()
Example 28
def _register_agent(agent, plugin=None):
    if not plugin:
        plugin = FakePlugin()
    admin_context = context.get_admin_context()
    plugin.create_or_update_agent(admin_context, agent)
    return plugin._get_agent_by_type_and_host(
        admin_context, agent['agent_type'], agent['host'])
Example 29
    def setUp(self):
        super(L3SchedulerBaseTest, self).setUp(PLUGIN_NAME)

        self.l3_plugin = l3_router_plugin.L3RouterPlugin()
        directory.add_plugin(plugin_constants.L3, self.l3_plugin)
        self.adminContext = context.get_admin_context()
        self.adminContext.tenant_id = _uuid()
Example 30
def setUp(self):
    super(TestSubnetAllocation, self).setUp()
    self._tenant_id = 'test-tenant'
    self.setup_coreplugin(test_db_base_plugin_v2.DB_PLUGIN_KLASS)
    self.plugin = directory.get_plugin()
    self.ctx = context.get_admin_context()
    cfg.CONF.set_override('allow_overlapping_ips', True)
Example 31
def set_agent_admin_state(agent_id, admin_state_up=False):
    FakePlugin().update_agent(context.get_admin_context(), agent_id,
                              {'agent': {
                                  'admin_state_up': admin_state_up
                              }})
Example 32
def setUp(self):
    super(TestIpamDriverLoader, self).setUp()
    self.ctx = context.get_admin_context()
Example 33
def context(self):
    if 'neutron.context' not in self.environ:
        self.environ['neutron.context'] = context.get_admin_context()
    return self.environ['neutron.context']
Example 34
def setUp(self):
    super(TestHashRing, self).setUp()
    self.admin_ctx = context.get_admin_context()
    self.addCleanup(self._delete_objs)
Example 35
def __init__(self):
    super(NeutronDbClient, self).__init__()
    self.context = neutron_context.get_admin_context()
Example 36
    def test_distributed_binding_multi_host_status(self):
        ctx = context.get_admin_context()
        with self.port(device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port:
            port_id = port['port']['id']

            # Update port to bind for 1st host.
            self.plugin.update_distributed_port_binding(
                ctx, port_id, {
                    'port': {
                        portbindings.HOST_ID: 'host-ovs-no_filter',
                        'device_id': 'router1'
                    }
                })

            # Mark 1st device up.
            self.plugin.endpoints[0].update_device_up(
                ctx,
                agent_id="theAgentId",
                device=port_id,
                host='host-ovs-no_filter')

            # Get port and verify status is ACTIVE.
            port = self._show('ports', port_id)
            self.assertEqual('ACTIVE', port['port']['status'])

            # Update port to bind for a 2nd host.
            self.plugin.update_distributed_port_binding(
                ctx, port_id, {
                    'port': {
                        portbindings.HOST_ID: 'host-bridge-filter',
                        'device_id': 'router1'
                    }
                })

            # Mark 2nd device up.
            self.plugin.endpoints[0].update_device_up(
                ctx,
                agent_id="the2ndAgentId",
                device=port_id,
                host='host-bridge-filter')

            # Get port and verify status unchanged.
            port = self._show('ports', port_id)
            self.assertEqual('ACTIVE', port['port']['status'])

            # Mark 1st device down.
            self.plugin.endpoints[0].update_device_down(
                ctx,
                agent_id="theAgentId",
                device=port_id,
                host='host-ovs-no_filter')

            # Get port and verify status unchanged.
            port = self._show('ports', port_id)
            self.assertEqual('ACTIVE', port['port']['status'])

            # Mark 2nd device down.
            self.plugin.endpoints[0].update_device_down(
                ctx,
                agent_id="the2ndAgentId",
                device=port_id,
                host='host-bridge-filter')

            # Get port and verify status is DOWN.
            port = self._show('ports', port_id)
            self.assertEqual('DOWN', port['port']['status'])
Example 37
    def test_distributed_binding(self):
        ctx = context.get_admin_context()
        with self.port(device_owner=const.DEVICE_OWNER_DVR_INTERFACE) as port:
            port_id = port['port']['id']

            # Verify port's VIF type and status.
            self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
                             port['port'][portbindings.VIF_TYPE])
            self.assertEqual('DOWN', port['port']['status'])

            # Update port to bind for a host.
            self.plugin.update_distributed_port_binding(
                ctx, port_id, {
                    'port': {
                        portbindings.HOST_ID: 'host-ovs-no_filter',
                        'device_id': 'router1'
                    }
                })

            # Get port and verify VIF type and status unchanged.
            port = self._show('ports', port_id)
            self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
                             port['port'][portbindings.VIF_TYPE])
            self.assertEqual('DOWN', port['port']['status'])

            # Get and verify binding details for host
            details = self.plugin.endpoints[0].get_device_details(
                ctx,
                agent_id="theAgentId",
                device=port_id,
                host='host-ovs-no_filter')
            self.assertEqual('local', details['network_type'])

            # Get port and verify VIF type and changed status.
            port = self._show('ports', port_id)
            self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
                             port['port'][portbindings.VIF_TYPE])
            self.assertEqual('BUILD', port['port']['status'])

            # Mark device up.
            self.plugin.endpoints[0].update_device_up(
                ctx,
                agent_id="theAgentId",
                device=port_id,
                host='host-ovs-no_filter')

            # Get port and verify VIF type and changed status.
            port = self._show('ports', port_id)
            self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
                             port['port'][portbindings.VIF_TYPE])
            self.assertEqual('ACTIVE', port['port']['status'])

            # Mark device down.
            self.plugin.endpoints[0].update_device_down(
                ctx,
                agent_id="theAgentId",
                device=port_id,
                host='host-ovs-no_filter')

            # Get port and verify VIF type and changed status.
            port = self._show('ports', port_id)
            self.assertEqual(portbindings.VIF_TYPE_DISTRIBUTED,
                             port['port'][portbindings.VIF_TYPE])
            self.assertEqual('DOWN', port['port']['status'])
Example 38
def schedule_bgp_speaker_callback(self, resource, event, trigger, payload):
    plugin = payload['plugin']
    if event == events.AFTER_CREATE:
        ctx = nl_context.get_admin_context()
        plugin.schedule_bgp_speaker(ctx, payload['bgp_speaker'])
Example 39
def setUp(self):
    super(TestDbQuotaDriver, self).setUp()
    self.plugin = FakePlugin()
    self.context = context.get_admin_context()
Example 40
def __init__(self, group_name):
    self._hash_ring = None
    self._last_time_loaded = None
    self._cache_startup_timeout = True
    self._group = group_name
    self.admin_ctx = context.get_admin_context()
Example 41
def setUp(self):
    super(SecurityGroupDbMixinTestCase, self).setUp()
    self.setup_coreplugin(core_plugin=DB_PLUGIN_KLASS)
    self.ctx = context.get_admin_context()
    self.mixin = SecurityGroupDbMixinImpl()
Example 42
def setUp(self):
    super(L3DvrTestCase, self).setUp(plugin='ml2')
    self.core_plugin = directory.get_plugin()
    self.ctx = context.get_admin_context()
    self.mixin = FakeL3Plugin()
    directory.add_plugin(plugin_constants.L3, self.mixin)
Example 43
def __init__(self):
    # initialize the availability zones
    config.register_nsxv3_azs(cfg.CONF, cfg.CONF.nsx_v3.availability_zones)
    super(NsxV3PluginWrapper, self).__init__()
    self.context = context.get_admin_context()
Example 44
def setUp(self):
    super(Ml2DBTestCase, self).setUp()
    self.ctx = context.get_admin_context()
    self.setup_coreplugin(PLUGIN_NAME)
Example 45
    def test_floatingip_update_with_port_and_qos_scenarios(self):
        ctx = context.get_admin_context()
        policy_obj_1 = policy.QosPolicy(ctx,
                                        id=uuidutils.generate_uuid(),
                                        project_id='tenant',
                                        name='pol2',
                                        rules=[])
        policy_obj_1.create()
        policy_obj_2 = policy.QosPolicy(ctx,
                                        id=uuidutils.generate_uuid(),
                                        project_id='tenant',
                                        name='pol3',
                                        rules=[])
        policy_obj_2.create()
        with self.network() as ext_net:
            network_id = ext_net['network']['id']
            self._set_net_external(network_id)
            with self.subnet(ext_net, cidr='10.10.10.0/24'), self.router(
            ) as router, self.subnet(
                    cidr='11.0.0.0/24') as private_subnet, self.port(
                        private_subnet) as port_1, self.port(
                            private_subnet) as port_2:
                self._add_external_gateway_to_router(router['router']['id'],
                                                     network_id)
                self._router_interface_action('add', router['router']['id'],
                                              private_subnet['subnet']['id'],
                                              None)
                fip = self._make_floatingip(self.fmt, network_id)
                self.assertIsNone(fip['floatingip'].get('port_id'))
                self.assertIsNone(fip['floatingip'].get(
                    qos_consts.QOS_POLICY_ID))

                # update from: {port_id: null, qos_policy_id: null}
                #        to  : {port_id: port_id_1, qos_policy_id: null}
                self._update_fip_with_port_or_qos_and_verify(
                    fip['floatingip']['id'], port_1['port']['id'], None)

                # update from: {port_id: port_id_1, qos_policy_id: null}
                #        to  : {port_id: port_id_1, qos_policy_id: policy_1}
                self._update_fip_with_port_or_qos_and_verify(
                    fip['floatingip']['id'], port_1['port']['id'],
                    policy_obj_1.id)

                # update from: {port_id: port_id_1, qos_policy_id: policy_1}
                #        to  : {port_id: port_id_2, qos_policy_id: policy_2}
                self._update_fip_with_port_or_qos_and_verify(
                    fip['floatingip']['id'], port_2['port']['id'],
                    policy_obj_2.id)

                # update from: {port_id: port_id_2, qos_policy_id: policy_2}
                #        to  : {port_id: port_id_1, qos_policy_id: null}
                self._update_fip_with_port_or_qos_and_verify(
                    fip['floatingip']['id'], port_1['port']['id'], None)

                # update from: {port_id: port_id_1, qos_policy_id: null}
                #        to  : {port_id: null, qos_policy_id: policy_1}
                self._update_fip_with_port_or_qos_and_verify(
                    fip['floatingip']['id'], None, policy_obj_1.id)

                # update from: {port_id: null, qos_policy_id: policy_1}
                #        to  : {port_id: null, qos_policy_id: null}
                self._update_fip_with_port_or_qos_and_verify(
                    fip['floatingip']['id'])
Example 46
def __init__(self):
    super(NeutronDbClient, self).__init__()
    self.context = context.get_admin_context()
    self.filters = get_plugin_filters(self.context)
Example 47
def setUp(self):
    super(LoggingRpcCallbackTestCase, self).setUp()
    self.context = context.get_admin_context()
    self.rpc_callback = server_rpc.LoggingApiSkeleton()
Example 48
def setUp(self):
    super(LoggingDBApiTestCase, self).setUp()
    self.context = context.get_admin_context()
    self.sg_id, self.port_id, self.tenant_id = self._create_sg_and_port()
Example 49
def setUp(self):
    super(TestUtils, self).setUp()
    self.ctx = context.get_admin_context()
Example 50
def test_firewall_group_deleted_not_found(self):
    ctx = context.get_admin_context()
    observed = self.callbacks.firewall_group_deleted(ctx, 'notfound')
    self.assertTrue(observed)
Example 51
def get_ovn_db_revisions():
    ctx = context.get_admin_context()
    return [
        row[0] for row in ctx.session.execute("SELECT version_num from %s;" %
                                              OVN_ALEMBIC_TABLE_NAME)
    ]  # nosec
Example 52
def _create_quota_usage(self, resource, used, tenant_id=None):
    tenant_id = tenant_id or self.tenant_id
    return quota_api.set_quota_usage(context.get_admin_context(),
                                     resource,
                                     tenant_id,
                                     in_use=used)
Example 53
def port_mac_addresses():
    ctx = context.get_admin_context()
    return [
        port[0]
        for port in ctx.session.query(models_v2.Port.mac_address).all()
    ]
Example 54
def get_network_availability_zones(self, net_db):
    ctx = n_context.get_admin_context()
    p = self._get_plugin_from_project(ctx, net_db['tenant_id'])
    return p.get_network_availability_zones(net_db)
Example 55
    def check_for_inconsistencies(self):
        # Only the worker holding a valid lock within OVSDB will run
        # this periodic task
        if not self.has_lock:
            return

        admin_context = n_context.get_admin_context()
        create_update_inconsistencies = (
            revision_numbers_db.get_inconsistent_resources(admin_context))
        delete_inconsistencies = (
            revision_numbers_db.get_deleted_resources(admin_context))
        if not any([create_update_inconsistencies, delete_inconsistencies]):
            LOG.debug('Maintenance task: No inconsistencies found. Skipping')
            return

        LOG.debug('Maintenance task: Synchronizing Neutron '
                  'and OVN databases')
        self._log_maintenance_inconsistencies(create_update_inconsistencies,
                                              delete_inconsistencies)
        self._sync_timer.restart()

        dbg_log_msg = ('Maintenance task: Fixing resource %(res_uuid)s '
                       '(type: %(res_type)s) at %(type_)s')
        # Fix the create/update resources inconsistencies
        for row in create_update_inconsistencies:
            LOG.debug(dbg_log_msg, {'res_uuid': row.resource_uuid,
                                    'res_type': row.resource_type,
                                    'type_': INCONSISTENCY_TYPE_CREATE_UPDATE})
            try:
                # NOTE(lucasagomes): The way to fix subnets is a bit
                # different than other resources. A subnet in OVN language
                # is just a DHCP rule, but this rule only exists if the
                # subnet in Neutron has the "enable_dhcp" attribute set
                # to True. So, it's possible to have a consistent subnet
                # resource even when it does not exist in the OVN database.
                if row.resource_type == ovn_const.TYPE_SUBNETS:
                    self._fix_create_update_subnet(admin_context, row)
                else:
                    self._fix_create_update(admin_context, row)
            except Exception:
                LOG.exception('Maintenance task: Failed to fix resource '
                              '%(res_uuid)s (type: %(res_type)s)',
                              {'res_uuid': row.resource_uuid,
                               'res_type': row.resource_type})

        # Fix the deleted resources inconsistencies
        for row in delete_inconsistencies:
            LOG.debug(dbg_log_msg, {'res_uuid': row.resource_uuid,
                                    'res_type': row.resource_type,
                                    'type_': INCONSISTENCY_TYPE_DELETE})
            try:
                if row.resource_type == ovn_const.TYPE_SUBNETS:
                    self._ovn_client.delete_subnet(admin_context,
                                                   row.resource_uuid)
                else:
                    self._fix_delete(admin_context, row)
            except Exception:
                LOG.exception('Maintenance task: Failed to fix deleted '
                              'resource %(res_uuid)s (type: %(res_type)s)',
                              {'res_uuid': row.resource_uuid,
                               'res_type': row.resource_type})

        self._sync_timer.stop()
        LOG.info('Maintenance task: Synchronization finished '
                 '(took %.2f seconds)', self._sync_timer.elapsed())
Example 56
def get_router_availability_zones(self, router):
    ctx = n_context.get_admin_context()
    p = self._get_plugin_from_project(ctx, router['tenant_id'])
    return p.get_router_availability_zones(router)
Example 57
def get_networks():
    ctx = context.get_admin_context()
    query = model_query.get_collection_query(ctx, models_v2.Network)
    return query.all()
Example 58
def __init__(self, group):
    self._group = group
    self.ctx = n_context.get_admin_context()
Example 59
def setUp(self):
    self.context = context.get_admin_context()
    super(NetworkRbacTestcase, self).setUp()
Example 60
def table_exists(table_name):
    ctx = context.get_admin_context()
    tables = [t[0] for t in ctx.session.execute("SHOW TABLES;")]
    return table_name in tables
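
Note that "SHOW TABLES;" is MySQL-specific; a backend-agnostic sketch using the SQLAlchemy inspector (assuming an engine object is available) would be:

import sqlalchemy as sa

def table_exists_portable(engine, table_name):
    # Works across MySQL, PostgreSQL and SQLite.
    return sa.inspect(engine).has_table(table_name)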