Example #1
 def _gen_port(self):
     network_id = self.plugin.create_network(
         context.get_admin_context(),
         {
             "network": {
                 "name": "pecannet",
                 "tenant_id": "tenid",
                 "shared": False,
                 "admin_state_up": True,
                 "status": "ACTIVE",
             }
         },
     )["id"]
     self.port = self.plugin.create_port(
         context.get_admin_context(),
         {
             "port": {
                 "tenant_id": "tenid",
                 "network_id": network_id,
                 "fixed_ips": n_const.ATTR_NOT_SPECIFIED,
                 "mac_address": "00:11:22:33:44:55",
                 "admin_state_up": True,
                 "device_id": "FF",
                 "device_owner": "pecan",
                 "name": "pecan",
             }
         },
     )
Example #2
    def test_delete_pool_hm_with_vip(self):
        with self.subnet() as subnet:
            with self.health_monitor(do_delete=False) as hm:
                with self.pool(provider="radware", subnet_id=subnet["subnet"]["id"]) as pool:
                    with self.vip(pool=pool, subnet=subnet):
                        self.plugin_instance.create_pool_health_monitor(
                            context.get_admin_context(), hm, pool["pool"]["id"]
                        )

                        self.plugin_instance.delete_pool_health_monitor(
                            context.get_admin_context(), hm["health_monitor"]["id"], pool["pool"]["id"]
                        )

                        name, args, kwargs = self.driver_rest_call_mock.mock_calls[-2]
                        deletion_post_graph = str(args[2])

                        self.assertTrue(re.search(r".*\'hm_uuid_array\': \[\].*", deletion_post_graph))

                        calls = [
                            mock.call(
                                "POST",
                                "/api/workflow/" + pool["pool"]["id"] + "/action/BaseCreate",
                                mock.ANY,
                                driver.TEMPLATE_HEADER,
                            )
                        ]
                        self.driver_rest_call_mock.assert_has_calls(calls, any_order=True)

                        self.assertRaises(
                            loadbalancer.PoolMonitorAssociationNotFound,
                            self.plugin_instance.get_pool_health_monitor,
                            context.get_admin_context(),
                            hm["health_monitor"]["id"],
                            pool["pool"]["id"],
                        )
Example #3
    def test_create_hm_with_vip(self):
        with self.subnet() as subnet:
            with self.health_monitor() as hm:
                with self.pool(provider="radware", subnet_id=subnet["subnet"]["id"]) as pool:
                    with self.vip(pool=pool, subnet=subnet):

                        self.plugin_instance.create_pool_health_monitor(
                            context.get_admin_context(), hm, pool["pool"]["id"]
                        )

                        # Test REST calls
                        calls = [
                            mock.call(
                                "POST",
                                "/api/workflow/" + pool["pool"]["id"] + "/action/BaseCreate",
                                mock.ANY,
                                driver.TEMPLATE_HEADER,
                            ),
                            mock.call(
                                "POST",
                                "/api/workflow/" + pool["pool"]["id"] + "/action/BaseCreate",
                                mock.ANY,
                                driver.TEMPLATE_HEADER,
                            ),
                        ]
                        self.driver_rest_call_mock.assert_has_calls(calls, any_order=True)

                        phm = self.plugin_instance.get_pool_health_monitor(
                            context.get_admin_context(), hm["health_monitor"]["id"], pool["pool"]["id"]
                        )
                        self.assertEqual(phm["status"], constants.ACTIVE)
Example #4
 def test_get_pip(self):
     """Call _get_pip twice and verify that a Port is created once."""
     port_dict = {'fixed_ips': [{'subnet_id': '10.10.10.10',
                                 'ip_address': '11.11.11.11'}]}
     port_data = {
         'tenant_id': 'tenant_id',
         'name': 'port_name',
         'network_id': 'network_id',
         'mac_address': attributes.ATTR_NOT_SPECIFIED,
         'admin_state_up': False,
         'device_id': '',
         'device_owner': 'neutron:' + constants.LOADBALANCER,
         'fixed_ips': [{'subnet_id': '10.10.10.10'}]
     }
     self.plugin_instance._core_plugin.get_ports = mock.Mock(
         return_value=[])
     self.plugin_instance._core_plugin.create_port = mock.Mock(
         return_value=port_dict)
     radware_driver = self.plugin_instance.drivers['radware']
     radware_driver._get_pip(context.get_admin_context(),
                             'tenant_id', 'port_name',
                             'network_id', '10.10.10.10')
     self.plugin_instance._core_plugin.get_ports.assert_called_once_with(
             mock.ANY, filters={'name': ['port_name']})
     self.plugin_instance._core_plugin.create_port.assert_called_once_with(
             mock.ANY, {'port': port_data})
     self.plugin_instance._core_plugin.create_port.reset_mock()
     self.plugin_instance._core_plugin.get_ports.reset_mock()
     self.plugin_instance._core_plugin.get_ports.return_value = [port_dict]
     radware_driver._get_pip(context.get_admin_context(),
                             'tenant_id', 'port_name',
                             'network_id', '10.10.10.10')
     self.plugin_instance._core_plugin.get_ports.assert_called_once_with(
             mock.ANY, filters={'name': ['port_name']})
     self.assertFalse(self.plugin_instance._core_plugin.create_port.called)
Example #5
    def test_listener_deployed(self):
        with self.loadbalancer(no_delete=True) as loadbalancer:
            self.plugin_instance.db.update_loadbalancer_provisioning_status(
                context.get_admin_context(),
                loadbalancer['loadbalancer']['id'])
            with self.listener(
                    loadbalancer_id=loadbalancer[
                        'loadbalancer']['id']) as listener:
                ctx = context.get_admin_context()

                l = self.plugin_instance.db.get_loadbalancer(
                    ctx, loadbalancer['loadbalancer']['id'])
                self.assertEqual('PENDING_UPDATE', l.provisioning_status)

                ll = self.plugin_instance.db.get_listener(
                    ctx, listener['listener']['id'])
                self.assertEqual('PENDING_CREATE', ll.provisioning_status)

                self.callbacks.loadbalancer_deployed(
                    ctx, loadbalancer['loadbalancer']['id'])

                l = self.plugin_instance.db.get_loadbalancer(
                    ctx, loadbalancer['loadbalancer']['id'])
                self.assertEqual('ACTIVE', l.provisioning_status)
                ll = self.plugin_instance.db.get_listener(
                    ctx, listener['listener']['id'])
                self.assertEqual('ACTIVE', ll.provisioning_status)
Example #6
    def test_delete_healthmonitor(self):
        ctx = context.get_admin_context()
        self._fake_router_edge_mapping()
        with contextlib.nested(
            self.subnet(),
            self.pool(),
            self.health_monitor(no_delete=True)
        ) as (subnet, pool, health_mon):
            net_id = subnet['subnet']['network_id']
            self._set_net_external(net_id)
            with self.vip(
                router_id=self.router_id, pool=pool,
                subnet=subnet):
                    self.plugin.create_pool_health_monitor(
                        context.get_admin_context(),
                        health_mon, pool['pool']['id']
                    )

            req = self.new_delete_request('health_monitors',
                                          health_mon['health_monitor']['id'])
            res = req.get_response(self.ext_api)
            self.assertEqual(res.status_int, 204)
            qry = ctx.session.query(ldb.HealthMonitor)
            qry = qry.filter_by(id=health_mon['health_monitor']['id'])
            self.assertIsNone(qry.first())
Example #7
    def test_delete_vip(self):
        with self.subnet() as subnet:
            with self.pool(provider='radware',
                           do_delete=False,
                           subnet_id=subnet['subnet']['id']) as pool:
                vip_data = {
                    'name': 'vip1',
                    'subnet_id': subnet['subnet']['id'],
                    'pool_id': pool['pool']['id'],
                    'description': '',
                    'protocol_port': 80,
                    'protocol': 'HTTP',
                    'connection_limit': -1,
                    'admin_state_up': True,
                    'status': constants.PENDING_CREATE,
                    'tenant_id': self._tenant_id,
                    'session_persistence': ''
                }

                vip = self.plugin_instance.create_vip(
                    context.get_admin_context(), {'vip': vip_data})

                self.plugin_instance.delete_vip(
                    context.get_admin_context(), vip['id'])

                calls = [
                    mock.call('DELETE', '/api/workflow/' + pool['pool']['id'],
                              None, None)
                ]
                self.driver_rest_call_mock.assert_has_calls(
                    calls, any_order=True)

                self.assertRaises(loadbalancer.VipNotFound,
                                  self.plugin_instance.get_vip,
                                  context.get_admin_context(), vip['id'])
Example #8
    def test_create_hm_with_vip(self):
        with self.subnet() as subnet:
            with self.health_monitor() as hm:
                with self.pool(provider='radware',
                               subnet_id=subnet['subnet']['id']) as pool:
                    with self.vip(pool=pool, subnet=subnet):

                        self.plugin_instance.create_pool_health_monitor(
                            context.get_admin_context(),
                            hm, pool['pool']['id']
                        )

                        # Test REST calls
                        calls = [
                            mock.call(
                                'POST', '/api/workflow/' + pool['pool']['id'] +
                                '/action/BaseCreate',
                                mock.ANY, driver.TEMPLATE_HEADER
                            ),
                            mock.call(
                                'POST', '/api/workflow/' + pool['pool']['id'] +
                                '/action/BaseCreate',
                                mock.ANY, driver.TEMPLATE_HEADER
                            )
                        ]
                        self.driver_rest_call_mock.assert_has_calls(
                            calls, any_order=True)

                        phm = self.plugin_instance.get_pool_health_monitor(
                            context.get_admin_context(),
                            hm['health_monitor']['id'], pool['pool']['id']
                        )
                        self.assertEqual(phm['status'], constants.ACTIVE)
Example #9
    def test_update_member_with_vip(self):
        with self.subnet() as subnet:
            with self.pool(provider="radware", subnet_id=subnet["subnet"]["id"]) as p:
                with self.member(pool_id=p["pool"]["id"]) as member:
                    with self.vip(pool=p, subnet=subnet):
                        self.plugin_instance.update_member(context.get_admin_context(), member["member"]["id"], member)
                        calls = [
                            mock.call(
                                "POST",
                                "/api/workflow/" + p["pool"]["id"] + "/action/BaseCreate",
                                mock.ANY,
                                driver.TEMPLATE_HEADER,
                            ),
                            mock.call(
                                "POST",
                                "/api/workflow/" + p["pool"]["id"] + "/action/BaseCreate",
                                mock.ANY,
                                driver.TEMPLATE_HEADER,
                            ),
                        ]
                        self.driver_rest_call_mock.assert_has_calls(calls, any_order=True)

                        updated_member = self.plugin_instance.get_member(
                            context.get_admin_context(), member["member"]["id"]
                        )
                        self.assertEqual(updated_member["status"], constants.ACTIVE)
Example #10
    def test_delete_member_with_vip(self):
        with self.subnet() as subnet:
            with self.pool(provider="radware", subnet_id=subnet["subnet"]["id"]) as p:
                with self.member(pool_id=p["pool"]["id"], do_delete=False) as m:
                    with self.vip(pool=p, subnet=subnet):

                        # Reset the mock and wait to be sure the member
                        # changed status from PENDING_CREATE to ACTIVE.

                        self.plugin_instance.delete_member(context.get_admin_context(), m["member"]["id"])

                        name, args, kwargs = self.driver_rest_call_mock.mock_calls[-2]
                        deletion_post_graph = str(args[2])

                        self.assertTrue(re.search(r".*\'member_address_array\': \[\].*", deletion_post_graph))

                        calls = [
                            mock.call(
                                "POST",
                                "/api/workflow/" + p["pool"]["id"] + "/action/BaseCreate",
                                mock.ANY,
                                driver.TEMPLATE_HEADER,
                            )
                        ]
                        self.driver_rest_call_mock.assert_has_calls(calls, any_order=True)

                        self.assertRaises(
                            loadbalancer.MemberNotFound,
                            self.plugin_instance.get_member,
                            context.get_admin_context(),
                            m["member"]["id"],
                        )
Example #11
 def _enforce_device_owner_not_router_intf_or_device_id(self, context, device_owner, device_id, tenant_id):
     """Prevent tenants from replacing the device id of router ports with
     a router uuid belonging to another tenant.
     """
     if device_owner not in constants.ROUTER_INTERFACE_OWNERS:
         return
     if not context.is_admin:
         # Check to make sure device_id does not match another
         # tenant's router.
         if device_id:
             if hasattr(self, "get_router"):
                 try:
                     ctx_admin = ctx.get_admin_context()
                     router = self.get_router(ctx_admin, device_id)
                 except l3.RouterNotFound:
                     return
             else:
                 l3plugin = manager.NeutronManager.get_service_plugins().get(service_constants.L3_ROUTER_NAT)
                 if l3plugin:
                     try:
                         ctx_admin = ctx.get_admin_context()
                         router = l3plugin.get_router(ctx_admin, device_id)
                     except l3.RouterNotFound:
                         return
                 else:
                     # Raise, as the extension doesn't support L3 anyway.
                     raise n_exc.DeviceIDNotOwnedByTenant(device_id=device_id)
             if tenant_id != router["tenant_id"]:
                 raise n_exc.DeviceIDNotOwnedByTenant(device_id=device_id)
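
A hedged usage sketch follows: the wrapper name `_check_port_device_owner`, the `port_id` argument, and passing the port's own tenant_id are assumptions made for illustration; only the guard's argument order comes from the signature above.

def _check_port_device_owner(plugin, context, port_id):
    # Hedged usage sketch, not the actual neutron update_port path.
    # 'plugin' is assumed to be a db plugin instance that defines the
    # guard shown above.
    port = plugin.get_port(context, port_id)
    plugin._enforce_device_owner_not_router_intf_or_device_id(
        context,
        port['device_owner'],
        port['device_id'],
        port['tenant_id'])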
Example #12
    def test_update_member_with_vip(self):
        with self.subnet() as subnet:
            with self.pool(provider='radware') as p:
                with self.member(pool_id=p['pool']['id']) as member:
                    with self.vip(pool=p, subnet=subnet):
                        self.plugin_instance.update_member(
                            context.get_admin_context(),
                            member['member']['id'], member
                        )
                        calls = [
                            mock.call(
                                'POST', '/api/workflow/' + p['pool']['id'] +
                                '/action/BaseCreate',
                                mock.ANY, driver.TEMPLATE_HEADER
                            ),
                            mock.call(
                                'POST', '/api/workflow/' + p['pool']['id'] +
                                '/action/BaseCreate',
                                mock.ANY, driver.TEMPLATE_HEADER
                            )
                        ]
                        self.driver_rest_call_mock.assert_has_calls(
                            calls, any_order=True)

                        updated_member = self.plugin_instance.get_member(
                            context.get_admin_context(),
                            member['member']['id']
                        )
                        self.assertEqual(updated_member['status'],
                                         constants.ACTIVE)
Example #13
 def test_create_delete_l3_policy_with_routers(self):
     with self.router() as router1:
         with self.router() as router2:
             routers = [router1['router']['id'], router2['router']['id']]
             l3p = self.create_l3_policy(routers=routers)
             l3p_id = l3p['l3_policy']['id']
             test_l3p_id = self._gbp_plugin.get_l3p_id_from_router_id(
                 nctx.get_admin_context(),
                 router1['router']['id'])
             self.assertEqual(l3p_id, test_l3p_id)
             test_l3p_id = self._gbp_plugin.get_l3p_id_from_router_id(
                 nctx.get_admin_context(),
                 router2['router']['id'])
             self.assertEqual(l3p_id, test_l3p_id)
             self.assertEqual(sorted(routers),
                              sorted(l3p['l3_policy']['routers']))
             req = self.new_show_request('l3_policies', l3p_id,
                                         fmt=self.fmt)
             res = self.deserialize(self.fmt,
                                    req.get_response(self.ext_api))
             self.assertEqual(sorted(routers),
                              sorted(res['l3_policy']['routers']))
             req = self.new_delete_request('l3_policies', l3p_id)
             res = req.get_response(self.ext_api)
             self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
Example #14
 def test_delete_listener(self):
     with self.loadbalancer(no_delete=True) as loadbalancer:
         lb_id = loadbalancer['loadbalancer']['id']
         self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id)
         with self.listener(loadbalancer_id=lb_id,
                            no_delete=True) as listener:
             listener_id = listener['listener']['id']
             self._update_status(models.LoadBalancer, constants.ACTIVE,
                                 lb_id)
             ctx = context.get_admin_context()
             self.plugin_instance.delete_listener(
                 ctx, listener['listener']['id'])
             calls = self.mock_api.delete_listener.call_args_list
             _, called_listener, called_host = calls[0][0]
             self.assertEqual(listener_id, called_listener.id)
             self.assertEqual('host', called_host)
             self.assertEqual(constants.PENDING_DELETE,
                              called_listener.provisioning_status)
             ctx = context.get_admin_context()
             lb = self.plugin_instance.db.get_loadbalancer(ctx, lb_id)
             self.assertEqual(constants.ACTIVE,
                              lb.provisioning_status)
             self.assertRaises(
                 loadbalancerv2.EntityNotFound,
                 self.plugin_instance.db.get_listener, ctx, listener_id)
Example #15
 def _get_profile_id(cls, p_type, resource, name):
     try:
         tenant_id = manager.NeutronManager.get_service_plugins()[
             constants.L3_ROUTER_NAT].l3_tenant_id()
     except AttributeError:
         return
     if tenant_id is None:
         return
     core_plugin = manager.NeutronManager.get_plugin()
     if p_type == 'net_profile':
         profiles = core_plugin.get_network_profiles(
             n_context.get_admin_context(),
             {'tenant_id': [tenant_id], 'name': [name]},
             ['id'])
     else:
         profiles = core_plugin.get_policy_profiles(
             n_context.get_admin_context(),
             {'tenant_id': [tenant_id], 'name': [name]},
             ['id'])
     if len(profiles) == 1:
         return profiles[0]['id']
     elif len(profiles) > 1:
         # Profile must have a unique name.
         LOG.error(_LE('The %(resource)s %(name)s does not have a unique '
                       'name. Please refer to the admin guide and create '
                       'one.'),
                   {'resource': resource, 'name': name})
     else:
         # Profile has not been created.
         LOG.error(_LE('There is no %(resource)s %(name)s. Please refer to '
                       'the admin guide and create one.'),
                   {'resource': resource, 'name': name})
Example #16
    def _setup_core_resources(self):
        core_plugin = neutron.manager.NeutronManager.get_plugin()

        self._network = core_plugin.create_network(
            q_context.get_admin_context(),
            {
                'network':
                {
                    'tenant_id': self._tenant_id,
                    'name': 'test net',
                    'admin_state_up': True,
                    'shared': False,
                }
            }
        )

        self._subnet = core_plugin.create_subnet(
            q_context.get_admin_context(),
            {
                'subnet':
                {
                    'network_id': self._network['id'],
                    'name': 'test subnet',
                    'cidr': '192.168.1.0/24',
                    'ip_version': 4,
                    'gateway_ip': '192.168.1.1',
                    'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
                    'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
                    'host_routes': attributes.ATTR_NOT_SPECIFIED,
                    'enable_dhcp': True,
                }
            }
        )

        self._subnet_id = self._subnet['id']
Example #17
    def test_delete_vip(self):
        with self.subnet() as subnet:
            with self.pool(provider="radware", do_delete=False, subnet_id=subnet["subnet"]["id"]) as pool:
                vip_data = {
                    "name": "vip1",
                    "subnet_id": subnet["subnet"]["id"],
                    "pool_id": pool["pool"]["id"],
                    "description": "",
                    "protocol_port": 80,
                    "protocol": "HTTP",
                    "connection_limit": -1,
                    "admin_state_up": True,
                    "status": constants.PENDING_CREATE,
                    "tenant_id": self._tenant_id,
                    "session_persistence": "",
                }

                vip = self.plugin_instance.create_vip(context.get_admin_context(), {"vip": vip_data})

                self.plugin_instance.delete_vip(context.get_admin_context(), vip["id"])

                calls = [mock.call("DELETE", "/api/workflow/" + pool["pool"]["id"], None, None)]
                self.driver_rest_call_mock.assert_has_calls(calls, any_order=True)

                self.assertRaises(
                    loadbalancer.VipNotFound, self.plugin_instance.get_vip, context.get_admin_context(), vip["id"]
                )
Example #18
 def test_update_pool(self):
     with self.subnet():
         with self.pool() as pool:
             del pool["pool"]["provider"]
             del pool["pool"]["status"]
             self.plugin_instance.update_pool(context.get_admin_context(), pool["pool"]["id"], pool)
             pool_db = self.plugin_instance.get_pool(context.get_admin_context(), pool["pool"]["id"])
             self.assertEqual(pool_db["status"], constants.PENDING_UPDATE)
Example #19
    def reschedule_resources_from_down_agents(self, agent_type,
                                              get_down_bindings,
                                              agent_id_attr,
                                              resource_id_attr,
                                              resource_name,
                                              reschedule_resource,
                                              rescheduling_failed):
        """Reschedule resources from down neutron agents
        if admin state is up.
        """
        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents(agent_type, agent_dead_limit)

        context = ncontext.get_admin_context()
        try:
            down_bindings = get_down_bindings(context, agent_dead_limit)

            agents_back_online = set()
            for binding in down_bindings:
                binding_agent_id = getattr(binding, agent_id_attr)
                binding_resource_id = getattr(binding, resource_id_attr)
                if binding_agent_id in agents_back_online:
                    continue
                else:
                    # we need new context to make sure we use different DB
                    # transaction - otherwise we may fetch same agent record
                    # each time due to REPEATABLE_READ isolation level
                    context = ncontext.get_admin_context()
                    agent = self._get_agent(context, binding_agent_id)
                    if agent.is_active:
                        agents_back_online.add(binding_agent_id)
                        continue

                LOG.warning(_LW(
                    "Rescheduling %(resource_name)s %(resource)s from agent "
                    "%(agent)s because the agent did not report to the server "
                    "in the last %(dead_time)s seconds."),
                    {'resource_name': resource_name,
                     'resource': binding_resource_id,
                     'agent': binding_agent_id,
                     'dead_time': agent_dead_limit})
                try:
                    reschedule_resource(context, binding_resource_id)
                except (rescheduling_failed, oslo_messaging.RemoteError):
                    # Catch individual rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception(_LE("Failed to reschedule %(resource_name)s "
                                      "%(resource)s"),
                                  {'resource_name': resource_name,
                                   'resource': binding_resource_id})
        except Exception:
            # we want to be thorough and catch whatever is raised
            # to avoid loop abortion
            LOG.exception(_LE("Exception encountered during %(resource_name)s "
                              "rescheduling."),
                          {'resource_name': resource_name})
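
The helper above is fully parameterized; a hedged sketch of how an L3 scheduler mixin could wire it up follows. The `_get_down_router_bindings` helper name is hypothetical, while the other argument values mirror the router-specific variant shown in a later example.

from neutron.extensions import l3agentscheduler


class L3AgentSchedulerDbMixin(object):
    # Illustrative subset only; the real mixin also defines the helpers
    # referenced below. '_get_down_router_bindings' is a hypothetical
    # query returning RouterL3AgentBinding rows for dead agents.
    def reschedule_routers_from_down_agents(self):
        self.reschedule_resources_from_down_agents(
            agent_type='L3',
            get_down_bindings=self._get_down_router_bindings,
            agent_id_attr='l3_agent_id',
            resource_id_attr='router_id',
            resource_name='router',
            reschedule_resource=self.reschedule_router,
            rescheduling_failed=l3agentscheduler.RouterReschedulingFailed)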
Example #20
    def test_member_crud(self):
        with self.subnet(cidr='10.0.0.0/24') as s:
            with self.loadbalancer(
                    subnet=s,
                    vip_address=WF_APPLY_PARAMS['parameters']['vip_address']
            ) as lb:
                lb_id = lb['loadbalancer']['id']
                with self.listener(loadbalancer_id=lb_id) as l:
                    listener_id = l['listener']['id']
                    with self.pool(
                        protocol=lb_con.PROTOCOL_HTTP,
                        listener_id=listener_id) as p:
                        pool_id = p['pool']['id']
                        with self.member(
                            no_delete=True, address='10.0.1.10',
                            pool_id=pool_id, subnet=s) as m1:
                            member1_id = m1['member']['id']

                            self.driver_rest_call_mock.reset_mock()
                            rest_call_function_mock.__dict__.update(
                                {'WORKFLOW_MISSING': False})

                            with self.member(
                                no_delete=True, pool_id=pool_id,
                                subnet=s, address='10.0.1.20') as m2:
                                member2_id = m2['member']['id']
                                self.compare_apply_call()

                                self.driver_rest_call_mock.reset_mock()
                                m = self.plugin_instance.db.get_pool_member(
                                    context.get_admin_context(),
                                    m1['member']['id']).to_dict(pool=False)

                                m['weight'] = 2
                                self.plugin_instance.update_pool_member(
                                    context.get_admin_context(),
                                    m1['member']['id'], p['pool']['id'],
                                    {'member': m})
                                self.update_member(pool_id, id=member1_id,
                                                   weight=2)
                                self.compare_apply_call()

                                self.driver_rest_call_mock.reset_mock()

                                self.plugin_instance.delete_pool_member(
                                    context.get_admin_context(),
                                    member2_id, pool_id)
                                self.delete_member(member2_id, pool_id)
                                self.compare_apply_call()

                                lb = self.plugin_instance.db.get_loadbalancer(
                                    context.get_admin_context(),
                                    lb_id).to_dict(listener=False)
                                self.assertEqual('ACTIVE',
                                                 lb['provisioning_status'])
Example #21
    def reschedule_routers_from_down_agents(self):
        """Reschedule routers from down l3 agents if admin state is up."""
        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents('L3', agent_dead_limit)
        cutoff = self.get_cutoff_time(agent_dead_limit)

        context = n_ctx.get_admin_context()
        try:
            down_bindings = (
                context.session.query(RouterL3AgentBinding).
                join(agents_db.Agent).
                filter(agents_db.Agent.heartbeat_timestamp < cutoff,
                       agents_db.Agent.admin_state_up).
                outerjoin(l3_attrs_db.RouterExtraAttributes,
                          l3_attrs_db.RouterExtraAttributes.router_id ==
                          RouterL3AgentBinding.router_id).
                filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha ==
                              sql.false(),
                              l3_attrs_db.RouterExtraAttributes.ha ==
                              sql.null())))

            agents_back_online = set()
            for binding in down_bindings:
                if binding.l3_agent_id in agents_back_online:
                    continue
                else:
                    # we need new context to make sure we use different DB
                    # transaction - otherwise we may fetch same agent record
                    # each time due to REPEATABLE_READ isolation level
                    context = n_ctx.get_admin_context()
                    agent = self._get_agent(context, binding.l3_agent_id)
                    if agent.is_active:
                        agents_back_online.add(binding.l3_agent_id)
                        continue

                LOG.warning(_LW(
                    "Rescheduling router %(router)s from agent %(agent)s "
                    "because the agent did not report to the server in "
                    "the last %(dead_time)s seconds."),
                    {'router': binding.router_id,
                     'agent': binding.l3_agent_id,
                     'dead_time': agent_dead_limit})
                try:
                    self.reschedule_router(context, binding.router_id)
                except (l3agentscheduler.RouterReschedulingFailed,
                        oslo_messaging.RemoteError):
                    # Catch individual router rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception(_LE("Failed to reschedule router %s"),
                                  binding.router_id)
        except Exception:
            # we want to be thorough and catch whatever is raised
            # to avoid loop abortion
            LOG.exception(_LE("Exception encountered during router "
                              "rescheduling."))
Example #22
 def test_delete_member_without_vip(self):
     with self.subnet():
         with self.pool(provider='radware') as p:
             with self.member(pool_id=p['pool']['id'], no_delete=True) as m:
                 self.plugin_instance.delete_member(
                     context.get_admin_context(), m['member']['id']
                 )
                 self.assertRaises(loadbalancer.MemberNotFound,
                                   self.plugin_instance.get_member,
                                   context.get_admin_context(),
                                   m['member']['id'])
Example #23
 def test_delete_member_without_vip(self):
     with self.subnet():
         with self.pool(provider="radware") as p:
             with self.member(pool_id=p["pool"]["id"], do_delete=False) as m:
                 self.plugin_instance.delete_member(context.get_admin_context(), m["member"]["id"])
                 self.assertRaises(
                     loadbalancer.MemberNotFound,
                     self.plugin_instance.get_member,
                     context.get_admin_context(),
                     m["member"]["id"],
                 )
Example #24
 def test_pool_port(self):
     with self.port(no_delete=True) as port:
         with self.pool() as pool:
             h_db.add_pool_port(context.get_admin_context(),
                                pool['pool']['id'], port['port']['id'])
             pool_port = h_db.get_pool_port(context.get_admin_context(),
                                            pool['pool']['id'])
             self.assertIsNotNone(pool_port)
         pool_port = h_db.get_pool_port(context.get_admin_context(),
                                        pool['pool']['id'])
         self.assertIsNone(pool_port)
Example #25
 def test_update_pool(self):
     with self.subnet():
         with self.pool() as pool:
             del pool['pool']['provider']
             del pool['pool']['status']
             self.plugin_instance.update_pool(
                 context.get_admin_context(),
                 pool['pool']['id'], pool)
             pool_db = self.plugin_instance.get_pool(
                 context.get_admin_context(), pool['pool']['id'])
             self.assertEqual(pool_db['status'], constants.PENDING_UPDATE)
Example #26
 def _gen_port(self):
     network_id = self.plugin.create_network(context.get_admin_context(), {
         'network':
         {'name': 'pecannet', 'tenant_id': 'tenid', 'shared': False,
          'admin_state_up': True, 'status': 'ACTIVE'}})['id']
     self.port = self.plugin.create_port(context.get_admin_context(), {
         'port':
         {'tenant_id': 'tenid', 'network_id': network_id,
          'fixed_ips': n_const.ATTR_NOT_SPECIFIED,
          'mac_address': '00:11:22:33:44:55',
          'admin_state_up': True, 'device_id': 'FF',
          'device_owner': 'pecan', 'name': 'pecan'}})
Example #27
 def test_l3_cleanup_on_net_delete(self):
     l3plugin = manager.NeutronManager.get_service_plugins().get(service_constants.L3_ROUTER_NAT)
     kwargs = {"arg_list": (external_net.EXTERNAL,), external_net.EXTERNAL: True}
     with self.network(**kwargs) as n:
         with self.subnet(network=n, cidr="200.0.0.0/22"):
             l3plugin.create_floatingip(
                 context.get_admin_context(),
                 {"floatingip": {"floating_network_id": n["network"]["id"], "tenant_id": n["network"]["tenant_id"]}},
             )
     self._delete("networks", n["network"]["id"])
     flips = l3plugin.get_floatingips(context.get_admin_context())
     self.assertFalse(flips)
Example #28
    def test_update_member(self):
        self._fake_router_edge_mapping()
        with contextlib.nested(
            self.subnet(),
            self.pool(name="pool1"),
            self.pool(name="pool2"),
            self.health_monitor()
        ) as (subnet, pool1, pool2, monitor):
            net_id = subnet['subnet']['network_id']
            self._set_net_external(net_id)
            self.plugin.create_pool_health_monitor(
                context.get_admin_context(),
                monitor, pool1['pool']['id']
            )
            self.plugin.create_pool_health_monitor(
                context.get_admin_context(),
                monitor, pool2['pool']['id']
            )
            with self.vip(
                router_id=self.router_id,
                pool=pool1, subnet=subnet):
                keys = [('address', "192.168.1.100"),
                        ('tenant_id', self._tenant_id),
                        ('protocol_port', 80),
                        ('weight', 10),
                        ('pool_id', pool2['pool']['id']),
                        ('admin_state_up', False),
                        ('status', 'ACTIVE')]
                with self.member(
                    pool_id=pool1['pool']['id']) as member:

                    pool1_update = self._show_pool(pool1['pool']['id'])
                    self.assertEqual(len(pool1_update['pool']['members']), 1)
                    pool2_update = self._show_pool(pool2['pool']['id'])
                    self.assertEqual(len(pool1_update['pool']['members']), 1)
                    self.assertFalse(pool2_update['pool']['members'])

                    data = {'member': {'pool_id': pool2['pool']['id'],
                                       'weight': 10,
                                       'admin_state_up': False}}
                    req = self.new_update_request('members',
                                                  data,
                                                  member['member']['id'])
                    raw_res = req.get_response(self.ext_api)
                    self.assertEqual(web_exc.HTTPOk.code, raw_res.status_int)
                    res = self.deserialize(self.fmt, raw_res)
                    for k, v in keys:
                        self.assertEqual(res['member'][k], v)
                    pool1_update = self._show_pool(pool1['pool']['id'])
                    pool2_update = self._show_pool(pool2['pool']['id'])
                    self.assertEqual(len(pool2_update['pool']['members']), 1)
                    self.assertFalse(pool1_update['pool']['members'])
Example #29
    def test_create_pool_health_monitor(self):
        with contextlib.nested(self.pool(name="pool"), self.health_monitor(), self.health_monitor()) as (
            pool,
            health_mon1,
            health_mon2,
        ):
            res = self.plugin.create_pool_health_monitor(context.get_admin_context(), health_mon1, pool["pool"]["id"])
            self.assertEqual({"health_monitor": [health_mon1["health_monitor"]["id"]]}, res)

            res = self.plugin.create_pool_health_monitor(context.get_admin_context(), health_mon2, pool["pool"]["id"])
            self.assertEqual(
                {"health_monitor": [health_mon1["health_monitor"]["id"], health_mon2["health_monitor"]["id"]]}, res
            )
Example #30
 def test_handle_expired_object(self):
     rp = directory.get_plugin("revision_plugin")
     with self.port():
         with self.ctx.session.begin():
             ipal_obj = self.ctx.session.query(models_v2.IPAllocation).one()
             # load port into our session
             port_obj = self.ctx.session.query(models_v2.Port).one()
             # simulate concurrent delete in another session
             nctx.get_admin_context().session.query(models_v2.Port).delete()
             # expire the port so the revision bumping code will trigger a
             # lookup on its attributes and encounter an ObjectDeletedError
             self.ctx.session.expire(port_obj)
             rp._bump_related_revisions(self.ctx.session, ipal_obj)
Example #31
 def setUp(self):
     super(TestVlanBitmap, self).setUp()
     self.context = n_context.get_admin_context()
Example #32
 def setUp(self):
     super(L3DvrTestCase, self).setUp()
     self.ctx = context.get_admin_context()
     self.mixin = l3_dvr_db.L3_NAT_with_dvr_db_mixin()
Example #33
File: db.py Project: zioc/neutron
 def __init__(self):
     self.admin_ctx = nctx.get_admin_context()
Example #34
 def setUp(self):
     super(TestODLShim, self).setUp()
     self.context = context.get_admin_context()
     self.plugin = mock.Mock()
     self.driver = driver.OpenDaylightMechanismDriver()
     self.driver.odl_drv = mock.Mock()
Example #35
    def remove_networks_from_down_agents(self):
        """Remove networks from down DHCP agents if admin state is up.

        Reschedule them if configured so.
        """

        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents('DHCP', agent_dead_limit)
        cutoff = self.get_cutoff_time(agent_dead_limit)

        context = ncontext.get_admin_context()
        try:
            down_bindings = (context.session.query(
                ndab_model.NetworkDhcpAgentBinding).join(
                    agent_model.Agent).filter(
                        agent_model.Agent.heartbeat_timestamp < cutoff,
                        agent_model.Agent.admin_state_up))
            dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
            dead_bindings = [
                b for b in self._filter_bindings(context, down_bindings)
            ]
            agents = self.get_agents_db(
                context, {'agent_type': [constants.AGENT_TYPE_DHCP]})
            if not agents:
                # No agents configured so nothing to do.
                return
            active_agents = [
                agent for agent in agents
                if self.is_eligible_agent(context, True, agent)
            ]
            if not active_agents:
                LOG.warning(
                    _LW("No DHCP agents available, "
                        "skipping rescheduling"))
                return
            for binding in dead_bindings:
                LOG.warning(
                    _LW("Removing network %(network)s from agent "
                        "%(agent)s because the agent did not report "
                        "to the server in the last %(dead_time)s "
                        "seconds."), {
                            'network': binding.network_id,
                            'agent': binding.dhcp_agent_id,
                            'dead_time': agent_dead_limit
                        })
                # save binding object to avoid ObjectDeletedError
                # in case binding is concurrently deleted from the DB
                saved_binding = {
                    'net': binding.network_id,
                    'agent': binding.dhcp_agent_id
                }
                try:
                    # do not notify agent if it considered dead
                    # so when it is restarted it won't see network delete
                    # notifications on its queue
                    self.remove_network_from_dhcp_agent(context,
                                                        binding.dhcp_agent_id,
                                                        binding.network_id,
                                                        notify=False)
                except dhcpagentscheduler.NetworkNotHostedByDhcpAgent:
                    # measures against concurrent operation
                    LOG.debug(
                        "Network %(net)s already removed from DHCP "
                        "agent %(agent)s", saved_binding)
                    # still continue and allow concurrent scheduling attempt
                except Exception:
                    LOG.exception(
                        _LE("Unexpected exception occurred while "
                            "removing network %(net)s from agent "
                            "%(agent)s"), saved_binding)

                if cfg.CONF.network_auto_schedule:
                    self._schedule_network(context, saved_binding['net'],
                                           dhcp_notifier)
        except Exception:
            # we want to be thorough and catch whatever is raised
            # to avoid loop abortion
            LOG.exception(
                _LE("Exception encountered during network "
                    "rescheduling"))
Example #36
 def setUp(self):
     super(TestRevisionPlugin, self).setUp()
     self.cp = manager.NeutronManager.get_plugin()
     self.l3p = (
         manager.NeutronManager.get_service_plugins()['L3_ROUTER_NAT'])
     self.ctx = nctx.get_admin_context()
Example #37
    def test_update_csr_firewall_port_id(self):

        with self.router(tenant_id=self._tenant_id) as r, \
                self.subnet() as s1, \
                self.subnet(cidr='20.0.0.0/24') as s2:

            body = self._router_interface_action(
                'add',
                r['router']['id'],
                s1['subnet']['id'],
                None)
            port_id1 = body['port_id']

            body = self._router_interface_action(
                'add',
                r['router']['id'],
                s2['subnet']['id'],
                None)
            port_id2 = body['port_id']

            self.fake_vendor_ext['if_list']['port']['id'] = port_id1
            self.fake_vendor_ext['if_list']['direction'] = 'inside'
            self.mock_get_hosting_info.return_value = self.fake_vendor_ext

            with self.firewall(port_id=port_id1,
                 direction='both') as fw:
                ctx = context.get_admin_context()
                fw_id = fw['firewall']['id']
                status_data = {'acl_id': 100}

                res = self.callbacks.set_firewall_status(ctx, fw_id,
                    const.ACTIVE, status_data)

                # update direction on same port
                data = {'firewall': {'name': 'firewall_2',
                    'direction': 'both', 'port_id': port_id2}}
                req = self.new_update_request('firewalls', data,
                    fw['firewall']['id'])
                req.environ['neutron.context'] = context.Context(
                    '', 'test-tenant')
                res = self.deserialize(self.fmt,
                req.get_response(self.ext_api))

                csrfw = self.lookup_firewall_csr_association(ctx,
                    fw['firewall']['id'])

                self.assertEqual('firewall_2', res['firewall']['name'])
                self.assertEqual(port_id2, csrfw['port_id'])
                self.assertEqual('both', csrfw['direction'])

                # Can't be in a PENDING_XXX state for delete cleanup.
                with ctx.session.begin(subtransactions=True):
                    fw_db = self.plugin._get_firewall(ctx, fw_id)
                    fw_db['status'] = const.ACTIVE
                    ctx.session.flush()
            self._router_interface_action('remove',
                r['router']['id'],
                s1['subnet']['id'],
                None)
            self._router_interface_action(
                'remove',
                r['router']['id'],
                s2['subnet']['id'],
                None)
Example #38
 def _get_port(self, context, port_id):
     _core_plugin = manager.NeutronManager.get_plugin()
     # TODO(tmorin): should not need an admin context
     return _core_plugin.get_port(n_context.get_admin_context(), port_id)
Example #39
 def _test_remove_all_hosting_devices(self):
     """Removes all hosting devices created during a test."""
     devmgr = manager.NeutronManager.get_service_plugins()[
         cisco_constants.DEVICE_MANAGER]
     context = n_context.get_admin_context()
     devmgr.delete_all_hosting_devices(context, True)
Example #40
 def setUp(self):
     super(TestIpamDriverLoader, self).setUp()
     self.ctx = context.get_admin_context()
Example #41
 def periodic_tasks(self, raise_on_error=False):
     """Tasks to be run at a periodic interval."""
     ctxt = context.get_admin_context()
     self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
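
A hedged sketch of one way such a hook could be driven on a fixed interval, using the looping-call helper imported elsewhere in these examples; the `service` instance and the 60-second interval are assumptions.

from neutron.openstack.common import loopingcall

# Hedged wiring sketch; 'service' is an assumed instance of the class
# that defines periodic_tasks() above.
timer = loopingcall.FixedIntervalLoopingCall(service.periodic_tasks,
                                             raise_on_error=False)
timer.start(interval=60)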
Example #42
 def test_create_pool_port_no_port(self):
     with self.pool() as pool:
         self.assertRaises(n_exc.DBError,
                           h_db.add_pool_port,
                           context.get_admin_context(),
                           pool['pool']['id'], None)
Example #43
    def _get_all_data(self,
                      get_ports=True,
                      get_floating_ips=True,
                      get_routers=True):
        admin_context = qcontext.get_admin_context()
        networks = []
        # this method is used by the ML2 driver so it can't directly invoke
        # the self.get_(ports|networks) methods
        plugin = manager.NeutronManager.get_plugin()
        all_networks = plugin.get_networks(admin_context) or []
        for net in all_networks:
            mapped_network = self._get_mapped_network_with_subnets(net)
            flips_n_ports = mapped_network
            if get_floating_ips:
                flips_n_ports = self._get_network_with_floatingips(
                    mapped_network)

            if get_ports:
                ports = []
                net_filter = {'network_id': [net.get('id')]}
                net_ports = plugin.get_ports(admin_context,
                                             filters=net_filter) or []
                for port in net_ports:
                    mapped_port = self._map_state_and_status(port)
                    mapped_port['attachment'] = {
                        'id': port.get('device_id'),
                        'mac': port.get('mac_address'),
                    }
                    mapped_port = self._extend_port_dict_binding(
                        admin_context, mapped_port)
                    ports.append(mapped_port)
                flips_n_ports['ports'] = ports

            if flips_n_ports:
                networks.append(flips_n_ports)

        data = {'networks': networks}

        if get_routers and self.l3_plugin:
            routers = []
            all_routers = self.l3_plugin.get_routers(admin_context) or []
            for router in all_routers:
                interfaces = []
                mapped_router = self._map_state_and_status(router)
                router_filter = {
                    'device_owner': [const.DEVICE_OWNER_ROUTER_INTF],
                    'device_id': [router.get('id')]
                }
                router_ports = self.get_ports(admin_context,
                                              filters=router_filter) or []
                for port in router_ports:
                    net_id = port.get('network_id')
                    subnet_id = port['fixed_ips'][0]['subnet_id']
                    intf_details = self._get_router_intf_details(
                        admin_context, net_id, subnet_id)
                    interfaces.append(intf_details)
                mapped_router['interfaces'] = interfaces

                routers.append(mapped_router)

            data.update({'routers': routers})
        return data
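
A hedged sketch of consuming the serialized topology returned above; the top-level keys come from the method itself, while the `plugin` instance and the printed summary are illustrative only.

# Hedged usage sketch; 'plugin' is an assumed instance of the class that
# defines _get_all_data above.
data = plugin._get_all_data(get_ports=True,
                            get_floating_ips=False,
                            get_routers=True)
for net in data['networks']:
    print(net.get('id'), len(net.get('ports', [])))
for router in data.get('routers', []):
    print(router.get('id'), len(router.get('interfaces', [])))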
Example #44
def main():
    common_config.init(sys.argv[1:])
    common_config.setup_logging()
    register_options()
    grid.GridManager(context.get_admin_context()).sync(force_sync=True)
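
If a sync script like this is run directly rather than installed as a console entry point, the usual module guard applies:

if __name__ == '__main__':
    main()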
Example #45
 def setUp(self):
     core_plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin'
     super(L3DvrTestCase, self).setUp(plugin=core_plugin)
     self.core_plugin = manager.NeutronManager.get_plugin()
     self.ctx = context.get_admin_context()
     self.mixin = FakeL3Plugin()
Example #46
 def setUp(self):
     super(NsxDBTestCase, self).setUp()
     self.ctx = context.get_admin_context()
Example #47
 def setUp(self):
     plugin = 'neutron.plugins.ml2.plugin.Ml2Plugin'
     self.setup_coreplugin(plugin)
     super(L3DvrSchedulerTestCase, self).setUp()
     self.adminContext = q_context.get_admin_context()
     self.dut = L3DvrScheduler()
Example #48
 def _synchronize_state(self, sp):
     # If the plugin has been destroyed, stop the LoopingCall
     if not self._plugin:
         raise loopingcall.LoopingCallDone
     start = timeutils.utcnow()
     # Reset page cursor variables if necessary
     if sp.current_chunk == 0:
         sp.ls_cursor = sp.lr_cursor = sp.lp_cursor = 'start'
     LOG.info(_("Running state synchronization task. Chunk: %s"),
              sp.current_chunk)
     # Fetch chunk_size data from NSX
     try:
         (lswitches, lrouters,
          lswitchports) = (self._fetch_nsx_data_chunk(sp))
     except (api_exc.RequestTimeout, api_exc.NsxApiException):
         sleep_interval = self._sync_backoff
         # Cap max back off to 64 seconds
         self._sync_backoff = min(self._sync_backoff * 2, 64)
         LOG.exception(
             _("An error occurred while communicating with "
               "NSX backend. Will retry synchronization "
               "in %d seconds"), sleep_interval)
         return sleep_interval
     LOG.debug(_("Time elapsed querying NSX: %s"),
               timeutils.utcnow() - start)
     if sp.total_size:
         num_chunks = ((sp.total_size / sp.chunk_size) +
                       (sp.total_size % sp.chunk_size != 0))
     else:
         num_chunks = 1
     LOG.debug(_("Number of chunks: %d"), num_chunks)
     # Find objects which have changed on NSX side and need
     # to be synchronized
     (ls_uuids, lr_uuids,
      lp_uuids) = self._nsx_cache.process_updates(lswitches, lrouters,
                                                  lswitchports)
     # Process removed objects only at the last chunk
     scan_missing = (sp.current_chunk == num_chunks - 1
                     and not sp.init_sync_performed)
     if sp.current_chunk == num_chunks - 1:
         self._nsx_cache.process_deletes()
         ls_uuids = self._nsx_cache.get_lswitches(
             changed_only=not scan_missing)
         lr_uuids = self._nsx_cache.get_lrouters(
             changed_only=not scan_missing)
         lp_uuids = self._nsx_cache.get_lswitchports(
             changed_only=not scan_missing)
     LOG.debug(_("Time elapsed hashing data: %s"),
               timeutils.utcnow() - start)
     # Get an admin context
     ctx = context.get_admin_context()
     # Synchronize with database
     self._synchronize_lswitches(ctx, ls_uuids, scan_missing=scan_missing)
     self._synchronize_lrouters(ctx, lr_uuids, scan_missing=scan_missing)
     self._synchronize_lswitchports(ctx,
                                    lp_uuids,
                                    scan_missing=scan_missing)
     # Increase chunk counter
     LOG.info(
         _("Synchronization for chunk %(chunk_num)d of "
           "%(total_chunks)d performed"), {
               'chunk_num': sp.current_chunk + 1,
               'total_chunks': num_chunks
           })
     sp.current_chunk = (sp.current_chunk + 1) % num_chunks
     added_delay = 0
     if sp.current_chunk == 0:
         # Ensure init_sync_performed is True
         if not sp.init_sync_performed:
             sp.init_sync_performed = True
         # Add additional random delay
         added_delay = random.randint(0, self._max_rand_delay)
     LOG.debug(_("Time elapsed at end of sync: %s"),
               timeutils.utcnow() - start)
     return self._sync_interval / num_chunks + added_delay
Example #49
 def setUp(self):
     super(Ml2DBTestCase, self).setUp()
     self.ctx = context.get_admin_context()
     self.setup_coreplugin(PLUGIN_NAME)
Example #50
 def context(self):
     if 'neutron.context' not in self.environ:
         self.environ['neutron.context'] = context.get_admin_context()
     return self.environ['neutron.context']
Example #51
 def setUp(self):
     super(TestAutoScheduleSegments, self).setUp()
     self.plugin = self.driver
     self.segments_plugin = importutils.import_object(
         'neutron.services.segments.plugin.Plugin')
     self.ctx = context.get_admin_context()
Example #52
 def setUp(self):
     super(L3DvrTestCase, self).setUp()
     db.configure_db()
     self.ctx = context.get_admin_context()
     self.addCleanup(db.clear_db)
     self.mixin = l3_dvr_db.L3_NAT_with_dvr_db_mixin()
Example #53
 def _save_network(self, network_id):
     ctx = context.get_admin_context()
     with ctx.session.begin(subtransactions=True):
         ctx.session.add(models_v2.Network(id=network_id))
     network = ctx.session.query(models_v2.Network).one()
     return network.standard_attr_id
Example #54
 def setUp(self):
     super(ServiceTypeManagerTestCase, self).setUp()
     st_db.ServiceTypeManager._instance = None
     self.manager = st_db.ServiceTypeManager.get_instance()
     self.ctx = context.get_admin_context()
Example #55
    def setUp(self):
        super(L3SchedulerBaseTest, self).setUp()

        self.l3_plugin = l3_router_plugin.L3RouterPlugin()
        self.adminContext = context.get_admin_context()
        self.adminContext.tenant_id = '_func_test_tenant_'
Example #56
 def setUp(self):
     super(NsxDBTestCase, self).setUp()
     db.configure_db()
     self.ctx = context.get_admin_context()
     self.addCleanup(db.clear_db)
Example #57
 def _test_enforce_adminonly_attribute(self, action):
     admin_context = context.get_admin_context()
     target = {'shared': True}
     result = policy.enforce(admin_context, action, target)
     self.assertEqual(result, True)
Example #58
from neutron.db import agentschedulers_db
from neutron.db import l3_attrs_db
from neutron.db import model_base
from neutron.extensions import l3agentscheduler
from neutron import manager
from neutron.openstack.common.gettextutils import _LI, _LW
from neutron.openstack.common import log as logging
from neutron.openstack.common import loopingcall
from neutron.openstack.common import timeutils
import sys
import time

from neutron.api.rpc.handlers import l3_rpc
from neutron.common import config
from neutron import context as n_ctx

config.init(sys.argv[1:])

context = n_ctx.get_admin_context()

core_plugin = manager.NeutronManager.get_plugin()
subnet_ids = set([
    u'35eb4730-3230-4ace-b01b-2faf49a77c61',
    u'4f3649f7-e434-4dc7-872c-edb5844e8f6c',
    u'ae8720de-b61f-4046-9d52-9a167ba97a91',
    u'aab013e4-3da9-4dbf-9080-8ed069fe2164',
    u'f337c1e6-76eb-457b-be87-b0a8e7c10ea9',
    u'50ffd24d-bb79-4699-8bad-06ba470295f3',
    u'cb854358-58bb-4ece-ba02-9b6bacd2339c',
    u'b3867eb4-e903-4649-9442-80bb0209a025',
    u'e8eec67f-37cd-47a2-9d2e-b7647fe26495',
    u'17b6256e-5bbc-4055-a17d-887945ab117d',
    u'a8fdc483-a5c0-4430-b747-0fdb722b9941',
    u'08793e3b-50f0-4a09-8da1-ce3569d4db97',
Example #59
    def setUp(self):
        super(L3SchedulerBaseTest, self).setUp(PLUGIN_NAME)

        self.l3_plugin = l3_router_plugin.L3RouterPlugin()
        self.adminContext = context.get_admin_context()
        self.adminContext.tenant_id = _uuid()
Example #60
 def test_update_non_existent_port(self):
     ctx = context.get_admin_context()
     plugin = manager.NeutronManager.get_plugin()
     data = {'port': {'admin_state_up': False}}
     self.assertRaises(exc.PortNotFound, plugin.update_port, ctx,
                       'invalid-uuid', data)