Example #1
0
    def _test_create_policy_rule(self, plugin, q_ctx, t_ctx, pod_id,
                                 bottom_policy):
        """Create a top QoS policy plus a bandwidth-limit rule, then map a
        bottom policy to it.
        """
        tenant = 'test_prject_id'
        top_body = {'policy': {'name': 'test_qos',
                               'description': 'This policy limits the ports '
                                              'to 10Mbit max.',
                               'project_id': tenant}}
        top_policy = plugin.create_policy(q_ctx, top_body)

        new_rule = plugin.create_policy_rule(
            q_ctx, rule.QosBandwidthLimitRule, top_policy['id'],
            {"bandwidth_limit_rule": {"max_kbps": "10000"}})

        # the rule must show up on the refreshed policy
        fetched = plugin.get_policy(q_ctx, top_policy['id'])
        self.assertEqual(1, len(fetched['rules']))
        self.assertEqual(new_rule['id'], fetched['rules'][0]['id'])

        btm_id = uuidutils.generate_uuid()
        bottom_policy.append({'id': btm_id,
                              'name': btm_id,
                              'description': '',
                              'tenant_id': tenant,
                              'rules': []})
        db_api.create_resource_mapping(t_ctx, top_policy['id'], btm_id,
                                       pod_id, tenant, constants.RT_QOS)
Example #2
0
    def _test_delete_policy(self, plugin, q_ctx, t_ctx, pod_id, bottom_policy):
        """Verify deleting a top policy removes the mapped bottom policy."""
        tenant = 'test_prject_id'
        top_policy = plugin.create_policy(
            q_ctx,
            {'policy': {'name': 'test_qos',
                        'description': 'This policy limits the ports '
                                       'to 10Mbit max.',
                        'project_id': tenant}})

        btm_id = uuidutils.generate_uuid()
        bottom_policy.append({'id': btm_id, 'name': btm_id,
                              'description': '', 'tenant_id': tenant})
        db_api.create_resource_mapping(t_ctx, top_policy['id'], btm_id,
                                       pod_id, tenant, constants.RT_QOS)

        # the bottom policy must disappear along with the top one
        self.assertEqual(1, len(bottom_policy))
        plugin.delete_policy(q_ctx, top_policy['id'])
        self.assertEqual(0, len(bottom_policy))
Example #3
0
    def _test_create_policy_rule(self, plugin, q_ctx,
                                 t_ctx, pod_id, bottom_policy):
        """Attach a bandwidth-limit rule to a new policy and register a
        bottom-pod mapping for it.
        """
        owner = 'test_prject_id'
        created = plugin.create_policy(q_ctx, {
            'policy': {
                'name': 'test_qos',
                'description': ('This policy limits the ports '
                                'to 10Mbit max.'),
                'project_id': owner,
            },
        })

        limit_rule = plugin.create_policy_rule(
            q_ctx, rule.QosBandwidthLimitRule, created['id'],
            {"bandwidth_limit_rule": {"max_kbps": "10000"}})

        refreshed = plugin.get_policy(q_ctx, created['id'])
        self.assertEqual(1, len(refreshed['rules']))
        self.assertEqual(limit_rule['id'], refreshed['rules'][0]['id'])

        mapped_id = uuidutils.generate_uuid()
        bottom_policy.append({
            'id': mapped_id, 'name': mapped_id, 'description': '',
            'tenant_id': owner, 'rules': [],
        })
        db_api.create_resource_mapping(t_ctx, created['id'], mapped_id,
                                       pod_id, owner, constants.RT_QOS)
 def test_get_create_element_routing_conflict(self):
     """A pre-existing routing entry must make element creation fail."""
     pod = self._prepare_pod()
     res_id = 'fake_resource_id'
     res_type = 'fake_resource'
     # occupy the routing slot beforehand to trigger the conflict
     api.create_resource_mapping(self.t_ctx, res_id, None, pod['pod_id'],
                                 self.project_id, res_type)
     self.assertRaises(exceptions.RoutingCreateFail,
                       lock_handle.get_or_create_element,
                       self.t_ctx, self.q_ctx, self.project_id, pod,
                       {'id': res_id}, res_type, {'name': res_id},
                       list_resource, create_resource)
Example #5
0
 def test_get_create_element_routing_conflict(self):
     """get_or_create_element raises when a routing already exists."""
     pod = self._prepare_pod()
     target = 'fake_resource_id'
     kind = 'fake_resource'
     element = {'id': target}
     create_body = {'name': target}
     # insert a conflicting routing record first
     api.create_resource_mapping(self.t_ctx, target, None,
                                 pod['pod_id'], self.project_id, kind)
     self.assertRaises(exceptions.RoutingCreateFail,
                       lock_handle.get_or_create_element, self.t_ctx,
                       self.q_ctx, self.project_id, pod, element, kind,
                       create_body, list_resource, create_resource)
Example #6
0
    def ensure_resource_mapping(t_ctx, project_id, pod, entries):
        """Create any missing top-to-bottom resource mappings.

        :param t_ctx: tricircle context
        :param project_id: project id
        :param pod: bottom pod
        :param entries: a list of (top_id, bottom_id, resource_type) tuples.
        :return: None
        """
        for (top_id, btm_id, res_type) in entries:
            existing = db_api.get_bottom_id_by_top_id_region_name(
                t_ctx, top_id, pod['region_name'], res_type)
            if existing:
                # mapping already present, nothing to do
                continue
            db_api.create_resource_mapping(
                t_ctx, top_id, btm_id, pod['pod_id'], project_id, res_type)
Example #7
0
    def ensure_resource_mapping(t_ctx, project_id, pod, entries):
        """Idempotently register resource mappings for a bottom pod.

        :param t_ctx: tricircle context
        :param project_id: project id
        :param pod: bottom pod
        :param entries: a list of (top_id, bottom_id, resource_type) tuples.
        :return: None
        """
        region = pod['region_name']
        for top_id, bottom_id, resource_type in entries:
            mapped = db_api.get_bottom_id_by_top_id_region_name(
                t_ctx, top_id, region, resource_type)
            if not mapped:
                db_api.create_resource_mapping(t_ctx, top_id, bottom_id,
                                               pod['pod_id'], project_id,
                                               resource_type)
    def test_get_list_element_create_fail(self):
        """A listing failure must propagate and keep the routing intact."""
        pod = self._prepare_pod()
        res_id = 'fake_resource_id'
        res_type = 'fake_resource'
        element = {'id': res_id}
        create_body = {'name': res_id}
        routing = api.create_resource_mapping(self.t_ctx, res_id, None,
                                              pod['pod_id'], self.project_id,
                                              res_type)
        # age the routing so get_or_create_element treats it as expired
        api.update_resource_routing(self.t_ctx, routing['id'],
                                    {'created_at': constants.expire_time,
                                     'updated_at': constants.expire_time})

        def broken_list_resource(t_ctx, q_ctx, pod, body, _type):
            raise q_exceptions.ConnectionFailed()

        self.assertRaises(q_exceptions.ConnectionFailed,
                          lock_handle.get_or_create_element, self.t_ctx,
                          self.q_ctx, self.project_id, pod, element, res_type,
                          create_body, broken_list_resource, create_resource)
        # the original routing is not deleted
        routing = api.get_resource_routing(self.t_ctx, routing['id'])
        self.assertIsNone(routing['bottom_id'])
Example #9
0
    def _create_resource_for_project(self, job, project_id):
        """Register a resource routing for ``project_id`` based on ``job``."""
        fake_pod_id = uuidutils.generate_uuid()

        res_type, res_key = (
            constants.job_primary_resource_map[job['type']])
        # top and bottom ids are identical for this fixture
        shared_id = job['resource'][res_key]
        mapping = db_api.create_resource_mapping(
            self.context, shared_id, shared_id, fake_pod_id, project_id,
            res_type)
        self.assertIsNotNone(mapping)
Example #10
0
    def _test_update_policy(self, plugin, q_ctx, t_ctx,
                            pod_id, bottom_policy):
        """Update a policy both before and after a bottom mapping exists."""
        tenant = 'test_prject_id'
        top_policy = plugin.create_policy(
            q_ctx,
            {'policy': {'name': 'test_qos',
                        'description': 'This policy limits the ports '
                                       'to 10Mbit max.',
                        'project_id': tenant}})

        # update while no bottom policy is mapped yet
        result = plugin.update_policy(
            q_ctx, top_policy['id'],
            {'policy': {'name': 'test_updated_qos'}})
        self.assertEqual(top_policy['id'], result['id'])
        self.assertEqual('test_updated_qos', result['name'])

        btm_id = uuidutils.generate_uuid()
        bottom_policy.append({'id': btm_id, 'name': btm_id,
                              'description': '', 'tenant_id': tenant})
        db_api.create_resource_mapping(t_ctx, top_policy['id'], btm_id,
                                       pod_id, tenant, constants.RT_QOS)

        # with a mapping in place, the bottom policy is renamed as well
        result = plugin.update_policy(
            q_ctx, top_policy['id'], {'policy': {'name': 'test_policy'}})
        self.assertEqual('test_policy', result['name'])
        self.assertEqual('test_policy', bottom_policy[0]['name'])
Example #11
0
    def post(self, **kw):
        """Create a resource routing entry from the request body.

        The body must carry a ``routing`` dict whose ``top_id``,
        ``bottom_id``, ``pod_id``, ``project_id`` and ``resource_type``
        fields are all non-empty; values are stripped before use.
        Returns the new routing on success, or a formatted API error
        (403 unauthorized, 400 bad request, 409 duplicate, 500 failure).
        """
        context = t_context.extract_context_from_environ()

        # admin-only operation
        if not policy.enforce(context, policy.ADMIN_API_ROUTINGS_CREATE):
            return utils.format_api_error(
                403, _("Unauthorized to create resource routing"))

        if 'routing' not in kw:
            return utils.format_api_error(
                400, _("Request body not found"))

        routing = kw['routing']

        # reject missing or blank required fields
        required = ('top_id', 'bottom_id', 'pod_id',
                    'project_id', 'resource_type')
        for field in required:
            value = routing.get(field)
            if value is None or not value.strip():
                return utils.format_api_error(
                    400, _("Field %(field)s can not be empty") % {
                        'field': field})

        # the resource type should be properly provisioned.
        resource_type = routing.get('resource_type').strip()
        if not constants.is_valid_resource_type(resource_type):
            return utils.format_api_error(
                400, _('There is no such resource type'))

        try:
            new_routing = db_api.create_resource_mapping(
                context,
                routing.get('top_id').strip(),
                routing.get('bottom_id').strip(),
                routing.get('pod_id').strip(),
                routing.get('project_id').strip(),
                resource_type)
            # a falsy result means the mapping already exists
            if not new_routing:
                return utils.format_api_error(
                    409, _('Resource routing already exists'))
        except Exception as e:
            LOG.exception('Failed to create resource routing: '
                          '%(exception)s ', {'exception': e})
            return utils.format_api_error(
                500, _('Failed to create resource routing'))

        return {'routing': new_routing}
Example #12
0
    def post(self, **kw):
        """Create a resource routing entry from the request body.

        The body must carry a ``routing`` dict whose ``top_id``,
        ``bottom_id``, ``pod_id``, ``project_id`` and ``resource_type``
        fields are all non-empty; values are stripped before use.
        Returns the new routing on success, or a formatted API error
        (403 unauthorized, 400 bad request, 409 duplicate, 500 failure).
        """
        context = t_context.extract_context_from_environ()

        # admin policy gate for routing creation
        if not policy.enforce(context, policy.ADMIN_API_ROUTINGS_CREATE):
            return utils.format_api_error(
                403, _("Unauthorized to create resource routing"))

        if 'routing' not in kw:
            return utils.format_api_error(400, _("Request body not found"))

        routing = kw['routing']

        # every required field must be present and non-blank
        for field in ('top_id', 'bottom_id', 'pod_id', 'project_id',
                      'resource_type'):
            value = routing.get(field)
            if value is None or len(value.strip()) == 0:
                return utils.format_api_error(
                    400,
                    _("Field %(field)s can not be empty") % {'field': field})

        # the resource type should be properly provisioned.
        resource_type = routing.get('resource_type').strip()
        if not constants.is_valid_resource_type(resource_type):
            return utils.format_api_error(400,
                                          _('There is no such resource type'))

        try:
            top_id = routing.get('top_id').strip()
            bottom_id = routing.get('bottom_id').strip()
            pod_id = routing.get('pod_id').strip()
            project_id = routing.get('project_id').strip()

            routing = db_api.create_resource_mapping(context, top_id,
                                                     bottom_id, pod_id,
                                                     project_id, resource_type)
            # a falsy result means the mapping already exists
            if not routing:
                return utils.format_api_error(
                    409, _('Resource routing already exists'))
        except Exception as e:
            LOG.exception(
                _LE('Failed to create resource routing: '
                    '%(exception)s '), {'exception': e})
            return utils.format_api_error(
                500, _('Failed to create resource routing'))

        return {'routing': routing}
Example #13
0
    def test_get_create_element_routing_expire_resource_missing(self):
        """An expired routing with no bottom resource triggers re-creation."""
        pod = self._prepare_pod()
        res_id = 'fake_resource_id'
        res_type = 'fake_resource'
        routing = api.create_resource_mapping(self.t_ctx, res_id, None,
                                              pod['pod_id'], self.project_id,
                                              res_type)
        # push the routing past its expiry window
        api.update_resource_routing(self.t_ctx, routing['id'],
                                    {'created_at': constants.expire_time,
                                     'updated_at': constants.expire_time})

        is_new, bottom_id = lock_handle.get_or_create_element(
            self.t_ctx, self.q_ctx, self.project_id, pod, {'id': res_id},
            res_type, {'name': res_id}, list_resource, create_resource)
        self.assertTrue(is_new)
        self.assertEqual(bottom_id, RES[0]['id'])
Example #14
0
    def _prepare_project_id_for_job(self, job):
        """Return a project id usable for creating ``job``.

        Segment-rule-setup jobs carry their own project id; any other job
        type gets a generated project id plus a matching resource routing
        record so the job can look it up.
        """
        if job['type'] == constants.JT_SEG_RULE_SETUP:
            return job['resource']['project_id']

        project_id = uuidutils.generate_uuid()
        pod_id = uuidutils.generate_uuid()

        res_type, res_key = (
            constants.job_primary_resource_map[job['type']])
        routing = db_api.create_resource_mapping(
            self.context, job['resource'][res_key],
            job['resource'][res_key], pod_id, project_id, res_type)
        self.assertIsNotNone(routing)

        return project_id
Example #15
0
    def _prepare_project_id_for_job(self, job):
        """Return a project id suitable for creating ``job``.

        Jobs that carry their own project id (segment rule setup and
        resource recycle) reuse it; any other job type gets a generated
        project id plus a matching resource routing record.
        """
        job_type = job['type']
        carries_own_project = job_type in (constants.JT_SEG_RULE_SETUP,
                                           constants.JT_RESOURCE_RECYCLE)
        if carries_own_project:
            project_id = job['resource']['project_id']
        else:
            project_id = uuidutils.generate_uuid()
            fake_pod_id = uuidutils.generate_uuid()

            res_type, res_key = (
                constants.job_primary_resource_map[job_type])
            shared_id = job['resource'][res_key]
            routing = db_api.create_resource_mapping(
                self.context, shared_id, shared_id, fake_pod_id, project_id,
                res_type)
            self.assertIsNotNone(routing)

        return project_id
Example #16
0
    def test_get_create_element_routing_expire_resource_missing(self):
        """Expired routing plus missing bottom resource yields a new one."""
        pod = self._prepare_pod()
        target = 'fake_resource_id'
        kind = 'fake_resource'
        element = {'id': target}
        create_body = {'name': target}
        stale = api.create_resource_mapping(self.t_ctx, target, None,
                                            pod['pod_id'], self.project_id,
                                            kind)
        # make the routing look stale
        expired_stamps = {'created_at': constants.expire_time,
                          'updated_at': constants.expire_time}
        api.update_resource_routing(self.t_ctx, stale['id'], expired_stamps)

        is_new, bottom_id = lock_handle.get_or_create_element(
            self.t_ctx, self.q_ctx, self.project_id, pod, element, kind,
            create_body, list_resource, create_resource)
        self.assertTrue(is_new)
        self.assertEqual(bottom_id, RES[0]['id'])
Example #17
0
    def test_get_list_element_create_fail(self):
        """Connection errors while listing must not remove the routing."""
        pod = self._prepare_pod()
        target_id = 'fake_resource_id'
        target_type = 'fake_resource'

        routing = api.create_resource_mapping(
            self.t_ctx, target_id, None, pod['pod_id'], self.project_id,
            target_type)
        # expire the routing so the lock handle retries the resource
        api.update_resource_routing(
            self.t_ctx, routing['id'],
            {'created_at': constants.expire_time,
             'updated_at': constants.expire_time})

        def failing_list(t_ctx, q_ctx, pod, body, _type):
            raise q_exceptions.ConnectionFailed()

        self.assertRaises(
            q_exceptions.ConnectionFailed, lock_handle.get_or_create_element,
            self.t_ctx, self.q_ctx, self.project_id, pod, {'id': target_id},
            target_type, {'name': target_id}, failing_list, create_resource)
        # the original routing is not deleted
        routing = api.get_resource_routing(self.t_ctx, routing['id'])
        self.assertIsNone(routing['bottom_id'])
Example #18
0
    def test_configure_extra_routes_ew_gw(self, router_update, subnet_update):
        """East-west routing with gateways.

        Builds two bottom pods, three top routers and a mesh of
        networks/ports, runs the configure_route job for the east-west
        router (R3), then checks the host_routes pushed to subnets and the
        bridge routes pushed to routers.

        ``router_update`` and ``subnet_update`` are mocks of the client
        update calls (their ``assert_has_calls`` is used below).
        """
        # register the two bottom pods
        for i in (1, 2):
            pod_dict = {
                'pod_id': 'pod_id_%d' % i,
                'region_name': 'pod_%d' % i,
                'az_name': 'az_name_%d' % i
            }
            db_api.create_pod(self.context, pod_dict)
        # three top routers; R3 is the shared east-west router (see the
        # topology comment further down)
        for i in (1, 2, 3):
            router = {'id': 'top_router_%d_id' % i}
            TOP_ROUTER.append(router)

        # gateway in podX is attached to routerX
        gw_map = {
            'net1_pod1_gw': '10.0.1.1',
            'net2_pod2_gw': '10.0.2.1',
            'net3_pod1_gw': '10.0.3.3',
            'net3_pod2_gw': '10.0.3.4'
        }
        # interfaces are all attached to router3
        inf_map = {
            'net1_pod1_inf': '10.0.1.3',
            'net2_pod2_inf': '10.0.2.3',
            'net3_pod1_inf': '10.0.3.5',
            'net3_pod2_inf': '10.0.3.6'
        }
        get_gw_map = lambda n_idx, p_idx: gw_map['net%d_pod%d_gw' %
                                                 (n_idx, p_idx)]
        get_inf_map = lambda n_idx, p_idx: inf_map['net%d_pod%d_inf' %
                                                   (n_idx, p_idx)]
        bridge_infos = []

        # each tuple is (network index, router index, pod index)
        for net_idx, router_idx, pod_idx in [(1, 1, 1), (3, 1, 1), (1, 3, 1),
                                             (3, 3, 1), (2, 2, 2), (3, 2, 2),
                                             (2, 3, 2), (3, 3, 2)]:
            region_name = 'pod_%d' % pod_idx
            pod_id = 'pod_id_%d' % pod_idx
            top_router_id = 'top_router_%d_id' % router_idx

            network = {'id': 'network_%d_id' % net_idx}
            router = {'id': 'router_%d_%d_id' % (pod_idx, router_idx)}
            subnet = {
                'id': 'subnet_%d_id' % net_idx,
                'network_id': network['id'],
                'cidr': '10.0.%d.0/24' % net_idx,
                'gateway_ip': get_gw_map(net_idx, pod_idx)
            }
            port = {
                'network_id': network['id'],
                'device_id': router['id'],
                'device_owner': 'network:router_interface',
                'fixed_ips': [{
                    'subnet_id': subnet['id']
                }]
            }
            # R3 ports use the interface addresses, R1/R2 ports the gateways
            if router_idx == 3:
                port['fixed_ips'][0]['ip_address'] = get_inf_map(
                    net_idx, pod_idx)
            else:
                port['fixed_ips'][0]['ip_address'] = get_gw_map(
                    net_idx, pod_idx)

            # for netX attached to R3 in podX, add a VM port plus the
            # bridge network/subnet/port used for east-west traffic
            if net_idx == pod_idx and router_idx == 3:
                vm_idx = net_idx * 2 + pod_idx + 10
                vm_ip = '10.0.%d.%d' % (net_idx, vm_idx)
                vm_port = {
                    'id': 'vm_port_%d_id' % vm_idx,
                    'network_id': network['id'],
                    'device_id': 'vm%d_id' % vm_idx,
                    'device_owner': 'compute:None',
                    'fixed_ips': [{
                        'subnet_id': subnet['id'],
                        'ip_address': vm_ip
                    }]
                }
                bridge_network = {'id': 'bridge_network_%d_id' % net_idx}
                bridge_subnet = {
                    'id': 'bridge_subnet_%d_id' % net_idx,
                    'network_id': bridge_network['id'],
                    'cidr': '100.0.1.0/24',
                    'gateway_ip': '100.0.1.1'
                }
                bridge_cidr = bridge_subnet['cidr']
                bridge_port_ip = '%s.%d' % (
                    bridge_cidr[:bridge_cidr.rindex('.')], 2 + pod_idx)
                # remembered for the router-route assertions at the end
                bridge_infos.append({
                    'router_id': router['id'],
                    'bridge_ip': bridge_port_ip,
                    'vm_ip': vm_ip
                })
                bridge_port = {
                    'network_id':
                    bridge_network['id'],
                    'device_id':
                    router['id'],
                    'device_owner':
                    'network:router_gateway',
                    'fixed_ips': [{
                        'subnet_id': bridge_subnet['id'],
                        'ip_address': bridge_port_ip
                    }]
                }
                RES_MAP[region_name]['port'].append(vm_port)
                RES_MAP[region_name]['network'].append(bridge_network)
                RES_MAP[region_name]['subnet'].append(bridge_subnet)
                RES_MAP[region_name]['port'].append(bridge_port)

            RES_MAP[region_name]['network'].append(network)
            RES_MAP[region_name]['subnet'].append(subnet)
            RES_MAP[region_name]['port'].append(port)
            RES_MAP[region_name]['router'].append(router)

            db_api.create_resource_mapping(self.context, top_router_id,
                                           router['id'], pod_id, 'project_id',
                                           constants.RT_ROUTER)
        # the above codes create this topology
        # pod1: net1 is attached to R1, default gateway is set on R1
        #       net1 is attached to R3
        #       net3 is attached to R1, default gateway is set on R1
        #       net3 is attached to R3
        # pod2: net2 is attached to R2, default gateway is set on R2
        #       net2 is attached to R3
        #       net3 is attached to R2, default gateway is set on R2
        #       net3 is attached to R3

        # run the configure_route job for the east-west router R3
        target_router_id = 'top_router_3_id'
        project_id = uuidutils.generate_uuid()
        db_api.new_job(self.context, project_id, constants.JT_CONFIGURE_ROUTE,
                       target_router_id)
        self.xmanager.configure_route(
            self.context,
            payload={constants.JT_CONFIGURE_ROUTE: target_router_id})

        # for the following paths, packets will go to R3 via the interface
        # which is attached to R3
        # net1 in pod1 -> net2 in pod2
        # net2 in pod2 -> net1 in pod1
        # net3 in pod1 -> net2 in pod2
        # net3 in pod2 -> net1 in pod1
        expect_calls = [
            mock.call(
                self.context, 'subnet_1_id', {
                    'subnet': {
                        'host_routes': [{
                            'nexthop': get_inf_map(1, 1),
                            'destination': '10.0.2.0/24'
                        }]
                    }
                }),
            mock.call(
                self.context, 'subnet_2_id', {
                    'subnet': {
                        'host_routes': [{
                            'nexthop': get_inf_map(2, 2),
                            'destination': '10.0.1.0/24'
                        }]
                    }
                }),
            mock.call(
                self.context, 'subnet_3_id', {
                    'subnet': {
                        'host_routes': [{
                            'nexthop': get_inf_map(3, 1),
                            'destination': '10.0.2.0/24'
                        }]
                    }
                }),
            mock.call(
                self.context, 'subnet_3_id', {
                    'subnet': {
                        'host_routes': [{
                            'nexthop': get_inf_map(3, 2),
                            'destination': '10.0.1.0/24'
                        }]
                    }
                })
        ]
        subnet_update.assert_has_calls(expect_calls, any_order=True)
        # each bottom R3 instance gets a /32 route to the VM behind the
        # other pod's R3 instance via the bridge port ip
        expect_calls = []
        for i in (0, 1):
            bridge_info = bridge_infos[i]
            expect_call = mock.call(
                self.context, bridge_infos[1 - i]['router_id'], {
                    'router': {
                        'routes': [{
                            'nexthop': bridge_info['bridge_ip'],
                            'destination': bridge_info['vm_ip'] + '/32'
                        }]
                    }
                })
            expect_calls.append(expect_call)
        router_update.assert_has_calls(expect_calls, any_order=True)
Example #19
0
    def test_setup_shadow_ports(self, mock_setup):
        """Shadow ports are created and kept active across pods by the job.

        Two pods share one network, each hosting a real VM port; running
        the shadow-port-setup job in one pod must create a force-up shadow
        port mirroring the other pod's port, register a follow-up job for
        the peer pod (``mock_setup`` is the mocked helper for that), and
        re-activate a shadow port that has fallen back to DOWN.
        """
        project_id = uuidutils.generate_uuid()
        net1_id = uuidutils.generate_uuid()
        subnet1_id = uuidutils.generate_uuid()
        port1_id = uuidutils.generate_uuid()
        port2_id = uuidutils.generate_uuid()
        # two pods, both mapping the same top network
        for i in (1, 2):
            pod_id = 'pod_id_%d' % i
            pod_dict = {'pod_id': pod_id,
                        'region_name': 'pod_%d' % i,
                        'az_name': 'az_name_%d' % i}
            db_api.create_pod(self.context, pod_dict)
            db_api.create_resource_mapping(
                self.context, net1_id, net1_id, pod_id, project_id,
                constants.RT_NETWORK)
        TOP_NETWORK.append({'id': net1_id, 'tenant_id': project_id})
        # one real VM port per pod, bound to different hosts
        BOTTOM1_PORT.append({'id': port1_id,
                             'network_id': net1_id,
                             'device_owner': 'compute:None',
                             'binding:vif_type': 'ovs',
                             'binding:host_id': 'host1',
                             'device_id': None,
                             'mac_address': 'fa:16:3e:d4:01:03',
                             'fixed_ips': [{'subnet_id': subnet1_id,
                                            'ip_address': '10.0.1.3'}]})
        BOTTOM2_PORT.append({'id': port2_id,
                             'network_id': net1_id,
                             'device_owner': 'compute:None',
                             'binding:vif_type': 'ovs',
                             'binding:host_id': 'host2',
                             'device_id': None,
                             'mac_address': 'fa:16:3e:d4:01:03',
                             'fixed_ips': [{'subnet_id': subnet1_id,
                                            'ip_address': '10.0.1.4'}]})
        # OVS agents for both hosts so tunnel endpoints are known
        db_api.ensure_agent_exists(
            self.context, 'pod_id_1', 'host1', q_constants.AGENT_TYPE_OVS,
            '192.168.1.101')
        db_api.ensure_agent_exists(
            self.context, 'pod_id_2', 'host2', q_constants.AGENT_TYPE_OVS,
            '192.168.1.102')

        resource_id = 'pod_id_1#' + net1_id
        db_api.new_job(self.context, project_id,
                       constants.JT_SHADOW_PORT_SETUP, resource_id)
        self.xmanager.setup_shadow_ports(
            self.context,
            payload={constants.JT_SHADOW_PORT_SETUP: resource_id})

        # check shadow port in pod1 is created and updated
        client1 = FakeClient('pod_1')
        sd_ports = client1.list_ports(
            self.context, [{'key': 'device_owner',
                            'comparator': 'eq',
                            'value': constants.DEVICE_OWNER_SHADOW}])
        self.assertEqual(sd_ports[0]['fixed_ips'][0]['ip_address'],
                         '10.0.1.4')
        self.assertIn(constants.PROFILE_FORCE_UP,
                      sd_ports[0]['binding:profile'])

        # check job to setup shadow ports for pod2 is registered
        mock_setup.assert_called_once_with(self.context, project_id,
                                           'pod_id_2', net1_id)

        # update shadow port to down and test again, this is possible when we
        # succeed to create shadow port but fail to update it to active
        profile = sd_ports[0]['binding:profile']
        profile.pop(constants.PROFILE_FORCE_UP)
        client1.update_ports(self.context, sd_ports[0]['id'],
                             {'port': {'status': q_constants.PORT_STATUS_DOWN,
                                       'binding:profile': profile}})

        db_api.new_job(self.context, project_id,
                       constants.JT_SHADOW_PORT_SETUP, resource_id)
        self.xmanager.setup_shadow_ports(
            self.context,
            payload={constants.JT_SHADOW_PORT_SETUP: resource_id})

        # check shadow port is updated to active again
        sd_port = client1.get_ports(self.context, sd_ports[0]['id'])
        self.assertIn(constants.PROFILE_FORCE_UP, sd_port['binding:profile'])

        # manually trigger shadow ports setup in pod2
        resource_id = 'pod_id_2#' + net1_id
        db_api.new_job(self.context, project_id,
                       constants.JT_SHADOW_PORT_SETUP, resource_id)
        self.xmanager.setup_shadow_ports(
            self.context,
            payload={constants.JT_SHADOW_PORT_SETUP: resource_id})

        # pod2 must now hold a shadow copy of pod1's port
        client2 = FakeClient('pod_2')
        sd_ports = client2.list_ports(
            self.context, [{'key': 'device_owner',
                            'comparator': 'eq',
                            'value': constants.DEVICE_OWNER_SHADOW}])
        self.assertEqual(sd_ports[0]['fixed_ips'][0]['ip_address'],
                         '10.0.1.3')
Example #20
0
    def test_configure_extra_routes_ew_gw(self, router_update, subnet_update):
        """Test east-west and gateway route configuration across two pods.

        Builds a two-pod topology (see the in-line comment after setup),
        triggers the configure_route job for the east-west router R3, then
        verifies the expected subnet host_routes and router routes updates.
        """
        for i in (1, 2):
            pod_dict = {'pod_id': 'pod_id_%d' % i,
                        'region_name': 'pod_%d' % i,
                        'az_name': 'az_name_%d' % i}
            db_api.create_pod(self.context, pod_dict)
        for i in (1, 2, 3):
            router = {'id': 'top_router_%d_id' % i}
            TOP_ROUTER.append(router)

        # gateway in podX is attached to routerX
        gw_map = {'net1_pod1_gw': '10.0.1.1',
                  'net2_pod2_gw': '10.0.2.1',
                  'net3_pod1_gw': '10.0.3.3',
                  'net3_pod2_gw': '10.0.3.4'}
        # interfaces are all attached to router3
        inf_map = {'net1_pod1_inf': '10.0.1.3',
                   'net2_pod2_inf': '10.0.2.3',
                   'net3_pod1_inf': '10.0.3.5',
                   'net3_pod2_inf': '10.0.3.6'}

        # PEP 8 (E731): use def rather than assigning lambdas to names
        def get_gw_map(n_idx, p_idx):
            return gw_map['net%d_pod%d_gw' % (n_idx, p_idx)]

        def get_inf_map(n_idx, p_idx):
            return inf_map['net%d_pod%d_inf' % (n_idx, p_idx)]

        bridge_infos = []

        for net_idx, router_idx, pod_idx in [(1, 1, 1), (3, 1, 1), (1, 3, 1),
                                             (3, 3, 1), (2, 2, 2), (3, 2, 2),
                                             (2, 3, 2), (3, 3, 2)]:
            region_name = 'pod_%d' % pod_idx
            pod_id = 'pod_id_%d' % pod_idx
            top_router_id = 'top_router_%d_id' % router_idx

            network = {'id': 'network_%d_id' % net_idx}
            router = {'id': 'router_%d_%d_id' % (pod_idx, router_idx)}
            subnet = {'id': 'subnet_%d_id' % net_idx,
                      'network_id': network['id'],
                      'cidr': '10.0.%d.0/24' % net_idx,
                      'gateway_ip': get_gw_map(net_idx, pod_idx)}
            port = {'network_id': network['id'],
                    'device_id': router['id'],
                    'device_owner': 'network:router_interface',
                    'fixed_ips': [{'subnet_id': subnet['id']}]}
            # R3 interfaces use the dedicated interface IPs; R1/R2 use the
            # subnet gateway IPs
            if router_idx == 3:
                port['fixed_ips'][0][
                    'ip_address'] = get_inf_map(net_idx, pod_idx)
            else:
                port['fixed_ips'][0][
                    'ip_address'] = get_gw_map(net_idx, pod_idx)

            if net_idx == pod_idx and router_idx == 3:
                vm_idx = net_idx * 2 + pod_idx + 10
                vm_ip = '10.0.%d.%d' % (net_idx, vm_idx)
                vm_port = {'id': 'vm_port_%d_id' % vm_idx,
                           'network_id': network['id'],
                           'device_id': 'vm%d_id' % vm_idx,
                           'device_owner': 'compute:None',
                           'fixed_ips': [{'subnet_id': subnet['id'],
                                          'ip_address': vm_ip}]}
                bridge_network = {'id': 'bridge_network_%d_id' % net_idx}
                bridge_subnet = {'id': 'bridge_subnet_%d_id' % net_idx,
                                 'network_id': bridge_network['id'],
                                 'cidr': '100.0.1.0/24',
                                 'gateway_ip': '100.0.1.1'}
                bridge_cidr = bridge_subnet['cidr']
                bridge_port_ip = '%s.%d' % (
                    bridge_cidr[:bridge_cidr.rindex('.')], 2 + pod_idx)
                bridge_infos.append({'router_id': router['id'],
                                     'bridge_ip': bridge_port_ip,
                                     'vm_ip': vm_ip})
                bridge_port = {
                    'network_id': bridge_network['id'],
                    'device_id': router['id'],
                    'device_owner': 'network:router_gateway',
                    'fixed_ips': [{'subnet_id': bridge_subnet['id'],
                                   'ip_address': bridge_port_ip}]
                }
                RES_MAP[region_name]['port'].append(vm_port)
                RES_MAP[region_name]['network'].append(bridge_network)
                RES_MAP[region_name]['subnet'].append(bridge_subnet)
                RES_MAP[region_name]['port'].append(bridge_port)

            RES_MAP[region_name]['network'].append(network)
            RES_MAP[region_name]['subnet'].append(subnet)
            RES_MAP[region_name]['port'].append(port)
            RES_MAP[region_name]['router'].append(router)

            db_api.create_resource_mapping(self.context, top_router_id,
                                           router['id'], pod_id, 'project_id',
                                           constants.RT_ROUTER)
        # the above codes create this topology
        # pod1: net1 is attached to R1, default gateway is set on R1
        #       net1 is attached to R3
        #       net3 is attached to R1, default gateway is set on R1
        #       net3 is attached to R3
        # pod2: net2 is attached to R2, default gateway is set on R2
        #       net2 is attached to R3
        #       net3 is attached to R2, default gateway is set on R2
        #       net3 is attached to R3

        target_router_id = 'top_router_3_id'
        project_id = uuidutils.generate_uuid()
        db_api.new_job(self.context, project_id,
                       constants.JT_CONFIGURE_ROUTE, target_router_id)
        self.xmanager.configure_route(
            self.context,
            payload={constants.JT_CONFIGURE_ROUTE: target_router_id})

        # for the following paths, packets will go to R3 via the interface
        # which is attached to R3
        # net1 in pod1 -> net2 in pod2
        # net2 in pod2 -> net1 in pod1
        # net3 in pod1 -> net2 in pod2
        # net3 in pod2 -> net1 in pod1
        expect_calls = [
            mock.call(self.context, 'subnet_1_id', {'subnet': {
                'host_routes': [{'nexthop': get_inf_map(1, 1),
                                 'destination': '10.0.2.0/24'}]}}),
            mock.call(self.context, 'subnet_2_id', {'subnet': {
                'host_routes': [{'nexthop': get_inf_map(2, 2),
                                 'destination': '10.0.1.0/24'}]}}),
            mock.call(self.context, 'subnet_3_id', {'subnet': {
                'host_routes': [{'nexthop': get_inf_map(3, 1),
                                 'destination': '10.0.2.0/24'}]}}),
            mock.call(self.context, 'subnet_3_id', {'subnet': {
                'host_routes': [{'nexthop': get_inf_map(3, 2),
                                 'destination': '10.0.1.0/24'}]}})]
        subnet_update.assert_has_calls(expect_calls, any_order=True)
        expect_calls = []
        # each router should receive a /32 route to the VM behind the peer
        # router via the peer's bridge IP
        for i in (0, 1):
            bridge_info = bridge_infos[i]
            expect_call = mock.call(
                self.context, bridge_infos[1 - i]['router_id'],
                {'router': {'routes': [
                    {'nexthop': bridge_info['bridge_ip'],
                     'destination': bridge_info['vm_ip'] + '/32'}]}})
            expect_calls.append(expect_call)
        router_update.assert_has_calls(expect_calls, any_order=True)
Example #21
0
    def test_setup_shadow_ports(self, mock_setup):
        """Verify shadow port creation/update for one network across pods."""
        project_id = uuidutils.generate_uuid()
        net1_id = uuidutils.generate_uuid()
        subnet1_id = uuidutils.generate_uuid()
        port1_id = uuidutils.generate_uuid()
        port2_id = uuidutils.generate_uuid()
        # two pods, the network mapped into both of them
        for idx in (1, 2):
            pod_id = 'pod_id_%d' % idx
            db_api.create_pod(self.context,
                              {'pod_id': pod_id,
                               'region_name': 'pod_%d' % idx,
                               'az_name': 'az_name_%d' % idx})
            db_api.create_resource_mapping(self.context, net1_id, net1_id,
                                           pod_id, project_id,
                                           constants.RT_NETWORK)
        TOP_NETWORK.append({'id': net1_id, 'tenant_id': project_id})
        # one real VM port in each pod, bound to different hosts
        BOTTOM1_PORT.append(
            {'id': port1_id,
             'network_id': net1_id,
             'device_owner': 'compute:None',
             'binding:vif_type': 'ovs',
             'binding:host_id': 'host1',
             'mac_address': 'fa:16:3e:d4:01:03',
             'fixed_ips': [{'subnet_id': subnet1_id,
                            'ip_address': '10.0.1.3'}]})
        BOTTOM2_PORT.append(
            {'id': port2_id,
             'network_id': net1_id,
             'device_owner': 'compute:None',
             'binding:vif_type': 'ovs',
             'binding:host_id': 'host2',
             'mac_address': 'fa:16:3e:d4:01:03',
             'fixed_ips': [{'subnet_id': subnet1_id,
                            'ip_address': '10.0.1.4'}]})
        db_api.ensure_agent_exists(self.context, 'pod_id_1', 'host1',
                                   q_constants.AGENT_TYPE_OVS, '192.168.1.101')
        db_api.ensure_agent_exists(self.context, 'pod_id_2', 'host2',
                                   q_constants.AGENT_TYPE_OVS, '192.168.1.102')

        resource_id = 'pod_id_1#' + net1_id
        db_api.new_job(self.context, project_id,
                       constants.JT_SHADOW_PORT_SETUP, resource_id)
        self.xmanager.setup_shadow_ports(
            self.context,
            payload={constants.JT_SHADOW_PORT_SETUP: resource_id})

        # check shadow port in pod1 is created and updated
        client1 = FakeClient('pod_1')
        sd_ports = client1.list_ports(
            self.context, [{'key': 'device_owner',
                            'comparator': 'eq',
                            'value': constants.DEVICE_OWNER_SHADOW}])
        self.assertEqual(sd_ports[0]['fixed_ips'][0]['ip_address'], '10.0.1.4')
        self.assertIn(constants.PROFILE_FORCE_UP,
                      sd_ports[0]['binding:profile'])

        # check job to setup shadow ports for pod2 is registered
        mock_setup.assert_called_once_with(self.context, project_id,
                                           'pod_id_2', net1_id)

        # update shadow port to down and test again, this is possible when we
        # succeed to create shadow port but fail to update it to active
        profile = sd_ports[0]['binding:profile']
        profile.pop(constants.PROFILE_FORCE_UP)
        client1.update_ports(
            self.context, sd_ports[0]['id'],
            {'port': {'status': q_constants.PORT_STATUS_DOWN,
                      'binding:profile': profile}})

        db_api.new_job(self.context, project_id,
                       constants.JT_SHADOW_PORT_SETUP, resource_id)
        self.xmanager.setup_shadow_ports(
            self.context,
            payload={constants.JT_SHADOW_PORT_SETUP: resource_id})

        # check shadow port is updated to active again
        sd_port = client1.get_ports(self.context, sd_ports[0]['id'])
        self.assertIn(constants.PROFILE_FORCE_UP, sd_port['binding:profile'])

        # manually trigger shadow ports setup in pod2
        resource_id = 'pod_id_2#' + net1_id
        db_api.new_job(self.context, project_id,
                       constants.JT_SHADOW_PORT_SETUP, resource_id)
        self.xmanager.setup_shadow_ports(
            self.context,
            payload={constants.JT_SHADOW_PORT_SETUP: resource_id})

        # pod2 should now hold a shadow port for the VM living in pod1
        client2 = FakeClient('pod_2')
        sd_ports = client2.list_ports(
            self.context, [{'key': 'device_owner',
                            'comparator': 'eq',
                            'value': constants.DEVICE_OWNER_SHADOW}])
        self.assertEqual(sd_ports[0]['fixed_ips'][0]['ip_address'], '10.0.1.3')
Example #22
0
    def _setup_router_one_pod(self, ctx, t_pod, b_pod, t_client, t_net,
                              t_router, t_bridge_net, t_bridge_subnet,
                              is_ext_net_pod):
        """Create and wire up the bottom router resources for one pod.

        Creates (or reuses) the bottom router mapped to t_router, attaches
        the bridge network as the bottom router's gateway, syncs the router
        interface for t_net between top and bottom, then reconciles floating
        IPs: creating missing bottom floating IPs and removing stale ones,
        with shadow-port bookkeeping when the external network is hosted by
        a different pod than b_pod.

        :param ctx: tricircle context used for client and DB operations
        :param t_pod: top pod record
        :param b_pod: bottom pod record the router is set up in
        :param t_client: client for the top region
        :param t_net: top network whose router interface is synced
        :param t_router: top router dict (id, tenant_id, distributed,
            external_gateway_info are read)
        :param t_bridge_net: top bridge network dict
        :param t_bridge_subnet: top bridge subnet dict
        :param is_ext_net_pod: unused, kept for compatibility (see NOTE)
        """
        # NOTE(zhiyuan) after the bridge network combination, external network
        # is attached to a separate router, which is created in central plugin,
        # so is_ext_net_pod is not used in the current implementation, but we
        # choose to keep this parameter since it's an important attribute of a
        # pod and we may need to use it later.
        b_client = self._get_client(b_pod['region_name'])

        is_distributed = t_router.get('distributed', False)
        router_body = {'router': {'name': t_router['id'],
                                  'distributed': is_distributed}}
        project_id = t_router['tenant_id']

        # create bottom router in target bottom pod
        _, b_router_id = self.helper.prepare_bottom_element(
            ctx, project_id, b_pod, t_router, constants.RT_ROUTER, router_body)

        # create top bridge port
        q_ctx = None  # no need to pass neutron context when using client
        t_bridge_port_id = self.helper.get_bridge_interface(
            ctx, q_ctx, project_id, t_pod, t_bridge_net['id'], b_router_id)

        # create bottom bridge port
        # if target bottom pod is hosting real external network, we create
        # another bottom router and attach the bridge network as internal
        # network, but this work is done by central plugin when user sets
        # router gateway.
        t_bridge_port = t_client.get_ports(ctx, t_bridge_port_id)
        (is_new, b_bridge_port_id, b_bridge_subnet_id,
         b_bridge_net_id) = self.helper.get_bottom_bridge_elements(
            ctx, project_id, b_pod, t_bridge_net, True, t_bridge_subnet, None)

        # we attach the bridge port as router gateway
        # add_gateway is update operation, which can run multiple times
        gateway_ip = t_bridge_port['fixed_ips'][0]['ip_address']
        b_client.action_routers(
            ctx, 'add_gateway', b_router_id,
            {'network_id': b_bridge_net_id,
             'enable_snat': False,
             'external_fixed_ips': [{'subnet_id': b_bridge_subnet_id,
                                     'ip_address': gateway_ip}]})

        # attach internal port to bottom router
        t_ports = self._get_router_interfaces(t_client, ctx, t_router['id'],
                                              t_net['id'])
        b_net_id = db_api.get_bottom_id_by_top_id_region_name(
            ctx, t_net['id'], b_pod['region_name'], constants.RT_NETWORK)
        if b_net_id:
            b_ports = self._get_router_interfaces(b_client, ctx, b_router_id,
                                                  b_net_id)
        else:
            b_ports = []
        if not t_ports and b_ports:
            # remove redundant bottom interface
            b_port = b_ports[0]
            request_body = {'port_id': b_port['id']}
            b_client.action_routers(ctx, 'remove_interface', b_router_id,
                                    request_body)
        elif t_ports and not b_ports:
            # create new bottom interface
            t_port = t_ports[0]

            # only consider ipv4 address currently
            t_subnet_id = t_port['fixed_ips'][0]['subnet_id']
            t_subnet = t_client.get_subnets(ctx, t_subnet_id)

            if CONF.enable_api_gateway:
                (b_net_id,
                 subnet_map) = self.helper.prepare_bottom_network_subnets(
                    ctx, q_ctx, project_id, b_pod, t_net, [t_subnet])
            else:
                # without api gateway, top and bottom ids are identical
                (b_net_id,
                 subnet_map) = (t_net['id'], {t_subnet['id']: t_subnet['id']})

            # the gateway ip of bottom subnet is set to the ip of t_port, so
            # we just attach the bottom subnet to the bottom router and neutron
            # server in the bottom pod will create the interface for us, using
            # the gateway ip.
            b_client.action_routers(ctx, 'add_interface', b_router_id,
                                    {'subnet_id': subnet_map[t_subnet_id]})

        # routers without an external gateway need no floating ip handling
        if not t_router['external_gateway_info']:
            return

        # handle floatingip
        t_ext_net_id = t_router['external_gateway_info']['network_id']
        t_fips = t_client.list_floatingips(ctx, [{'key': 'floating_network_id',
                                                  'comparator': 'eq',
                                                  'value': t_ext_net_id}])
        # skip unbound top floatingip
        t_ip_fip_map = dict([(fip['floating_ip_address'],
                              fip) for fip in t_fips if fip['port_id']])
        mappings = db_api.get_bottom_mappings_by_top_id(ctx, t_ext_net_id,
                                                        constants.RT_NETWORK)
        # bottom external network should exist
        b_ext_pod, b_ext_net_id = mappings[0]
        b_ext_client = self._get_client(b_ext_pod['region_name'])
        b_fips = b_ext_client.list_floatingips(
            ctx, [{'key': 'floating_network_id', 'comparator': 'eq',
                   'value': b_ext_net_id}])
        b_ip_fip_map = dict([(fip['floating_ip_address'],
                              fip) for fip in b_fips])
        # diff top vs bottom floating ips by address
        add_fips = [ip for ip in t_ip_fip_map if ip not in b_ip_fip_map]
        del_fips = [ip for ip in b_ip_fip_map if ip not in t_ip_fip_map]

        for add_fip in add_fips:
            fip = t_ip_fip_map[add_fip]
            t_int_port_id = fip['port_id']
            b_int_port_id = db_api.get_bottom_id_by_top_id_region_name(
                ctx, t_int_port_id, b_pod['region_name'], constants.RT_PORT)
            if not b_int_port_id:
                LOG.warning(_LW('Port %(port_id)s associated with floating ip '
                                '%(fip)s is not mapped to bottom pod'),
                            {'port_id': t_int_port_id, 'fip': add_fip})
                continue
            t_int_port = t_client.get_ports(ctx, t_int_port_id)
            if t_int_port['network_id'] != t_net['id']:
                # only handle floating ip association for the given top network
                continue

            if b_ext_pod['pod_id'] != b_pod['pod_id']:
                # if the internal port is not located in the external network
                # pod, we need to create a copied port in that pod for floating
                # ip association purpose
                t_int_net_id = t_int_port['network_id']
                t_int_subnet_id = t_int_port['fixed_ips'][0]['subnet_id']
                port_body = {
                    'port': {
                        'tenant_id': project_id,
                        'admin_state_up': True,
                        'name': constants.shadow_port_name % t_int_port['id'],
                        'network_id': t_int_net_id,
                        'fixed_ips': [{'ip_address': t_int_port[
                            'fixed_ips'][0]['ip_address']}]
                    }
                }
                self.helper.prepare_bottom_element(
                    ctx, project_id, b_ext_pod, t_int_port,
                    constants.RT_SD_PORT, port_body)
                # create routing entries for copied network and subnet so we
                # can easily find them during central network and subnet
                # deletion, create_resource_mapping will catch DBDuplicateEntry
                # exception and ignore it so it's safe to call this function
                # multiple times
                db_api.create_resource_mapping(ctx, t_int_net_id, t_int_net_id,
                                               b_ext_pod['pod_id'], project_id,
                                               constants.RT_SD_NETWORK)
                db_api.create_resource_mapping(ctx, t_int_subnet_id,
                                               t_int_subnet_id,
                                               b_ext_pod['pod_id'], project_id,
                                               constants.RT_SD_SUBNET)

            self._safe_create_bottom_floatingip(
                ctx, b_pod, b_ext_client, b_ext_net_id, add_fip,
                b_int_port_id)

        for del_fip in del_fips:
            fip = b_ip_fip_map[del_fip]
            if b_ext_pod['pod_id'] != b_pod['pod_id'] and fip['port_id']:
                # expire the routing entry for copy port
                with ctx.session.begin():
                    core.update_resources(
                        ctx, models.ResourceRouting,
                        [{'key': 'bottom_id', 'comparator': 'eq',
                          'value': fip['port_id']},
                         {'key': 'resource_type', 'comparator': 'eq',
                          'value': constants.RT_SD_PORT}],
                        {'bottom_id': None,
                         'created_at': constants.expire_time,
                         'updated_at': constants.expire_time})
                # delete copy port
                b_ext_client.delete_ports(ctx, fip['port_id'])
                # delete the expired entry, even if this deletion fails, we
                # still have a chance that lock_handle module will delete it
                with ctx.session.begin():
                    core.delete_resources(ctx, models.ResourceRouting,
                                          [{'key': 'top_id',
                                            'comparator': 'eq',
                                            'value': fip['port_id']},
                                           {'key': 'resource_type',
                                            'comparator': 'eq',
                                            'value': constants.RT_SD_PORT}])
                    # delete port before floating ip disassociation, copy
                    # network and copy subnet are deleted during central
                    # network and subnet deletion
            b_ext_client.delete_floatingips(ctx, fip['id'])