def test_delete_port_chain(self):
        project_id = TEST_TENANT_ID
        q_ctx = FakeNeutronContext()
        t_ctx = context.get_db_context()
        self._basic_pod_setup()
        fake_plugin = FakeSfcPlugin()
        ids = {'t_ppg_id': [uuidutils.generate_uuid()],
               'b_ppg_id': [uuidutils.generate_uuid()],
               't_fc_id': [uuidutils.generate_uuid()],
               'b_fc_id': [uuidutils.generate_uuid()]}
        t_pc_id1, _ = self._prepare_port_chain_test(
            project_id, t_ctx, 'pod_1', 0, True, ids)

        fake_plugin.delete_port_chain(q_ctx, t_pc_id1)
        pc_mappings = db_api.get_bottom_mappings_by_top_id(
            t_ctx, t_pc_id1, constants.RT_PORT_CHAIN)
        self.assertEqual(len(TOP_PORTCHAINS), 0)
        self.assertEqual(len(BOTTOM1_PORTCHAINS), 0)
        self.assertEqual(len(pc_mappings), 0)

        t_pc_id2, _ = self._prepare_port_chain_test(
            project_id, t_ctx, 'pod_1', 0, True, ids)
        BOTTOM1_PORTCHAINS.pop()
        fake_plugin.delete_port_chain(q_ctx, t_pc_id2)
        pc_mappings = db_api.get_bottom_mappings_by_top_id(
            t_ctx, t_pc_id2, constants.RT_PORT_CHAIN)
        self.assertEqual(len(TOP_PORTCHAINS), 0)
        self.assertEqual(len(pc_mappings), 0)

    def test_delete_flow_classifier(self):
        project_id = TEST_TENANT_ID
        q_ctx = FakeNeutronContext()
        t_ctx = context.get_db_context()
        self._basic_pod_setup()
        fake_plugin = FakeFcPlugin()

        src_port_id = uuidutils.generate_uuid()

        t_fc_id1, _ = self._prepare_flow_classifier_test(
            project_id, t_ctx, 'pod_1', 0, src_port_id, True)
        fake_plugin.delete_flow_classifier(q_ctx, t_fc_id1)
        ppg_mappings = db_api.get_bottom_mappings_by_top_id(
            t_ctx, t_fc_id1, constants.RT_FLOW_CLASSIFIER)
        self.assertEqual(len(TOP_FLOWCLASSIFIERS), 0)
        self.assertEqual(len(BOTTOM1_FLOWCLASSIFIERS), 0)
        self.assertEqual(len(ppg_mappings), 0)

        t_fc_id2, _ = self._prepare_flow_classifier_test(
            project_id, t_ctx, 'pod_1', 0, src_port_id, True)
        BOTTOM1_FLOWCLASSIFIERS.pop()
        fake_plugin.delete_flow_classifier(q_ctx, t_fc_id2)
        ppg_mappings = db_api.get_bottom_mappings_by_top_id(
            t_ctx, t_fc_id2, constants.RT_FLOW_CLASSIFIER)
        self.assertEqual(len(TOP_FLOWCLASSIFIERS), 0)
        self.assertEqual(len(ppg_mappings), 0)

    def test_delete_port_pair(self):
        project_id = TEST_TENANT_ID
        q_ctx = FakeNeutronContext()
        t_ctx = context.get_db_context()
        self._basic_pod_setup()
        fake_plugin = FakeSfcPlugin()

        ingress = uuidutils.generate_uuid()
        egress = uuidutils.generate_uuid()
        t_pp1_id, _ = self._prepare_port_pair_test(
            project_id, t_ctx, 'pod_1', 0, ingress, egress, True)
        fake_plugin.delete_port_pair(q_ctx, t_pp1_id)
        ppg_mappings = db_api.get_bottom_mappings_by_top_id(
            t_ctx, t_pp1_id, constants.RT_PORT_PAIR_GROUP)
        self.assertEqual(len(TOP_PORTPAIRS), 0)
        self.assertEqual(len(BOTTOM1_PORTPAIRS), 0)
        self.assertEqual(len(ppg_mappings), 0)

        t_pp2_id, _ = self._prepare_port_pair_test(
            project_id, t_ctx, 'pod_1', 0, ingress, egress, True)
        BOTTOM1_PORTPAIRS.pop()
        fake_plugin.delete_port_pair(q_ctx, t_pp2_id)
        ppg_mappings = db_api.get_bottom_mappings_by_top_id(
            t_ctx, t_pp2_id, constants.RT_PORT_PAIR_GROUP)
        self.assertEqual(len(TOP_PORTPAIRS), 0)
        self.assertEqual(len(ppg_mappings), 0)

    def test_delete_port_pair_group(self):
        project_id = TEST_TENANT_ID
        q_ctx = FakeNeutronContext()
        t_ctx = context.get_db_context()
        self._basic_pod_setup()
        fake_plugin = FakeSfcPlugin()

        t_pp_id = uuidutils.generate_uuid()
        b_pp_id = uuidutils.generate_uuid()

        t_ppg_id1, _ = self._prepare_port_pair_group_test(
            project_id, t_ctx, 'pod_1', 0, [t_pp_id], True, [b_pp_id])
        fake_plugin.delete_port_pair_group(q_ctx, t_ppg_id1)
        ppg_mappings = db_api.get_bottom_mappings_by_top_id(
            t_ctx, t_ppg_id1, constants.RT_PORT_PAIR_GROUP)
        self.assertEqual(len(TOP_PORTPAIRGROUPS), 0)
        self.assertEqual(len(BOTTOM1_PORTPAIRGROUPS), 0)
        self.assertEqual(len(ppg_mappings), 0)

        t_ppg_id2, _ = self._prepare_port_pair_group_test(
            project_id, t_ctx, 'pod_1', 0, [t_pp_id], True, [b_pp_id])
        BOTTOM1_PORTPAIRGROUPS.pop()
        fake_plugin.delete_port_pair_group(q_ctx, t_ppg_id2)
        ppg_mappings = db_api.get_bottom_mappings_by_top_id(
            t_ctx, t_ppg_id2, constants.RT_PORT_PAIR_GROUP)
        self.assertEqual(len(TOP_PORTPAIRGROUPS), 0)
        self.assertEqual(len(ppg_mappings), 0)
Example #9
    def post(self, **kw):
        context = t_context.extract_context_from_environ()

        if 'volumeAttachment' not in kw:
            return utils.format_nova_error(
                400, _('volumeAttachment is not set'))
        body = kw['volumeAttachment']
        if 'volumeId' not in body:
            return utils.format_nova_error(
                400, _('Invalid input for field/attribute volumeAttachment'))

        server_mappings = db_api.get_bottom_mappings_by_top_id(
            context, self.server_id, constants.RT_SERVER)
        if not server_mappings:
            return utils.format_nova_error(404, _('Instance %s could not be '
                                                  'found.') % self.server_id)
        volume_mappings = db_api.get_bottom_mappings_by_top_id(
            context, body['volumeId'], constants.RT_VOLUME)
        if not volume_mappings:
            return utils.format_nova_error(
                404, _('Volume %s could not be found') % body['volumeId'])

        server_pod_name = server_mappings[0][0]['pod_name']
        volume_pod_name = volume_mappings[0][0]['pod_name']
        if server_pod_name != volume_pod_name:
            LOG.error(_LE('Server %(server)s is in pod %(server_pod)s and '
                          'volume %(volume)s is in pod %(volume_pod)s, which '
                          'are not the same.'),
                      {'server': self.server_id,
                       'server_pod': server_pod_name,
                       'volume': body['volumeId'],
                       'volume_pod': volume_pod_name})
            return utils.format_nova_error(
                400, _('Server and volume not in the same pod'))

        device = None
        if 'device' in body:
            device = body['device']
            # this regular expression is copied from nova/block_device.py
            match = re.match('(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$',
                             device)
            if not match:
                return utils.format_nova_error(
                    400, _('The supplied device path (%s) is '
                           'invalid.') % device)

        client = self._get_client(server_pod_name)
        volume = client.action_server_volumes(
            context, 'create_server_volume',
            server_mappings[0][1], volume_mappings[0][1], device)
        return {'volumeAttachment': volume.to_dict()}
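A quick, hedged sanity check of the device path validation above. The pattern is the one the handler copies from nova/block_device.py; the sample paths are illustrative and not taken from the original:

import re

DEVICE_RE = '(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$'

# accepted: a /dev/ prefix, a drive name, and an optional partition number
for dev in ('/dev/vdb', '/dev/xvda1', '/dev/sdc2'):
    assert re.match(DEVICE_RE, dev)

# rejected: missing /dev/ prefix, or no trailing drive letter
for dev in ('vdb', '/dev/1b'):
    assert not re.match(DEVICE_RE, dev)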
Example #10
    def get_real_shadow_resource_iterator(t_ctx, res_type, res_id):
        shadow_res_type = t_constants.REAL_SHADOW_TYPE_MAP[res_type]
        mappings = db_api.get_bottom_mappings_by_top_id(
            t_ctx, res_id, res_type)
        mappings.extend(db_api.get_bottom_mappings_by_top_id(
            t_ctx, res_id, shadow_res_type))

        processed_pod_set = set()
        for pod, bottom_res_id in mappings:
            region_name = pod['region_name']
            if region_name in processed_pod_set:
                continue
            processed_pod_set.add(region_name)
            yield pod, bottom_res_id
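A hedged usage sketch for the iterator above; the resource type and the client calls are assumptions borrowed from other examples on this page, not part of the original function:

for pod, b_port_id in get_real_shadow_resource_iterator(
        t_ctx, t_constants.RT_PORT, t_port_id):
    # each region is visited once, whether the routing entry points at the
    # real resource or at its shadow copy
    b_client = self._get_client(pod['region_name'])
    b_port = b_client.get_ports(t_ctx, b_port_id)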
Example #11
    def post(self, **kw):
        context = t_context.extract_context_from_environ()

        if 'volumeAttachment' not in kw:
            pecan.abort(400, 'Request body not found')
            return
        body = kw['volumeAttachment']
        if 'volumeId' not in body:
            pecan.abort(400, 'Volume not set')
            return

        server_mappings = db_api.get_bottom_mappings_by_top_id(
            context, self.server_id, constants.RT_SERVER)
        if not server_mappings:
            pecan.abort(404, 'Server not found')
            return
        volume_mappings = db_api.get_bottom_mappings_by_top_id(
            context, body['volumeId'], constants.RT_VOLUME)
        if not volume_mappings:
            pecan.abort(404, 'Volume not found')
            return

        server_pod_name = server_mappings[0][0]['pod_name']
        volume_pod_name = volume_mappings[0][0]['pod_name']
        if server_pod_name != volume_pod_name:
            LOG.error(_LE('Server %(server)s is in pod %(server_pod)s and '
                          'volume %(volume)s is in pod %(volume_pod)s, which '
                          'are not the same.'),
                      {'server': self.server_id,
                       'server_pod': server_pod_name,
                       'volume': body['volumeId'],
                       'volume_pod': volume_pod_name})
            pecan.abort(400, 'Server and volume not in the same pod')
            return

        device = None
        if 'device' in body:
            device = body['device']
            # this regular expression is copied from nova/block_device.py
            match = re.match('(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$',
                             device)
            if not match:
                pecan.abort(400, 'Invalid device path')
                return

        client = self._get_client(server_pod_name)
        volume = client.action_server_volumes(
            context, 'create_server_volume',
            server_mappings[0][1], volume_mappings[0][1], device)
        return {'volumeAttachment': volume.to_dict()}
Example #12
 def test_get_bottom_mappings_by_top_id(self):
     for i in xrange(3):
         pod = {'pod_id': 'test_pod_uuid_%d' % i,
                'pod_name': 'test_pod_%d' % i,
                'az_name': 'test_az_uuid_%d' % i}
         api.create_pod(self.context, pod)
     route1 = {
         'top_id': 'top_uuid',
         'pod_id': 'test_pod_uuid_0',
         'resource_type': 'port'}
     route2 = {
         'top_id': 'top_uuid',
         'pod_id': 'test_pod_uuid_1',
         'bottom_id': 'bottom_uuid_1',
         'resource_type': 'port'}
     route3 = {
         'top_id': 'top_uuid',
         'pod_id': 'test_pod_uuid_2',
         'bottom_id': 'bottom_uuid_2',
         'resource_type': 'neutron'}
     routes = [route1, route2, route3]
     with self.context.session.begin():
         for route in routes:
             core.create_resource(
                 self.context, models.ResourceRouting, route)
     mappings = api.get_bottom_mappings_by_top_id(self.context,
                                                  'top_uuid', 'port')
     self.assertEqual('test_pod_uuid_1', mappings[0][0]['pod_id'])
     self.assertEqual('bottom_uuid_1', mappings[0][1])
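The fixtures above pin down the contract the other call sites rely on: results are (pod, bottom_id) pairs filtered by resource type, and routings whose bottom_id is not yet set (route1) are skipped. A minimal in-memory sketch of that contract, not the real DB-backed implementation:

def bottom_mappings_sketch(routes, pods_by_id, top_id, resource_type):
    mappings = []
    for route in routes:
        if route['top_id'] != top_id:
            continue
        if route['resource_type'] != resource_type:
            continue
        if not route.get('bottom_id'):
            # routing registered but bottom resource not created yet (route1)
            continue
        mappings.append((pods_by_id[route['pod_id']], route['bottom_id']))
    return mappings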
Example #13
    def create_security_group_rule(self, q_context, security_group_rule):
        rule = security_group_rule['security_group_rule']
        if rule['remote_group_id']:
            raise n_exceptions.RemoteGroupNotSupported()
        sg_id = rule['security_group_id']
        sg = self.get_security_group(q_context, sg_id)
        if sg['name'] == 'default':
            raise n_exceptions.DefaultGroupUpdateNotSupported()

        new_rule = super(TricircleSecurityGroupMixin,
                         self).create_security_group_rule(q_context,
                                                          security_group_rule)

        t_context = context.get_context_from_neutron_context(q_context)
        mappings = db_api.get_bottom_mappings_by_top_id(
            t_context, sg_id, constants.RT_SG)

        try:
            for pod, b_sg_id in mappings:
                client = self._get_client(pod['pod_name'])
                rule['security_group_id'] = b_sg_id
                self._safe_create_security_group_rule(
                    t_context, client, {'security_group_rule': rule})
        except Exception:
            super(TricircleSecurityGroupMixin,
                  self).delete_security_group_rule(q_context, new_rule['id'])
            raise n_exceptions.BottomPodOperationFailure(
                resource='security group rule', pod_name=pod['pod_name'])
        return new_rule
Example #14
 def update_port_pair_group_precommit(self, context):
     plugin_context = context._plugin_context
     t_ctx = t_context.get_context_from_neutron_context(
         context._plugin_context)
     port_pair_group = context.current
     mappings = db_api.get_bottom_mappings_by_top_id(
         t_ctx, port_pair_group['id'], t_constants.RT_PORT_PAIR_GROUP)
     if mappings:
         portchain_id = self._get_chain_id_by_group_id(
             plugin_context, context._plugin, port_pair_group['id'])
         if port_pair_group['port_pairs']:
             net_id = self._get_net_id_by_portpairgroups(
                 plugin_context, context._plugin, [port_pair_group['id']])
         elif context.original['port_pairs']:
             portpair_id = context.original['port_pairs'][0]
             port_pair = context._plugin._get_port_pair(
                 plugin_context, portpair_id)
             net_id = self._get_net_id_by_port_id(
                 plugin_context, port_pair['ingress'])
         else:
             net_id = ''
         if not portchain_id and not net_id:
             return
         self.xjob_handler.sync_service_function_chain(
             t_ctx, port_pair_group['project_id'], portchain_id, net_id,
             t_constants.POD_NOT_SPECIFIED)
Example #15
 def update_port_pair_group_precommit(self, context):
     plugin_context = context._plugin_context
     t_ctx = t_context.get_context_from_neutron_context(
         context._plugin_context)
     port_pair_group = context.current
     mappings = db_api.get_bottom_mappings_by_top_id(
         t_ctx, port_pair_group['id'], t_constants.RT_PORT_PAIR_GROUP)
     if mappings:
         portchain_id = self._get_chain_id_by_group_id(
             plugin_context, context._plugin, port_pair_group['id'])
         if port_pair_group['port_pairs']:
             net_id = self._get_net_id_by_portpairgroups(
                 plugin_context, context._plugin, [port_pair_group['id']])
         elif context.original['port_pairs']:
             portpair_id = context.original['port_pairs'][0]
             port_pair = context._plugin._get_port_pair(
                 plugin_context, portpair_id)
             net_id = self._get_net_id_by_port_id(plugin_context,
                                                  port_pair['ingress'])
         else:
             net_id = ''
         if not portchain_id and not net_id:
             return
         self.xjob_handler.sync_service_function_chain(
             t_ctx, port_pair_group['project_id'], portchain_id, net_id,
             t_constants.POD_NOT_SPECIFIED)
Example #16
    def delete_security_group_rule(self, q_context, _id):
        rule = self.get_security_group_rule(q_context, _id)
        if rule['remote_group_id']:
            raise n_exceptions.RemoteGroupNotSupported()
        sg_id = rule['security_group_id']
        sg = self.get_security_group(q_context, sg_id)
        if sg['name'] == 'default':
            raise n_exceptions.DefaultGroupUpdateNotSupported()

        t_context = context.get_context_from_neutron_context(q_context)
        mappings = db_api.get_bottom_mappings_by_top_id(
            t_context, sg_id, constants.RT_SG)

        try:
            for pod, b_sg_id in mappings:
                client = self._get_client(pod['pod_name'])
                rule['security_group_id'] = b_sg_id
                b_sg = client.get_security_groups(t_context, b_sg_id)
                for b_rule in b_sg['security_group_rules']:
                    if not self._compare_rule(b_rule, rule):
                        continue
                    self._safe_delete_security_group_rule(t_context, client,
                                                          b_rule['id'])
                    break
        except Exception:
            raise n_exceptions.BottomPodOperationFailure(
                resource='security group rule', pod_name=pod['pod_name'])

        super(TricircleSecurityGroupMixin,
              self).delete_security_group_rule(q_context, _id)
Example #17
    def update_subnet(self, ctx, payload):
        """update bottom subnet

        If the bottom pod id equals POD_NOT_SPECIFIED, dispatch a job for every
        mapped bottom pod via RPC; otherwise update the subnet in the specified
        pod.

        :param ctx: tricircle context
        :param payload: dict whose key is JT_SUBNET_UPDATE and whose value
        is "bottom_pod_id#top_subnet_id"
        :return: None
        """
        (b_pod_id, t_subnet_id) = payload[
            constants.JT_SUBNET_UPDATE].split('#')
        if b_pod_id == constants.POD_NOT_SPECIFIED:
            mappings = db_api.get_bottom_mappings_by_top_id(
                ctx, t_subnet_id, constants.RT_SUBNET)
            b_pods = [mapping[0] for mapping in mappings]
            for b_pod in b_pods:
                self.xjob_handler.update_subnet(ctx, t_subnet_id,
                                                b_pod['pod_id'])
            return

        t_client = self._get_client()
        t_subnet = t_client.get_subnets(ctx, t_subnet_id)
        if not t_subnet:
            return
        b_pod = db_api.get_pod(ctx, b_pod_id)
        b_region_name = b_pod['region_name']
        b_subnet_id = db_api.get_bottom_id_by_top_id_region_name(
            ctx, t_subnet_id, b_region_name, constants.RT_SUBNET)
        b_client = self._get_client(region_name=b_region_name)
        b_subnet = b_client.get_subnets(ctx, b_subnet_id)
        b_gateway_ip = b_subnet['gateway_ip']

        # we need to remove the bottom subnet gateway ip from the top subnet
        # allocation pools
        b_allocation_pools = helper.NetworkHelper.get_bottom_subnet_pools(
            t_subnet, b_gateway_ip)

        # bottom gateway_ip doesn't need to be updated, because it is reserved
        # by top pod.
        # name is not allowed to be updated, because it is used by
        # lock_handle to retrieve bottom/local resources that have been
        # created but not registered in the resource routing table
        body = {
            'subnet':
                {'description': t_subnet['description'],
                 'enable_dhcp': t_subnet['enable_dhcp'],
                 'allocation_pools': b_allocation_pools,
                 'host_routes': t_subnet['host_routes'],
                 'dns_nameservers': t_subnet['dns_nameservers']}
        }
        try:
            b_client.update_subnets(ctx, b_subnet_id, body)
        except q_cli_exceptions.NotFound:
            LOG.error(_LE('subnet: %(subnet_id)s not found, '
                          'pod name: %(name)s'),
                      {'subnet_id': b_subnet_id, 'name': b_region_name})
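Judging from the unpacking at the top of this handler, the payload value puts the bottom pod id first and the top subnet id second; a hedged sketch of how a caller would build it:

# target one specific bottom pod
payload = {constants.JT_SUBNET_UPDATE: '%s#%s' % (b_pod_id, t_subnet_id)}

# or let the handler fan the job out to every mapped bottom pod
payload = {constants.JT_SUBNET_UPDATE:
           '%s#%s' % (constants.POD_NOT_SPECIFIED, t_subnet_id)}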
Example #18
 def test_prepare_neutron_element(self):
     t_pod, b_pod = self._prepare_pod()
     port = {'id': 'top_port_id'}
     body = {'port': {'name': 'top_port_id'}}
     _, bottom_port_id = self.controller._prepare_neutron_element(
         self.context, b_pod, port, 'port', body)
     mappings = api.get_bottom_mappings_by_top_id(self.context,
                                                  'top_port_id', 'port')
     self.assertEqual(bottom_port_id, mappings[0][1])
Example #20
 def test_get_bottom_mappings_by_top_id(self):
     self._create_pod(0, 'test_az_uuid_0')
     self._create_pod(1, 'test_az_uuid_1')
     self._create_pod(2, 'test_az_uuid_2')
     self._create_resource_mappings()
     mappings = api.get_bottom_mappings_by_top_id(self.context,
                                                  'top_uuid', 'network')
     self.assertEqual('test_pod_uuid_1', mappings[0][0]['pod_id'])
     self.assertEqual('top_uuid', mappings[0][1])
Example #21
    def get_trunks(self, context, filters=None, fields=None,
                   sorts=None, limit=None, marker=None, page_reverse=False):
        ret = []
        bottom_top_map = {}
        top_bottom_map = {}
        t_ctx = t_context.get_context_from_neutron_context(context)

        route_filters = [{'key': 'resource_type',
                          'comparator': 'eq',
                          'value': t_constants.RT_TRUNK}]
        routes = db_api.list_resource_routings(t_ctx, route_filters)
        for route in routes:
            bottom_top_map[route['bottom_id']] = route['top_id']
            top_bottom_map[route['top_id']] = route['bottom_id']

        if limit:
            if marker:
                mappings = db_api.get_bottom_mappings_by_top_id(
                    t_ctx, marker, t_constants.RT_TRUNK)
                # if mapping exists, we retrieve trunk information
                # from bottom, otherwise from top
                if mappings:
                    pod_id = mappings[0][0]['pod_id']
                    current_pod = db_api.get_pod(t_ctx, pod_id)
                    ret = self._get_trunks_from_pod_with_limit(
                        context, current_pod, bottom_top_map, top_bottom_map,
                        filters, limit, marker)
                else:
                    ret = self._get_trunks_from_top_with_limit(
                        context, top_bottom_map, filters, limit, marker)
            else:
                current_pod = db_api.get_next_bottom_pod(t_ctx)
                # if current_pod exists, we retrieve trunk information
                # from bottom, otherwise from top
                if current_pod:
                    ret = self._get_trunks_from_pod_with_limit(
                        context, current_pod, bottom_top_map, top_bottom_map,
                        filters, limit, None)
                else:
                    ret = self._get_trunks_from_top_with_limit(
                        context, top_bottom_map, filters, limit, None)
        else:
            pods = db_api.list_pods(t_ctx)
            _filters = self._transform_trunk_filters(filters, top_bottom_map)
            for pod in pods:
                if not pod['az_name']:
                    continue
                client = self._get_client(pod['region_name'])
                pod_trunks = client.list_trunks(t_ctx, filters=_filters)
                ret.extend(pod_trunks)
            ret = self._map_trunks_from_bottom_to_top(ret, bottom_top_map)
            top_trunks = self._get_trunks_from_top(context,
                                                   top_bottom_map, filters)
            ret.extend(top_trunks)

        return [super(TricircleTrunkPlugin, self)._fields(trunk, fields)
                for trunk in ret]
Example #22
    def _get_pod_by_top_id(self, context, _id):

        mappings = db_api.get_bottom_mappings_by_top_id(
            context, _id, cons.RT_VOLUME)

        if not mappings or len(mappings) != 1:
            return None

        return mappings[0][0]
Example #24
    def _handle_sg_rule_for_default_group(self, context, pod, default_sg,
                                          project_id):
        top_client = self._get_client()
        new_b_rules = []
        for t_rule in default_sg['security_group_rules']:
            if not t_rule['remote_group_id']:
                # leave sg_id empty here
                new_b_rules.append(
                    self._construct_bottom_rule(t_rule, ''))
                continue
            if t_rule['ethertype'] != 'IPv4':
                continue
            subnets = top_client.list_subnets(
                context, [{'key': 'tenant_id', 'comparator': 'eq',
                           'value': project_id}])
            bridge_ip_net = netaddr.IPNetwork('100.0.0.0/8')
            for subnet in subnets:
                ip_net = netaddr.IPNetwork(subnet['cidr'])
                if ip_net in bridge_ip_net:
                    continue
                # leave sg_id empty here
                new_b_rules.append(
                    self._construct_bottom_rule(t_rule, '',
                                                subnet['cidr']))

        mappings = db_api.get_bottom_mappings_by_top_id(
            context, default_sg['id'], constants.RT_SG)
        for pod, b_sg_id in mappings:
            client = self._get_client(pod['pod_name'])
            b_sg = client.get_security_groups(context, b_sg_id)
            add_rules = []
            del_rules = []
            match_index = set()
            for b_rule in b_sg['security_group_rules']:
                match = False
                for i, rule in enumerate(new_b_rules):
                    if self._compare_rule(b_rule, rule):
                        match = True
                        match_index.add(i)
                        break
                if not match:
                    del_rules.append(b_rule)
            for i, rule in enumerate(new_b_rules):
                if i not in match_index:
                    add_rules.append(rule)

            for del_rule in del_rules:
                self._safe_delete_security_group_rule(
                    context, client, del_rule['id'])
            if add_rules:
                rule_body = {'security_group_rules': []}
                for add_rule in add_rules:
                    add_rule['security_group_id'] = b_sg_id
                    rule_body['security_group_rules'].append(add_rule)
                self._safe_create_security_group_rule(context,
                                                      client, rule_body)
Example #25
    def _get_pod_by_top_id(self, context, _id):

        mappings = db_api.get_bottom_mappings_by_top_id(
            context, _id,
            cons.RT_VOLUME)

        if not mappings or len(mappings) != 1:
            return None

        return mappings[0][0]
Example #26
    def test_delete_mappings_by_top_id(self):
        self._create_pod(0, 'test_az_uuid_0')
        self._create_pod(1, 'test_az_uuid_1')
        self._create_pod(2, 'test_az_uuid_2')
        self._create_resource_mappings()
        top_id = 'top_uuid'

        api.delete_mappings_by_top_id(self.context, top_id,
                                      pod_id='test_pod_uuid_0')
        mappings = api.get_bottom_mappings_by_top_id(
            self.context, top_id, 'network')
        # entry in pod_uuid_0 is deleted, entry in pod_uuid_1 is left
        self.assertEqual(1, len(mappings))
        self.assertEqual('test_pod_uuid_1', mappings[0][0]['pod_id'])

        api.delete_mappings_by_top_id(self.context, top_id)
        mappings = api.get_bottom_mappings_by_top_id(
            self.context, top_id, 'network')
        self.assertEqual(0, len(mappings))
Example #27
 def test_prepare_neutron_element_create_res_exception(self, mock_method):
     mock_method.side_effect = FakeException()
     t_pod, b_pod = self._prepare_pod()
     port = {'id': 'top_port_id'}
     body = {'port': {'name': 'top_port_id'}}
     self.assertRaises(FakeException,
                       self.controller._prepare_neutron_element,
                       self.context, b_pod, port, 'port', body)
     mappings = api.get_bottom_mappings_by_top_id(self.context,
                                                  'top_port_id', 'port')
     self.assertEqual(0, len(mappings))
Example #28
 def delete_trunk(self, context, trunk_id):
     t_ctx = t_context.get_context_from_neutron_context(context)
     res = super(TricircleTrunkPlugin, self).get_trunk(context, trunk_id)
     with context.session.begin():
         super(TricircleTrunkPlugin, self).delete_trunk(context, trunk_id)
         mappings = db_api.get_bottom_mappings_by_top_id(
             t_ctx, trunk_id, t_constants.RT_TRUNK)
         if mappings:
             b_pod = mappings[0][0]
             self.xjob_handler.sync_trunk(t_ctx, res['project_id'],
                                          trunk_id, b_pod['pod_id'])
Example #29
    def setup_bottom_router(self, ctx, payload):
        (b_pod_id,
         t_router_id, t_net_id) = payload[constants.JT_ROUTER_SETUP].split('#')

        if b_pod_id == constants.POD_NOT_SPECIFIED:
            mappings = db_api.get_bottom_mappings_by_top_id(
                ctx, t_net_id, constants.RT_NETWORK)
            b_pods = [mapping[0] for mapping in mappings]
            for b_pod in b_pods:
                # NOTE(zhiyuan) we create one job for each pod to avoid
                # conflict caused by different workers operating the same pod
                self.xjob_handler.setup_bottom_router(
                    ctx, t_net_id, t_router_id, b_pod['pod_id'])
            return

        t_client = self._get_client()
        t_pod = db_api.get_top_pod(ctx)
        t_router = t_client.get_routers(ctx, t_router_id)
        if not t_router:
            # we just end this job if top router no longer exists
            return
        t_net = t_client.get_networks(ctx, t_net_id)
        if not t_net:
            # we just end this job if top network no longer exists
            return
        project_id = t_router['tenant_id']

        b_pod = db_api.get_pod(ctx, b_pod_id)

        t_ew_bridge_net_name = constants.ew_bridge_net_name % project_id
        t_ew_bridge_subnet_name = constants.ew_bridge_subnet_name % project_id
        t_ew_bridge_net = self._get_resource_by_name(t_client, ctx, 'network',
                                                     t_ew_bridge_net_name)
        t_ew_bridge_subnet = self._get_resource_by_name(
            t_client, ctx, 'subnet', t_ew_bridge_subnet_name)

        ext_nets = t_client.list_networks(ctx,
                                          filters=[{'key': 'router:external',
                                                    'comparator': 'eq',
                                                    'value': True}])
        ext_net_pod_names = set(
            [ext_net[AZ_HINTS][0] for ext_net in ext_nets])

        if not ext_net_pod_names:
            need_ns_bridge = False
        elif b_pod['pod_name'] in ext_net_pod_names:
            need_ns_bridge = False
        else:
            need_ns_bridge = True
        self._setup_router_one_pod(ctx, t_pod, b_pod, t_client, t_net,
                                   t_router, t_ew_bridge_net,
                                   t_ew_bridge_subnet, need_ns_bridge)

        self.xjob_handler.configure_extra_routes(ctx, t_router_id)
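For illustration (pod names are made up), the north-south bridge decision above reduces to a membership test on the availability-zone hints of the external networks:

# the only external network is hosted by pod_2, but this job runs for
# pod_1, so a north-south bridge is needed
ext_net_pod_names = {'pod_2'}
b_pod = {'pod_name': 'pod_1'}
need_ns_bridge = bool(ext_net_pod_names) and \
    b_pod['pod_name'] not in ext_net_pod_names
assert need_ns_bridge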
Example #30
    def test_delete_mappings_by_top_id(self):
        self._create_pod(0, 'test_az_uuid_0')
        self._create_pod(1, 'test_az_uuid_1')
        self._create_pod(2, 'test_az_uuid_2')
        self._create_resource_mappings()
        top_id = 'top_uuid'
        api.delete_mappings_by_top_id(self.context, top_id)

        mappings = api.get_bottom_mappings_by_top_id(
            self.context, top_id, 'network')
        self.assertEqual(len(mappings), 0)
Example #33
 def update_trunk(self, context, trunk_id, trunk):
     # update trunk
     t_ctx = t_context.get_context_from_neutron_context(context)
     with context.session.begin():
         res = super(TricircleTrunkDriver, self).update_trunk(
             context, trunk_id, trunk)
         mappings = db_api.get_bottom_mappings_by_top_id(
             t_ctx, trunk_id, t_constants.RT_TRUNK)
         if mappings:
             b_pod = mappings[0][0]
             self.xjob_handler.sync_trunk(t_ctx, res['project_id'],
                                          trunk_id, b_pod['pod_id'])
     return res
Example #34
def get_pod_by_top_id(context, _id):
    """Get pod resource from pod table .

    :param _id: the top id of resource
    :returns: pod resource
    """
    mappings = db_api.get_bottom_mappings_by_top_id(context, _id,
                                                    cons.RT_VOLUME)

    if not mappings or len(mappings) != 1:
        return None

    return mappings[0][0]
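A hedged usage sketch; the error handling and the client lookup are assumptions borrowed from other examples on this page, and volume_id stands in for a real top volume id:

pod = get_pod_by_top_id(context, volume_id)
if pod is None:
    # either no routing exists yet, or the volume unexpectedly maps to
    # more than one pod
    pecan.abort(404, 'Volume not found')
else:
    client = self._get_client(pod['pod_name'])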
Example #35
    def remove_subports(self, context, trunk_id, subports):
        t_ctx = t_context.get_context_from_neutron_context(context)
        with context.session.begin():
            self.update_subports_device_id(context, subports, '', '')
            res = super(TricircleTrunkDriver, self).remove_subports(
                context, trunk_id, subports)
            mappings = db_api.get_bottom_mappings_by_top_id(
                t_ctx, trunk_id, t_constants.RT_TRUNK)
            if mappings:
                b_pod = mappings[0][0]
                self.xjob_handler.sync_trunk(
                    t_ctx, res['project_id'], trunk_id, b_pod['pod_id'])

        return res
Example #36
def get_pod_by_top_id(context, _id):
    """Get pod resource from pod table .

    :param _id: the top id of resource
    :returns: pod resource
    """
    mappings = db_api.get_bottom_mappings_by_top_id(
        context, _id,
        cons.RT_VOLUME)

    if not mappings or len(mappings) != 1:
        return None

    return mappings[0][0]
Example #37
    def update_network(self, ctx, payload):
        """update bottom network

        If the bottom pod id equals POD_NOT_SPECIFIED, dispatch a job for every
        mapped bottom pod via RPC; otherwise update the network in the
        specified pod.

        :param ctx: tricircle context
        :param payload: dict whose key is JT_NETWORK_UPDATE and whose value
        is "bottom_pod_id#top_network_id"
        :return: None
        """
        (b_pod_id, t_network_id) = payload[
            constants.JT_NETWORK_UPDATE].split('#')
        if b_pod_id == constants.POD_NOT_SPECIFIED:
            mappings = db_api.get_bottom_mappings_by_top_id(
                ctx, t_network_id, constants.RT_NETWORK)
            b_pods = [mapping[0] for mapping in mappings]
            for b_pod in b_pods:
                self.xjob_handler.update_network(ctx, t_network_id,
                                                 b_pod['pod_id'])
            return

        t_client = self._get_client()
        t_network = t_client.get_networks(ctx, t_network_id)
        if not t_network:
            return
        b_pod = db_api.get_pod(ctx, b_pod_id)
        b_region_name = b_pod['region_name']
        b_client = self._get_client(region_name=b_region_name)
        b_network_id = db_api.get_bottom_id_by_top_id_region_name(
            ctx, t_network_id, b_region_name, constants.RT_NETWORK)
        # name is not allowed to be updated, because it is used by
        # lock_handle to retrieve bottom/local resources that have been
        # created but not registered in the resource routing table
        body = {
            'network': {
                'description': t_network['description'],
                'admin_state_up': t_network['admin_state_up'],
                'shared': t_network['shared']
            }
        }

        try:
            b_client.update_networks(ctx, b_network_id, body)
        except q_cli_exceptions.NotFound:
            LOG.error(_LE('network: %(net_id)s not found, '
                          'pod name: %(name)s'),
                      {'net_id': b_network_id, 'name': b_region_name})
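As with the subnet job earlier on this page, the POD_NOT_SPECIFIED branch re-enqueues one pod-specific job per mapping; conceptually (ids are made up for the sketch), a single broadcast payload turns into per-pod payloads like these:

incoming = {constants.JT_NETWORK_UPDATE:
            '%s#%s' % (constants.POD_NOT_SPECIFIED, 'top-net-id')}
# assuming the network is mapped into two pods, the fan-out loop above
# results in two follow-up jobs equivalent to:
follow_ups = [
    {constants.JT_NETWORK_UPDATE: 'pod-1-id#top-net-id'},
    {constants.JT_NETWORK_UPDATE: 'pod-2-id#top-net-id'},
]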
Example #38
 def update_port_chain_precommit(self, context):
     plugin_context = context._plugin_context
     t_ctx = t_context.get_context_from_neutron_context(plugin_context)
     port_chain = context.current
     mappings = db_api.get_bottom_mappings_by_top_id(
         t_ctx, port_chain['id'], t_constants.RT_PORT_CHAIN)
     if mappings:
         net_id = self._get_net_id_by_portpairgroups(
             plugin_context, context._plugin,
             port_chain['port_pair_groups'])
         if not net_id:
             return
         self.xjob_handler.sync_service_function_chain(
             t_ctx, port_chain['project_id'], port_chain['id'], net_id,
             t_constants.POD_NOT_SPECIFIED)
Example #39
 def update_port_chain_precommit(self, context):
     plugin_context = context._plugin_context
     t_ctx = t_context.get_context_from_neutron_context(plugin_context)
     port_chain = context.current
     mappings = db_api.get_bottom_mappings_by_top_id(
         t_ctx, port_chain['id'], t_constants.RT_PORT_CHAIN)
     if mappings:
         net_id = self._get_net_id_by_portpairgroups(
             plugin_context, context._plugin,
             port_chain['port_pair_groups'])
         if not net_id:
             return
         self.xjob_handler.sync_service_function_chain(
             t_ctx, port_chain['project_id'], port_chain['id'],
             net_id, t_constants.POD_NOT_SPECIFIED)
Example #40
    def add_subports(self, context, trunk_id, subports):
        t_ctx = t_context.get_context_from_neutron_context(context)
        with context.session.begin():
            res = super(TricircleTrunkPlugin, self).add_subports(
                context, trunk_id, subports)
            self.update_subports_device_id(context, subports, trunk_id,
                                           t_constants.DEVICE_OWNER_SUBPORT)
            mappings = db_api.get_bottom_mappings_by_top_id(
                t_ctx, trunk_id, t_constants.RT_TRUNK)
            if mappings:
                b_pod = mappings[0][0]
                self.xjob_handler.sync_trunk(
                    t_ctx, res['project_id'], trunk_id, b_pod['pod_id'])

        return res
Example #41
    def configure_extra_routes(self, ctx, payload):
        # TODO(zhiyuan) performance and reliability issue
        # better have a job tracking mechanism
        t_router_id = payload['router']

        b_pods, b_router_ids = zip(*db_api.get_bottom_mappings_by_top_id(
            ctx, t_router_id, constants.RT_ROUTER))

        router_bridge_ip_map = {}
        router_cidr_map = {}
        for i, b_pod in enumerate(b_pods):
            bottom_client = self._get_client(pod_name=b_pod['pod_name'])
            b_inferfaces = bottom_client.list_ports(
                ctx, filters=[{'key': 'device_id',
                               'comparator': 'eq',
                               'value': b_router_ids[i]},
                              {'key': 'device_owner',
                               'comparator': 'eq',
                               'value': 'network:router_interface'}])
            cidrs = []
            for b_inferface in b_inferfaces:
                ip = b_inferface['fixed_ips'][0]['ip_address']
                ew_bridge_cidr = '100.0.0.0/9'
                ns_bridge_cidr = '100.128.0.0/9'
                if netaddr.IPAddress(ip) in netaddr.IPNetwork(ew_bridge_cidr):
                    router_bridge_ip_map[b_router_ids[i]] = ip
                    continue
                if netaddr.IPAddress(ip) in netaddr.IPNetwork(ns_bridge_cidr):
                    continue
                b_subnet = bottom_client.get_subnets(
                    ctx, b_inferface['fixed_ips'][0]['subnet_id'])
                cidrs.append(b_subnet['cidr'])
            router_cidr_map[b_router_ids[i]] = cidrs

        for i, b_router_id in enumerate(b_router_ids):
            if b_router_id not in router_bridge_ip_map:
                continue
            bottom_client = self._get_client(pod_name=b_pods[i]['pod_name'])
            extra_routes = []
            for router_id, cidrs in router_cidr_map.iteritems():
                if router_id == b_router_id:
                    continue
                for cidr in cidrs:
                    extra_routes.append(
                        {'nexthop': router_bridge_ip_map[router_id],
                         'destination': cidr})
            bottom_client.update_routers(ctx, b_router_id,
                                         {'router': {'routes': extra_routes}})
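A worked illustration of the outcome (all addresses and CIDRs are made up): with two bottom routers whose east-west bridge interfaces fall inside 100.0.0.0/9, each router receives routes to the other router's tenant CIDRs via the other router's bridge IP:

router_bridge_ip_map = {'r1': '100.0.0.10', 'r2': '100.0.0.11'}
router_cidr_map = {'r1': ['10.0.1.0/24'], 'r2': ['10.0.2.0/24']}
# routes pushed to r1:
#     [{'nexthop': '100.0.0.11', 'destination': '10.0.2.0/24'}]
# routes pushed to r2:
#     [{'nexthop': '100.0.0.10', 'destination': '10.0.1.0/24'}]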
Example #42
    def _test_update_port_with_qos_policy(self, plugin, client, q_ctx, t_ctx,
                                          pod_id, t_port_id, b_port_id,
                                          bottom_policy):
        res = \
            self._create_policy_in_top(self, plugin, q_ctx, t_ctx,
                                       pod_id, bottom_policy)

        update_body = {'port': {'qos_policy_id': res['id']}}
        top_port = plugin.update_port(q_ctx, t_port_id, update_body)
        self.assertEqual(top_port['qos_policy_id'], res['id'])

        route_res = \
            db_api.get_bottom_mappings_by_top_id(t_ctx, res['id'],
                                                 constants.RT_QOS)
        bottom_port = client.get_ports(q_ctx, b_port_id)
        self.assertEqual(bottom_port['qos_policy_id'], route_res[0][1])
Example #43
    def sync_service_function_chain(self, ctx, payload):
        (b_pod_id, t_port_chain_id, net_id) = payload[
            constants.JT_SFC_SYNC].split('#')

        if b_pod_id == constants.POD_NOT_SPECIFIED:
            mappings = db_api.get_bottom_mappings_by_top_id(
                ctx, net_id, constants.RT_NETWORK)
            b_pods = [mapping[0] for mapping in mappings]
            for b_pod in b_pods:
                payload = '%s#%s#%s' % (b_pod['pod_id'], t_port_chain_id,
                                        net_id)
                super(FakeBaseXManager, self).sync_service_function_chain(
                    ctx, {constants.JT_SFC_SYNC: payload})
        else:
            super(FakeBaseXManager, self).sync_service_function_chain(
                ctx, payload)
Example #44
 def delete_port_pair(self, context):
     t_ctx = t_context.get_context_from_neutron_context(
         context._plugin_context)
     portpair_id = context.current['id']
     mappings = db_api.get_bottom_mappings_by_top_id(
         t_ctx, portpair_id, t_constants.RT_PORT_PAIR)
     for b_pod, b_portpair_id in mappings:
         b_region_name = b_pod['region_name']
         b_client = self._get_client(b_region_name)
         try:
             b_client.delete_port_pairs(t_ctx, b_portpair_id)
         except client_exceptions.NotFound:
             LOG.debug(('port pair: %(portpair_id)s not found, '
                        'region name: %(name)s'),
                       {'portpair_id': portpair_id, 'name': b_region_name})
         db_api.delete_mappings_by_bottom_id(t_ctx, b_portpair_id)
Example #45
    def sync_service_function_chain(self, ctx, payload):
        (b_pod_id, t_port_chain_id,
         net_id) = payload[constants.JT_SFC_SYNC].split('#')

        if b_pod_id == constants.POD_NOT_SPECIFIED:
            mappings = db_api.get_bottom_mappings_by_top_id(
                ctx, net_id, constants.RT_NETWORK)
            b_pods = [mapping[0] for mapping in mappings]
            for b_pod in b_pods:
                payload = '%s#%s#%s' % (b_pod['pod_id'], t_port_chain_id,
                                        net_id)
                super(FakeBaseXManager, self).sync_service_function_chain(
                    ctx, {constants.JT_SFC_SYNC: payload})
        else:
            super(FakeBaseXManager,
                  self).sync_service_function_chain(ctx, payload)
Example #46
 def delete_flow_classifier(self, context):
     t_ctx = t_context.get_context_from_neutron_context(
         context._plugin_context)
     flowclassifier_id = context.current['id']
     mappings = db_api.get_bottom_mappings_by_top_id(
         t_ctx, flowclassifier_id, t_constants.RT_FLOW_CLASSIFIER)
     for b_pod, b_classifier_id in mappings:
         b_region_name = b_pod['region_name']
         b_client = self._get_client(b_region_name)
         try:
             b_client.delete_flow_classifiers(t_ctx, b_classifier_id)
         except client_exceptions.NotFound:
             LOG.debug(('flow classifier: %(classifier_id)s not found, '
                        'region name: %(name)s'),
                       {'classifier_id': flowclassifier_id,
                        'name': b_region_name})
         db_api.delete_mappings_by_bottom_id(t_ctx, b_classifier_id)
Example #47
    def _get_ports_with_policy(self, context, policy):
        networks_ids = policy.get_bound_networks()

        ports_with_net_policy = obj_reg.load_class('Port').get_objects(
            context, network_id=networks_ids)

        # Keep only the ports whose QoS policy is not overridden at port level
        ports_with_net_policy = [
            port for port in ports_with_net_policy if
            port.qos_policy_id is None
        ]

        ports_ids = policy.get_bound_ports()
        ports_with_policy = obj_reg.load_class('Port').get_objects(
            context, id=ports_ids)
        t_ports = list(set(ports_with_policy + ports_with_net_policy))

        t_ctx = t_context.get_context_from_neutron_context(context)
        for t_port in t_ports:
            mappings = db_api.get_bottom_mappings_by_top_id(
                t_ctx, t_port.id, t_constants.RT_PORT)
            if mappings:
                b_pod, b_port_id = mappings[0]
                b_region_name = b_pod['region_name']
                b_client = self._get_client(region_name=b_region_name)
                b_port = b_client.get_ports(t_ctx, b_port_id)
                new_binding = obj_reg.new_instance(
                    'PortBinding',
                    port_id=t_port.id,
                    vif_type=b_port.get('binding:vif_type',
                                        portbindings.VIF_TYPE_UNBOUND),
                    vnic_type=b_port.get('binding:vnic_type',
                                         portbindings.VNIC_NORMAL)
                )
                t_port.binding = new_binding
            else:
                new_binding = obj_reg.new_instance(
                    'PortBinding',
                    port_id=t_port.id,
                    vif_type=portbindings.VIF_TYPE_UNBOUND,
                    vnic_type=portbindings.VNIC_NORMAL
                )
                t_port.binding = new_binding

        return t_ports
Example #48
    def delete(self, _id):
        context = t_context.extract_context_from_environ()

        mappings = db_api.get_bottom_mappings_by_top_id(context, _id,
                                                        constants.RT_SERVER)
        if not mappings:
            pecan.response.status = 404
            return {'Error': {'message': _('Server not found'), 'code': 404}}

        pod, bottom_id = mappings[0]
        client = self._get_client(pod['pod_name'])
        top_client = self._get_client()
        try:
            server_ports = top_client.list_ports(
                context, filters=[{'key': 'device_id', 'comparator': 'eq',
                                   'value': _id}])
            ret = client.delete_servers(context, bottom_id)
            # a None return value indicates the server was not found
            if ret is None:
                self._remove_stale_mapping(context, _id)
                pecan.response.status = 404
                return {'Error': {'message': _('Server not found'),
                                  'code': 404}}
            for server_port in server_ports:
                self.xjob_handler.delete_server_port(context,
                                                     server_port['id'])
        except Exception as e:
            code = 500
            message = _('Delete server %(server_id)s fails') % {
                'server_id': _id}
            if hasattr(e, 'code'):
                code = e.code
            ex_message = str(e)
            if ex_message:
                message = ex_message
            LOG.error(message)

            pecan.response.status = code
            return {'Error': {'message': message, 'code': code}}

        # NOTE(zhiyuan) Security group rules for default security group are
        # also kept until subnet is deleted.
        pecan.response.status = 204
        return pecan.response