Esempio n. 1
0
    def prepare_top_snat_port(self, t_ctx, q_ctx, project_id, t_net_id,
                              t_subnet_id):
        """Create the centralized snat port on the top network.

        :param t_ctx: tricircle context
        :param q_ctx: neutron context
        :param project_id: project id
        :param t_net_id: top network id
        :param t_subnet_id: top subnet id
        :return: id of the top centralized snat port
        """
        snat_name = t_constants.snat_port_name % t_subnet_id
        port_attrs = {
            'tenant_id': project_id,
            'admin_state_up': True,
            'network_id': t_net_id,
            'name': snat_name,
            'binding:profile': {},
            'device_id': '',
            'device_owner': constants.DEVICE_OWNER_ROUTER_SNAT,
        }
        if self.call_obj:
            port_attrs['mac_address'] = constants.ATTR_NOT_SPECIFIED
            port_attrs['fixed_ips'] = constants.ATTR_NOT_SPECIFIED

        # NOTE(zhiyuan) for one subnet in different pods, we just create one
        # centralized snat port. though snat port in different pods will have
        # the same IP, VM packets will only got to the local router namespace
        _, snat_port_id = self.prepare_top_element(
            t_ctx, q_ctx, project_id, db_api.get_top_pod(t_ctx),
            {'id': snat_name}, t_constants.RT_PORT, {'port': port_attrs})
        return snat_port_id
Esempio n. 2
0
    def prepare_top_snat_port(self, t_ctx, q_ctx, project_id, t_net_id,
                              t_subnet_id):
        """Create top centralized snat port

        :param t_ctx: tricircle context
        :param q_ctx: neutron context
        :param project_id: project id
        :param t_net_id: top network id
        :param t_subnet_id: top subnet id
        :return: id of the top centralized snat port
        """
        # port name encodes the subnet id, so the port can later be looked
        # up deterministically per subnet
        t_snat_name = t_constants.snat_port_name % t_subnet_id
        t_snat_port_body = {
            'port': {
                'tenant_id': project_id,
                'admin_state_up': True,
                'network_id': t_net_id,
                'name': t_snat_name,
                'binding:profile': {},
                'device_id': '',
                'device_owner': constants.DEVICE_OWNER_ROUTER_SNAT,
            }
        }
        # NOTE(review): presumably call_obj means the port is created via a
        # plugin object rather than a REST client, which requires these
        # attributes to be explicitly "unspecified" -- confirm
        if self.call_obj:
            t_snat_port_body['port'].update(
                {'mac_address': constants.ATTR_NOT_SPECIFIED,
                 'fixed_ips': constants.ATTR_NOT_SPECIFIED})

        # NOTE(zhiyuan) for one subnet in different pods, we just create one
        # centralized snat port. though snat port in different pods will have
        # the same IP, VM packets will only got to the local router namespace
        _, t_snat_port_id = self.prepare_top_element(
            t_ctx, q_ctx, project_id, db_api.get_top_pod(t_ctx),
            {'id': t_snat_name}, t_constants.RT_PORT, t_snat_port_body)
        return t_snat_port_id
Esempio n. 3
0
    def test_get_top_pod(self):
        """get_top_pod must return the single pod with an empty az_name."""
        # pod 0 has an empty az_name, which marks it as the top pod;
        # the other two are bottom pods bound to availability zones
        self._create_pod(0, '')
        self._create_pod(1, 'test_az_uuid1')
        self._create_pod(2, 'test_az_uuid2')

        pod = api.get_top_pod(self.context)
        # assertEqual(expected, observed) per unittest convention
        self.assertEqual('test_pod_0', pod['region_name'])
        self.assertEqual('', pod['az_name'])
Esempio n. 4
0
    def test_get_top_pod(self):
        """get_top_pod must return the single pod with an empty az_name."""
        # pod 0 has an empty az_name, which marks it as the top pod;
        # the other two are bottom pods bound to availability zones
        self._create_pod(0, '')
        self._create_pod(1, 'test_az_uuid1')
        self._create_pod(2, 'test_az_uuid2')

        pod = api.get_top_pod(self.context)
        # assertEqual(expected, observed) per unittest convention
        self.assertEqual('test_pod_0', pod['region_name'])
        self.assertEqual('', pod['az_name'])
Esempio n. 5
0
 def _get_links(self, context, image):
     """Build self/bookmark/alternate links for an image.

     :param context: request context used for endpoint lookups
     :param image: image dict; only 'id' is read here
     :return: list of link dicts ('rel'/'href', plus 'type' on alternate)
     """
     # resolve the top pod once; the original queried get_top_pod twice
     # for the same pod_id
     top_pod_id = db_api.get_top_pod(context)['pod_id']
     nova_url = self.client.get_endpoint(
         context, top_pod_id, constants.ST_NOVA)
     nova_url = nova_url.replace('/$(tenant_id)s', '')
     self_link = url_join(nova_url, self.project_id, 'images', image['id'])
     bookmark_link = url_join(
         remove_trailing_version_from_href(nova_url),
         self.project_id, 'images', image['id'])
     glance_url = self.client.get_endpoint(
         context, top_pod_id, constants.ST_GLANCE)
     alternate_link = '/'.join([glance_url, 'images', image['id']])
     return [{'rel': 'self', 'href': self_link},
             {'rel': 'bookmark', 'href': bookmark_link},
             {'rel': 'alternate',
                     'type': 'application/vnd.openstack.image',
                     'href': alternate_link}]
Esempio n. 6
0
    def setup_bottom_router(self, ctx, payload):
        """Set up the bottom router for one (pod, router, network) triple.

        :param ctx: tricircle context
        :param payload: job payload; the JT_ROUTER_SETUP entry is a
                        '#'-joined string of bottom pod id, top router id
                        and top network id
        :return: None
        """
        (b_pod_id,
         t_router_id, t_net_id) = payload[constants.JT_ROUTER_SETUP].split('#')

        # no concrete pod given: fan out one job per pod hosting the network
        if b_pod_id == constants.POD_NOT_SPECIFIED:
            mappings = db_api.get_bottom_mappings_by_top_id(
                ctx, t_net_id, constants.RT_NETWORK)
            b_pods = [mapping[0] for mapping in mappings]
            for b_pod in b_pods:
                # NOTE(zhiyuan) we create one job for each pod to avoid
                # conflict caused by different workers operating the same pod
                self.xjob_handler.setup_bottom_router(
                    ctx, t_net_id, t_router_id, b_pod['pod_id'])
            return

        t_client = self._get_client()
        t_pod = db_api.get_top_pod(ctx)
        t_router = t_client.get_routers(ctx, t_router_id)
        if not t_router:
            # we just end this job if top router no longer exists
            return
        t_net = t_client.get_networks(ctx, t_net_id)
        if not t_net:
            # we just end this job if top network no longer exists
            return
        project_id = t_router['tenant_id']

        b_pod = db_api.get_pod(ctx, b_pod_id)

        # east-west bridge network/subnet are named after the project id
        t_ew_bridge_net_name = constants.ew_bridge_net_name % project_id
        t_ew_bridge_subnet_name = constants.ew_bridge_subnet_name % project_id
        t_ew_bridge_net = self._get_resource_by_name(t_client, ctx, 'network',
                                                     t_ew_bridge_net_name)
        t_ew_bridge_subnet = self._get_resource_by_name(
            t_client, ctx, 'subnet', t_ew_bridge_subnet_name)

        ext_nets = t_client.list_networks(ctx,
                                          filters=[{'key': 'router:external',
                                                    'comparator': 'eq',
                                                    'value': True}])
        # NOTE(review): assumes each external network carries exactly one
        # availability-zone hint naming its hosting pod -- confirm
        ext_net_pod_names = set(
            [ext_net[AZ_HINTS][0] for ext_net in ext_nets])

        # a north-south bridge is only needed when an external network
        # exists but is hosted in some other pod
        if not ext_net_pod_names:
            need_ns_bridge = False
        elif b_pod['pod_name'] in ext_net_pod_names:
            need_ns_bridge = False
        else:
            need_ns_bridge = True
        self._setup_router_one_pod(ctx, t_pod, b_pod, t_client, t_net,
                                   t_router, t_ew_bridge_net,
                                   t_ew_bridge_subnet, need_ns_bridge)

        # refresh extra routes once the bottom router is in place
        self.xjob_handler.configure_extra_routes(ctx, t_router_id)
Esempio n. 7
0
    def setup_bottom_router(self, ctx, payload):
        """Set up the bottom router for one (pod, router, network) triple.

        :param ctx: tricircle context
        :param payload: job payload; the JT_ROUTER_SETUP entry is a
                        '#'-joined string of bottom pod id, top router id
                        and top network id
        :return: None
        """
        (b_pod_id,
         t_router_id, t_net_id) = payload[constants.JT_ROUTER_SETUP].split('#')

        # no concrete pod given: fan out one job per pod hosting the network
        if b_pod_id == constants.POD_NOT_SPECIFIED:
            mappings = db_api.get_bottom_mappings_by_top_id(
                ctx, t_net_id, constants.RT_NETWORK)
            b_pods = [mapping[0] for mapping in mappings]
            for b_pod in b_pods:
                # NOTE(zhiyuan) we create one job for each pod to avoid
                # conflict caused by different workers operating the same pod
                self.xjob_handler.setup_bottom_router(
                    ctx, t_net_id, t_router_id, b_pod['pod_id'])
            return

        t_client = self._get_client()
        t_pod = db_api.get_top_pod(ctx)
        t_router = t_client.get_routers(ctx, t_router_id)
        if not t_router:
            # we just end this job if top router no longer exists
            return
        t_net = t_client.get_networks(ctx, t_net_id)
        if not t_net:
            # we just end this job if top network no longer exists
            return
        project_id = t_router['tenant_id']

        b_pod = db_api.get_pod(ctx, b_pod_id)

        # east-west bridge network/subnet are named after the project id
        t_ew_bridge_net_name = constants.ew_bridge_net_name % project_id
        t_ew_bridge_subnet_name = constants.ew_bridge_subnet_name % project_id
        t_ew_bridge_net = self._get_resource_by_name(t_client, ctx, 'network',
                                                     t_ew_bridge_net_name)
        t_ew_bridge_subnet = self._get_resource_by_name(
            t_client, ctx, 'subnet', t_ew_bridge_subnet_name)

        ext_nets = t_client.list_networks(ctx,
                                          filters=[{'key': 'router:external',
                                                    'comparator': 'eq',
                                                    'value': True}])
        # NOTE(review): assumes each external network carries exactly one
        # availability-zone hint naming its hosting pod -- confirm
        ext_net_pod_names = set(
            [ext_net[AZ_HINTS][0] for ext_net in ext_nets])

        # a north-south bridge is only needed when an external network
        # exists but is hosted in some other pod
        if not ext_net_pod_names:
            need_ns_bridge = False
        elif b_pod['pod_name'] in ext_net_pod_names:
            need_ns_bridge = False
        else:
            need_ns_bridge = True
        self._setup_router_one_pod(ctx, t_pod, b_pod, t_client, t_net,
                                   t_router, t_ew_bridge_net,
                                   t_ew_bridge_subnet, need_ns_bridge)

        # refresh extra routes once the bottom router is in place
        self.xjob_handler.configure_extra_routes(ctx, t_router_id)
Esempio n. 8
0
 def _get_links(self, context, image):
     """Build self/bookmark/alternate links for an image.

     :param context: request context used for endpoint lookups
     :param image: image dict; only 'id' is read here
     :return: list of link dicts ('rel'/'href', plus 'type' on alternate)
     """
     # resolve the top pod once; the original queried get_top_pod twice
     # for the same pod_id
     top_pod_id = db_api.get_top_pod(context)['pod_id']
     nova_url = self.client.get_endpoint(
         context, top_pod_id, constants.ST_NOVA)
     nova_url = nova_url.replace('/$(tenant_id)s', '')
     self_link = url_join(nova_url, self.project_id, 'images', image['id'])
     bookmark_link = url_join(remove_trailing_version_from_href(nova_url),
                              self.project_id, 'images', image['id'])
     glance_url = self.client.get_endpoint(
         context, top_pod_id, constants.ST_GLANCE)
     alternate_link = '/'.join([glance_url, 'images', image['id']])
     return [{
         'rel': 'self',
         'href': self_link
     }, {
         'rel': 'bookmark',
         'href': bookmark_link
     }, {
         'rel': 'alternate',
         'type': 'application/vnd.openstack.image',
         'href': alternate_link
     }]
Esempio n. 9
0
    def prepare_dhcp_port(self, ctx, project_id, b_pod, t_net_id, t_subnet_id,
                          b_net_id, b_subnet_id):
        """Create the top dhcp port and map it to a bottom dhcp port.

        :param ctx: tricircle context
        :param project_id: project id
        :param b_pod: dict of bottom pod
        :param t_net_id: top network id
        :param t_subnet_id: top subnet id
        :param b_net_id: bottom network id
        :param b_subnet_id: bottom subnet id
        :return: None
        """
        t_client = self._get_client()

        dhcp_name = t_constants.dhcp_port_name % t_subnet_id
        port_attrs = {
            'tenant_id': project_id,
            'admin_state_up': True,
            'network_id': t_net_id,
            'name': dhcp_name,
            'binding:profile': {},
            'device_id': 'reserved_dhcp_port',
            'device_owner': 'network:dhcp',
        }
        if self.call_obj:
            port_attrs['mac_address'] = constants.ATTR_NOT_SPECIFIED
            port_attrs['fixed_ips'] = constants.ATTR_NOT_SPECIFIED

        # NOTE(zhiyuan) for one subnet in different pods, we just create
        # one dhcp port. though dhcp port in different pods will have
        # the same IP, each dnsmasq daemon only takes care of VM IPs in
        # its own pod, VM will not receive incorrect dhcp response
        _, t_dhcp_port_id = self.prepare_top_element(
            ctx, None, project_id, db_api.get_top_pod(ctx),
            {'id': dhcp_name}, t_constants.RT_PORT, {'port': port_attrs})

        # fetch the created top port and mirror it into the bottom pod
        t_dhcp_port = t_client.get_ports(ctx, t_dhcp_port_id)
        bottom_body = self._get_create_dhcp_port_body(
            project_id, t_dhcp_port, b_subnet_id, b_net_id)
        self.prepare_bottom_element(ctx, project_id, b_pod, t_dhcp_port,
                                    t_constants.RT_PORT, bottom_body)
Esempio n. 10
0
def check_network_not_in_use(self, context, t_ctx, network_id):
    """Raise NetworkInUse if non-auto-created ports remain on the network.

    Ports pre-created by tricircle itself (per-region interface ports and
    the shared dhcp/snat ports of each subnet) do not count as "in use";
    any other port on the network does.

    :param context: neutron context
    :param t_ctx: tricircle context
    :param network_id: id of the network being checked
    :raises: exceptions.NetworkInUse when a user-created port exists
    """
    # use a different name to avoid overriding _ensure_network_not_in_use
    subnets = self._get_subnets_by_network(context, network_id)
    auto_delete_port_names = []

    for subnet in subnets:
        subnet_id = subnet['id']
        # regions that have a resource-routing entry for this subnet
        region_names = [
            e[0]
            for e in t_ctx.session.query(sql.distinct(models.Pod.region_name)).
            join(models.ResourceRouting, models.Pod.pod_id ==
                 models.ResourceRouting.pod_id).filter(
                     models.ResourceRouting.top_id == subnet_id)
        ]
        # interface ports are named per (region, subnet); dhcp/snat ports
        # per subnet -- all of them are safe to delete automatically
        auto_delete_port_names.extend([
            t_constants.interface_port_name % (region_name, subnet_id)
            for region_name in region_names
        ])
        dhcp_port_name = t_constants.dhcp_port_name % subnet_id
        snat_port_name = t_constants.snat_port_name % subnet_id
        auto_delete_port_names.append(dhcp_port_name)
        auto_delete_port_names.append(snat_port_name)

    if not auto_delete_port_names:
        # pre-created port not found, any ports left need to be deleted
        # before deleting network
        non_auto_delete_ports = context.session.query(
            models_v2.Port.id).filter_by(network_id=network_id)
        if non_auto_delete_ports.count():
            raise exceptions.NetworkInUse(net_id=network_id)
        return

    t_pod = db_api.get_top_pod(t_ctx)
    # translate the auto-delete names to their top-pod port ids
    auto_delete_port_ids = [
        e[0] for e in t_ctx.session.query(models.ResourceRouting.bottom_id).
        filter_by(pod_id=t_pod['pod_id'], resource_type=t_constants.RT_PORT).
        filter(models.ResourceRouting.top_id.in_(auto_delete_port_names))
    ]

    # any remaining port that is not auto-deletable blocks the deletion
    non_auto_delete_ports = context.session.query(
        models_v2.Port.id).filter_by(network_id=network_id).filter(
            ~models_v2.Port.id.in_(auto_delete_port_ids))
    if non_auto_delete_ports.count():
        raise exceptions.NetworkInUse(net_id=network_id)
Esempio n. 11
0
    def prepare_top_dhcp_port(self, t_ctx, q_ctx, project_id, t_net_id,
                              t_subnet_id):
        """Create the reserved dhcp port on the top network.

        :param t_ctx: tricircle context
        :param q_ctx: neutron context
        :param project_id: project id
        :param t_net_id: top network id
        :param t_subnet_id: top subnet id
        :return: top dhcp port id
        """
        dhcp_name = t_constants.dhcp_port_name % t_subnet_id
        port_spec = {
            'tenant_id': project_id,
            'admin_state_up': True,
            'network_id': t_net_id,
            'name': dhcp_name,
            portbindings.PROFILE: {},
            'device_id': 'reserved_dhcp_port',
            'device_owner': 'network:dhcp',
        }
        if self.call_obj:
            port_spec['mac_address'] = constants.ATTR_NOT_SPECIFIED
            port_spec['fixed_ips'] = constants.ATTR_NOT_SPECIFIED

        # NOTE(zhiyuan) for one subnet in different pods, we just create
        # one dhcp port. though dhcp port in different pods will have
        # the same IP, each dnsmasq daemon only takes care of VM IPs in
        # its own pod, VM will not receive incorrect dhcp response
        _, dhcp_port_id = self.prepare_top_element(
            t_ctx, q_ctx, project_id, db_api.get_top_pod(t_ctx),
            {'id': dhcp_name}, t_constants.RT_PORT, {'port': port_spec})
        return dhcp_port_id
Esempio n. 12
0
def check_network_not_in_use(self, context, t_ctx, network_id):
    """Raise NetworkInUse if non-auto-created ports remain on the network.

    Ports pre-created by tricircle itself (per-region interface ports and
    the shared dhcp/snat ports of each subnet) do not count as "in use";
    any other port on the network does.

    :param context: neutron context
    :param t_ctx: tricircle context
    :param network_id: id of the network being checked
    :raises: exceptions.NetworkInUse when a user-created port exists
    """
    # use a different name to avoid overriding _ensure_network_not_in_use
    subnets = self._get_subnets_by_network(context, network_id)
    auto_delete_port_names = []

    for subnet in subnets:
        subnet_id = subnet['id']
        # regions that have a resource-routing entry for this subnet
        region_names = [e[0] for e in t_ctx.session.query(
            sql.distinct(models.Pod.region_name)).join(
            models.ResourceRouting,
            models.Pod.pod_id == models.ResourceRouting.pod_id).filter(
            models.ResourceRouting.top_id == subnet_id)]
        # interface ports are named per (region, subnet); dhcp/snat ports
        # per subnet -- all of them are safe to delete automatically
        auto_delete_port_names.extend([t_constants.interface_port_name % (
            region_name, subnet_id) for region_name in region_names])
        dhcp_port_name = t_constants.dhcp_port_name % subnet_id
        snat_port_name = t_constants.snat_port_name % subnet_id
        auto_delete_port_names.append(dhcp_port_name)
        auto_delete_port_names.append(snat_port_name)

    if not auto_delete_port_names:
        # pre-created port not found, any ports left need to be deleted
        # before deleting network
        non_auto_delete_ports = context.session.query(
            models_v2.Port.id).filter_by(network_id=network_id)
        if non_auto_delete_ports.count():
            raise exceptions.NetworkInUse(net_id=network_id)
        return

    t_pod = db_api.get_top_pod(t_ctx)
    # translate the auto-delete names to their top-pod port ids
    auto_delete_port_ids = [e[0] for e in t_ctx.session.query(
        models.ResourceRouting.bottom_id).filter_by(
        pod_id=t_pod['pod_id'], resource_type=t_constants.RT_PORT).filter(
        models.ResourceRouting.top_id.in_(auto_delete_port_names))]

    # any remaining port that is not auto-deletable blocks the deletion
    non_auto_delete_ports = context.session.query(
        models_v2.Port.id).filter_by(network_id=network_id).filter(
        ~models_v2.Port.id.in_(auto_delete_port_ids))
    if non_auto_delete_ports.count():
        raise exceptions.NetworkInUse(net_id=network_id)
Esempio n. 13
0
    def prepare_top_dhcp_port(self, t_ctx, q_ctx, project_id, t_net_id,
                              t_subnet_id):
        """Create the reserved dhcp port on the given top network.

        :param t_ctx: tricircle context
        :param q_ctx: neutron context
        :param project_id: project id
        :param t_net_id: top network id
        :param t_subnet_id: top subnet id
        :return: top dhcp port id
        """
        name = t_constants.dhcp_port_name % t_subnet_id
        extra_attrs = {}
        if self.call_obj:
            extra_attrs = {'mac_address': constants.ATTR_NOT_SPECIFIED,
                           'fixed_ips': constants.ATTR_NOT_SPECIFIED}
        port_dict = {
            'tenant_id': project_id,
            'admin_state_up': True,
            'network_id': t_net_id,
            'name': name,
            portbindings.PROFILE: {},
            'device_id': 'reserved_dhcp_port',
            'device_owner': 'network:dhcp',
        }
        port_dict.update(extra_attrs)

        # NOTE(zhiyuan) for one subnet in different pods, we just create
        # one dhcp port. though dhcp port in different pods will have
        # the same IP, each dnsmasq daemon only takes care of VM IPs in
        # its own pod, VM will not receive incorrect dhcp response
        _, port_id = self.prepare_top_element(
            t_ctx, q_ctx, project_id, db_api.get_top_pod(t_ctx),
            {'id': name}, t_constants.RT_PORT, {'port': port_dict})
        return port_id
Esempio n. 14
0
    def post(self, **kw):
        """Create a volume in the bottom pod serving the requested AZ.

        Validates the request, resolves the bottom pod from the
        availability zone, forwards the create request to that pod's
        Cinder endpoint and records a resource routing entry so later
        requests can locate the volume.

        :param kw: request body; must contain 'volume' with an
                   'availability_zone' key
        :return: dict holding the created volume, or an 'error' entry
                 carrying the bottom pod's response body
        """
        context = t_context.extract_context_from_environ()

        if 'volume' not in kw:
            pecan.abort(400, _('Volume not found in request body'))
            return

        if 'availability_zone' not in kw['volume']:
            pecan.abort(400, _('Availability zone not set in request'))
            return

        pod, pod_az = az_ag.get_pod_by_az_tenant(
            context,
            az_name=kw['volume']['availability_zone'],
            tenant_id=self.tenant_id)
        if not pod:
            # log first: pecan.abort raises immediately, so in the original
            # code the LOG.error after it was unreachable
            LOG.error(_LE("Pod not configured or scheduling failure"))
            pecan.abort(500, _('Pod not configured or scheduling failure'))
            return

        t_pod = db_api.get_top_pod(context)
        if not t_pod:
            LOG.error(_LE("Top Pod not configured"))
            pecan.abort(500, _('Top Pod not configured'))
            return

        # TODO(joehuang): get release from pod configuration,
        # to convert the content
        # b_release = pod['release']
        # t_release = t_pod['release']
        t_release = cons.R_MITAKA
        b_release = cons.R_MITAKA

        s_ctx = hclient.get_pod_service_ctx(context,
                                            request.url,
                                            pod['pod_name'],
                                            s_type=cons.ST_CINDER)

        if s_ctx['b_url'] == '':
            LOG.error(
                _LE("bottom pod endpoint incorrect %s") % pod['pod_name'])
            pecan.abort(500, _('bottom pod endpoint incorrect'))
            return

        b_headers = self._convert_header(t_release, b_release, request.headers)

        t_vol = kw['volume']

        # add or remove key-value in the request for diff. version
        b_vol_req = self._convert_object(t_release,
                                         b_release,
                                         t_vol,
                                         res_type=cons.RT_VOLUME)

        # convert az to the configured one
        # remove the AZ parameter to bottom request for default one
        b_vol_req['availability_zone'] = pod['pod_az_name']
        if b_vol_req['availability_zone'] == '':
            b_vol_req.pop("availability_zone", None)

        b_body = jsonutils.dumps({'volume': b_vol_req})

        resp = hclient.forward_req(context, 'POST', b_headers, s_ctx['b_url'],
                                   b_body)
        b_status = resp.status_code
        b_ret_body = jsonutils.loads(resp.content)

        # build routing and convert response from the bottom pod
        # for different version.
        response.status = b_status
        if b_status == 202:
            if b_ret_body.get('volume') is not None:
                b_vol_ret = b_ret_body['volume']

                try:
                    with context.session.begin():
                        core.create_resource(
                            context, models.ResourceRouting, {
                                'top_id': b_vol_ret['id'],
                                'bottom_id': b_vol_ret['id'],
                                'pod_id': pod['pod_id'],
                                'project_id': self.tenant_id,
                                'resource_type': cons.RT_VOLUME
                            })
                except Exception as e:
                    LOG.exception(
                        _LE('Failed to create volume '
                            'resource routing'
                            'top_id: %(top_id)s ,'
                            'bottom_id: %(bottom_id)s ,'
                            'pod_id: %(pod_id)s ,'
                            '%(exception)s '), {
                                'top_id': b_vol_ret['id'],
                                'bottom_id': b_vol_ret['id'],
                                'pod_id': pod['pod_id'],
                                'exception': e
                            })
                    return Response(
                        _('Failed to create volume '
                          'resource routing'), 500)

                ret_vol = self._convert_object(b_release,
                                               t_release,
                                               b_vol_ret,
                                               res_type=cons.RT_VOLUME)

                # report the user-facing AZ, not the bottom pod's AZ name
                ret_vol['availability_zone'] = pod['az_name']

                return {'volume': ret_vol}

        return {'error': b_ret_body}
Esempio n. 15
0
    def post(self, **kw):
        """Create a volume in the bottom pod serving the requested AZ.

        Validates the request, resolves the bottom pod from the
        availability zone, forwards the create request to that pod's
        Cinder endpoint and records a resource routing entry so later
        requests can locate the volume.

        :param kw: request body; must contain 'volume' with an
                   'availability_zone' key
        :return: dict holding the created volume, or an 'error' entry
                 carrying the bottom pod's response body
        """
        context = t_context.extract_context_from_environ()

        if 'volume' not in kw:
            pecan.abort(400, _('Volume not found in request body'))
            return

        if 'availability_zone' not in kw['volume']:
            pecan.abort(400, _('Availability zone not set in request'))
            return

        pod, pod_az = az_ag.get_pod_by_az_tenant(
            context,
            az_name=kw['volume']['availability_zone'],
            tenant_id=self.tenant_id)
        if not pod:
            # log first: pecan.abort raises immediately, so in the original
            # code the LOG.error after it was unreachable
            LOG.error(_LE("Pod not configured or scheduling failure"))
            pecan.abort(500, _('Pod not configured or scheduling failure'))
            return

        t_pod = db_api.get_top_pod(context)
        if not t_pod:
            # fix typo in log message ("Top Po" -> "Top Pod")
            LOG.error(_LE("Top Pod not configured"))
            pecan.abort(500, _('Top Pod not configured'))
            return

        # TODO(joehuang): get release from pod configuration,
        # to convert the content
        # b_release = pod['release']
        # t_release = t_pod['release']
        t_release = 'Mitaka'
        b_release = 'Mitaka'

        s_ctx = hclient.get_pod_service_ctx(
            context,
            request.url,
            pod['pod_name'],
            s_type=cons.ST_CINDER)

        if s_ctx['b_url'] == '':
            LOG.error(_LE("bottom pod endpoint incorrect %s") %
                      pod['pod_name'])
            pecan.abort(500, _('bottom pod endpoint incorrect'))
            return

        b_headers = self._convert_header(t_release,
                                         b_release,
                                         request.headers)

        t_vol = kw['volume']

        # add or remove key-value in the request for diff. version
        b_vol_req = self._convert_object(t_release, b_release, t_vol,
                                         res_type=cons.RT_VOLUME)

        # convert az to the configured one
        # remove the AZ parameter to bottom request for default one
        b_vol_req['availability_zone'] = pod['pod_az_name']
        if b_vol_req['availability_zone'] == '':
            b_vol_req.pop("availability_zone", None)

        b_body = jsonutils.dumps({'volume': b_vol_req})

        resp = hclient.forward_req(
            context,
            'POST',
            b_headers,
            s_ctx['b_url'],
            b_body)
        b_status = resp.status_code
        b_ret_body = jsonutils.loads(resp.content)

        # build routing and convert response from the bottom pod
        # for different version.
        response.status = b_status
        if b_status == 202:
            if b_ret_body.get('volume') is not None:
                b_vol_ret = b_ret_body['volume']

                try:
                    with context.session.begin():
                        core.create_resource(
                            context, models.ResourceRouting,
                            {'top_id': b_vol_ret['id'],
                             'bottom_id': b_vol_ret['id'],
                             'pod_id': pod['pod_id'],
                             'project_id': self.tenant_id,
                             'resource_type': cons.RT_VOLUME})
                except Exception as e:
                    LOG.error(_LE('Fail to create volume: %(exception)s'),
                              {'exception': e})
                    return Response(_('Failed to create volume'), 500)

                ret_vol = self._convert_object(b_release, t_release,
                                               b_vol_ret,
                                               res_type=cons.RT_VOLUME)

                # report the user-facing AZ, not the bottom pod's AZ name
                ret_vol['availability_zone'] = pod['az_name']

                return {'volume': ret_vol}

        return {'error': b_ret_body}
Esempio n. 16
0
    def post(self, **kw):
        """Create volume metadata associated with a volume.

        Resolves the pod hosting the volume, forwards the metadata-create
        request to that pod's Cinder endpoint and converts the response
        back to the caller's release format.

        :param kw: dictionary of values to be created; must contain a
                   'metadata' key
        :returns: created volume metadata, or a formatted cinder error
        """
        context = t_context.extract_context_from_environ()

        if 'metadata' not in kw:
            return utils.format_cinder_error(
                400,
                _("Missing required element 'metadata' in "
                  "request body."))

        try:
            # locate the bottom pod that owns this volume
            pod = utils.get_pod_by_top_id(context, self.volume_id)
            if pod is None:
                return utils.format_cinder_error(
                    404,
                    _('Volume %(volume_id)s could not be found.') %
                    {'volume_id': self.volume_id})

            t_pod = db_api.get_top_pod(context)
            if not t_pod:
                LOG.error(_LE("Top Pod not configured"))
                return utils.format_cinder_error(500,
                                                 _('Top Pod not configured'))
        except Exception as e:
            LOG.exception(
                _LE('Fail to create metadata for a volume:'
                    '%(volume_id)s'
                    '%(exception)s'), {
                        'volume_id': self.volume_id,
                        'exception': e
                    })
            return utils.format_cinder_error(500, _('Fail to create metadata'))

        # both top and bottom are assumed to speak the Mitaka API here
        t_release = cons.R_MITAKA
        b_release = cons.R_MITAKA

        s_ctx = hclient.get_pod_service_ctx(context,
                                            request.url,
                                            pod['pod_name'],
                                            s_type=cons.ST_CINDER)

        if s_ctx['b_url'] == '':
            LOG.error(
                _LE("Bottom pod endpoint incorrect %s") % pod['pod_name'])
            return utils.format_cinder_error(
                500, _('Bottom pod endpoint incorrect'))

        b_headers = hclient.convert_header(t_release, b_release,
                                           request.headers)

        t_metadata = kw['metadata']

        # add or remove key-value in the request for diff. version
        # NOTE(review): RT_VOl_METADATA has odd casing but is the
        # project-defined constant name -- do not "fix" locally
        b_vol_req = hclient.convert_object(t_release,
                                           b_release,
                                           t_metadata,
                                           res_type=cons.RT_VOl_METADATA)

        b_body = jsonutils.dumps({'metadata': b_vol_req})

        resp = hclient.forward_req(context, 'POST', b_headers, s_ctx['b_url'],
                                   b_body)
        b_status = resp.status_code
        b_body_ret = jsonutils.loads(resp.content)

        # convert response from the bottom pod
        # for different version.
        response.status = b_status
        if b_status == 200:
            if b_body_ret.get('metadata') is not None:
                b_metadata_ret = b_body_ret['metadata']

                vol_ret = hclient.convert_object(b_release,
                                                 t_release,
                                                 b_metadata_ret,
                                                 res_type=cons.RT_VOl_METADATA)

                return {'metadata': vol_ret}

        return b_body_ret