def get(self, request, vimid=""):
    """List the extensions registered for the given vim.

    No extension is registered at present, so the response carries an
    empty "extensions" list alongside the decoded cloud identifiers.
    """
    LOGGER.debug("Extensions--get::data> %s" % request.data)
    LOGGER.debug("Extensions--get::vimid> %s" % vimid)
    try:
        owner, region_id = extsys.decode_vim_id(vimid)
        # example of a registered extension entry:
        # {
        #     "alias": "guest-monitor",
        #     "description": "Multiple network support",
        #     "name": "Guest Monitor",
        #     "url": self.proxy_prefix + "/%s/extensions/guest-monitor/{server_id}" % (vimid),
        #     "spec": ""
        # }
        extensions = []
        payload = {
            "cloud-owner": owner,
            "cloud-region-id": region_id,
            "vimid": vimid,
            "extensions": extensions,
        }
        return Response(data=payload, status=status.HTTP_200_OK)
    except VimDriverNewtonException as exc:
        return Response(data={'error': exc.content}, status=exc.status_code)
    except HttpError as exc:
        LOGGER.error("HttpError: status:%s, response:%s"
                     % (exc.http_status, exc.response.json()))
        return Response(data=exc.response.json(), status=exc.http_status)
    except Exception as exc:
        LOGGER.error(traceback.format_exc())
        return Response(data={'error': str(exc)},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def get(self, request, vimid=""):
    """List the extensions registered for the given vim.

    Currently no extension is registered, so the "extensions" list in
    the response is always empty.
    """
    logger.debug("Extensions--get::data> %s" % request.data)
    logger.debug("Extensions--get::vimid> %s" % vimid)
    try:
        owner, region_id = extsys.decode_vim_id(vimid)
        extensions = []
        payload = {
            "cloud-owner": owner,
            "cloud-region-id": region_id,
            "vimid": vimid,
            "extensions": extensions,
        }
        return Response(data=payload, status=status.HTTP_200_OK)
    except VimDriverNewtonException as exc:
        return Response(data={'error': exc.content}, status=exc.status_code)
    except HttpError as exc:
        logger.error("HttpError: status:%s, response:%s"
                     % (exc.http_status, exc.response.json()))
        return Response(data=exc.response.json(), status=exc.http_status)
    except Exception as exc:
        logger.error(traceback.format_exc())
        return Response(data={'error': str(exc)},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def workload_detail(self, vimid, stack_id, nexturi=None, otherinfo=None,
                    project_idorname=None):
    '''
    get workload status by either stack id or name
    :param vimid: vim id in the form <cloud_owner>_<cloud_region_id>
    :param stack_id: heat stack id or name
    :param nexturi: stacks/<stack id>/<nexturi>
    :param otherinfo: unused, kept for interface compatibility
    :param project_idorname: tenant id or name used during authentication
    :return: (result code, status enum, content) — 0 means ok, the enum
        is the heat stack_status or "GET_FAILED"
    '''
    try:
        # assume the workload_type is heat
        cloud_owner, regionid = extsys.decode_vim_id(vimid)
        # should go via multicloud proxy so that the selflink is updated by multicloud
        retcode, v2_token_resp_json, os_status = \
            helper.MultiCloudIdentityHelper(
                settings.MULTICLOUD_API_V1_PREFIX,
                cloud_owner, regionid, "/v2.0/tokens",
                {"Project": project_idorname})
        if retcode > 0 or not v2_token_resp_json:
            errmsg = "authenticate fails:%s, %s, %s" % \
                     (cloud_owner, regionid, v2_token_resp_json)
            logger.error(errmsg)
            return os_status, "GET_FAILED", errmsg

        # get stack status
        service_type = "orchestration"
        resource_uri = "/stacks/%s" % stack_id
        if nexturi:
            resource_uri += "/" + nexturi
        self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
        retcode, content, os_status = \
            helper.MultiCloudServiceHelper(cloud_owner, regionid,
                                           v2_token_resp_json,
                                           service_type, resource_uri,
                                           None, "GET")
        if retcode > 0 or not content:
            errmsg = "Stack query %s response: %s" % (resource_uri, content)
            self._logger.debug(errmsg)
            return os_status, "GET_FAILED", content

        stack = content.get('stack', {})  # if retcode == 0 and content else []
        workload_status = stack.get("stack_status", "GET_FAILED")
        return 0, workload_status, content
    except Exception as e:
        # FIX: Exception.message was removed in Python 3 — the original
        # "e.message" raised AttributeError inside this handler and lost
        # the real error; log the traceback and return str(e) instead.
        self._logger.error(traceback.format_exc())
        return status.HTTP_500_INTERNAL_SERVER_ERROR, "GET_FAILED", str(e)
def get(self, request, vimid="", requri=""):
    """Query infra workload (heat stack) status for the given vim.

    The requri, when present, is treated as a stack id; when empty all
    stacks are listed. If the stack has reached CREATE_COMPLETE, its
    resources are mirrored into AAI via heatbridge_update.

    :param request: incoming DRF request
    :param vimid: vim id in the form <cloud_owner>_<cloud_region_id>
    :param requri: optional heat stack id
    :return: DRF Response with template_type/workload_id/workload_status
    """
    self._logger.info("vimid, requri: %s, %s" % (vimid, requri))
    self._logger.debug("META: %s" % request.META)

    try:
        # assume the workload_type is heat
        template_type = "heat"
        stack_id = requri
        cloud_owner, regionid = extsys.decode_vim_id(vimid)
        # should go via multicloud proxy so that the selflink is updated by multicloud
        retcode, v2_token_resp_json, os_status = helper.MultiCloudIdentityHelper(
            settings.MULTICLOUD_API_V1_PREFIX, cloud_owner, regionid,
            "/v2.0/tokens")
        if retcode > 0 or not v2_token_resp_json:
            errmsg = "authenticate fails:%s, %s, %s" \
                     % (cloud_owner, regionid, v2_token_resp_json)
            logger.error(errmsg)
            # FIX: the original returned None here, which makes the web
            # framework fail on a view that produced no response object;
            # surface the authentication failure to the caller instead.
            return Response(data={'error': errmsg},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        # get stack status
        service_type = "orchestration"
        resource_uri = "/stacks?id=%s" % stack_id if stack_id else "/stacks"
        self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
        retcode, content, os_status = helper.MultiCloudServiceHelper(
            cloud_owner, regionid, v2_token_resp_json, service_type,
            resource_uri, None, "GET")

        stacks = content.get('stacks', []) if retcode == 0 and content else []
        stack_status = stacks[0]["stack_status"] if len(stacks) > 0 else ""

        resp_template = {
            "template_type": template_type,
            "workload_id": stack_id,
            "workload_status": stack_status
        }
        # expose the backend error body when the stack query failed
        if retcode > 0:
            resp_template['workload_response'] = content

        # sync the stack's resources into AAI once creation has completed
        if ('CREATE_COMPLETE' == stack_status):
            self.heatbridge_update(request, vimid, stack_id)

        self._logger.info("RESP with data> result:%s" % resp_template)
        return Response(data=resp_template, status=status.HTTP_200_OK)
    except VimDriverNewtonException as e:
        self._logger.error("Plugin exception> status:%s,error:%s"
                           % (e.status_code, e.content))
        return Response(data={'error': e.content}, status=e.status_code)
    except HttpError as e:
        self._logger.error("HttpError: status:%s, response:%s"
                           % (e.http_status, e.response.json()))
        return Response(data=e.response.json(), status=e.http_status)
    except Exception as e:
        self._logger.error(traceback.format_exc())
        return Response(data={'error': str(e)},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def processBacklog_pm_vm(vesAgentConfig, vesAgentState, oneBacklog):
    """Collect one PM backlog item's meters from OpenStack and publish to VES.

    Authenticates against the vim referenced by vesAgentConfig["vimid"],
    fetches metering data from the backlog's api_link, converts each meter
    to a VES event, and publishes the batch to the configured subscription.

    :param vesAgentConfig: agent config; must carry "vimid", may carry
        "subscription" for the VES endpoint
    :param vesAgentState: agent state (currently only logged)
    :param oneBacklog: backlog entry; must carry "api_link"
    :return: None — errors are logged and swallowed (best-effort agent loop)
    """
    logger.debug("vesAgentConfig:%s, vesAgentState:%s, oneBacklog: %s"
                 % (vesAgentConfig, vesAgentState, oneBacklog))

    try:
        # get token
        # resolve tenant_name to tenant_id
        vimid = vesAgentConfig["vimid"]
        cloud_owner, regionid = extsys.decode_vim_id(vimid)
        # should go via multicloud proxy so that the selflink is updated by multicloud
        retcode, v2_token_resp_json, os_status = helper.MultiCloudIdentityHelper(
            settings.MULTICLOUD_API_V1_PREFIX, cloud_owner, regionid,
            "/v2.0/tokens")
        if retcode > 0 or not v2_token_resp_json:
            logger.error("authenticate fails:%s,%s, %s"
                         % (cloud_owner, regionid, v2_token_resp_json))
            return

        service_type = "metering"
        resource_uri = oneBacklog["api_link"]
        template_data = ''
        logger.info("retrieve metering resources, URI:%s" % resource_uri)
        retcode, content, os_status = helper.MultiCloudServiceHelper(
            cloud_owner, regionid, v2_token_resp_json, service_type,
            resource_uri, template_data, "GET")
        meters = content if retcode == 0 and content else []

        all_events = []
        for meter in meters:
            encodeData = data2event_pm_vm(meter)
            # FIX: guard before mutating — the original dereferenced
            # encodeData['event'] ahead of the "is not None" check, which
            # raised TypeError whenever data2event_pm_vm returned None.
            if encodeData is not None:
                encodeData['event']['commonEventHeader']['eventType'] = 'guestOS'
                encodeData['event']['commonEventHeader'][
                    'reportingEntityId'] = vimid
                encodeData['event']['commonEventHeader'][
                    'reportingEntityName'] = vimid
                logger.debug("this event: %s" % encodeData)
                all_events.append(encodeData.get("event", None))

        # report data to VES
        if len(all_events) > 0:
            ves_subscription = vesAgentConfig.get("subscription", None)
            publishAnyEventToVES(ves_subscription, all_events)
        # store the latest data into cache, never expire
    except Exception as e:
        logger.error("exception:%s" % str(e))
        return

    logger.info("return")
    return
def _discover_snapshots(self, vimid="", session=None, viminfo=None):
    """Discover volume snapshots from the VIM and populate them into AAI.

    Queries the cinder (volumev3) "/snapshots/detail" endpoint and writes
    one "snapshot" resource per result into AAI via self._update_resoure.

    :param vimid: vim id in the form <cloud_owner>_<cloud_region_id>
    :param session: authenticated OpenStack session used for the query
    :param viminfo: vim registration data
    :return: (0, "Succeed") on success, or (error_code, message) tuple
        on the first failure (a single failed update aborts discovery)
    """
    try:
        cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
        for ss in self._get_list_resources(
                "/snapshots/detail", "volumev3", session,
                viminfo, vimid, "snapshots"):
            snapshot_info = {
                'snapshot-id': ss['id'],
                'snapshot-name': ss['name'],
            }
            # optional attributes live in the snapshot's metadata blob
            if ss.get('metadata'):
                snapshot_info['snapshot-architecture'] = ss[
                    'metadata'].get('architecture')
                # NOTE(review): 'application' is filled from the
                # 'architecture' metadata key — looks like a copy/paste
                # slip; confirm the intended source key before changing.
                snapshot_info['application'] = ss['metadata'].get(
                    'architecture')
                snapshot_info['snapshot-os-distro'] = ss['metadata'].get(
                    'os-distro')
                snapshot_info['snapshot-os-version'] = ss['metadata'].get(
                    'os-version')
                snapshot_info['application-vendor'] = ss['metadata'].get(
                    'vendor')
                snapshot_info['application-version'] = ss['metadata'].get(
                    'version')
                snapshot_info['snapshot-selflink'] = ss['metadata'].get(
                    'selflink')
                snapshot_info['prev-snapshot-id'] = ss['metadata'].get(
                    'prev-snapshot-id')
            ret, content = self._update_resoure(
                cloud_owner, cloud_region_id, ss['id'], snapshot_info,
                "snapshot")
            if ret != 0:
                # failed to update image
                self._logger.debug(
                    "failed to populate snapshot info into AAI: %s, snapshot-id: %s, ret:%s"
                    % (vimid, snapshot_info['snapshot-id'], ret))
                return (ret,
                        "fail to populate snapshot into AAI:%s" % content)
        return 0, "Succeed"
    except VimDriverNewtonException as e:
        self._logger.error(
            "VimDriverNewtonException: status:%s, response:%s"
            % (e.http_status, e.content))
        return (e.http_status, e.content)
    except HttpError as e:
        self._logger.error("HttpError: status:%s, response:%s"
                           % (e.http_status, e.response.json()))
        return (e.http_status, e.response.json())
    except Exception as e:
        self._logger.error(traceback.format_exc())
        return (11, str(e))
def _discover_pservers(self, vimid="", session=None, viminfo=None):
    """Discover nova hypervisors and populate them into AAI as pservers.

    Queries "/os-hypervisors/detail" and writes one pserver per
    hypervisor via self._update_pserver.

    :param vimid: vim id in the form <cloud_owner>_<cloud_region_id>
    :param session: authenticated OpenStack session used for the query
    :param viminfo: vim registration data
    :return: (0, "succeed") on success, or (error_code, message) on the
        first failed pserver update (which aborts the discovery)
    """
    try:
        cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
        for hypervisor in self._get_list_resources(
                "/os-hypervisors/detail", "compute", session,
                viminfo, vimid, "hypervisors"):
            hypervisor_info = {
                'hostname': hypervisor['hypervisor_hostname'],
                # NOTE(review): 'in-maint' is filled with the raw
                # hypervisor 'state' value, not a maintenance flag —
                # confirm against the AAI pserver schema.
                'in-maint': hypervisor['state'],
                'pserver-id': hypervisor.get('id'),
                'ptnii-equip-name': hypervisor.get('id'),
                'disk-in-gigabytes': hypervisor.get('local_gb'),
                'ram-in-megabytes': hypervisor.get('memory_mb'),
                'pserver-selflink': hypervisor.get('hypervisor_links'),
                'ipv4-oam-address': hypervisor.get('host_ip'),
            }
            # total logical CPUs = cores * threads * sockets, parsed from
            # the JSON-encoded cpu_info blob when a topology is present
            if hypervisor.get('cpu_info'):
                cpu_info = json.loads(hypervisor['cpu_info'])
                if cpu_info.get('topology'):
                    cputopo = cpu_info.get('topology')
                    n_cpus = cputopo['cores'] * cputopo[
                        'threads'] * cputopo['sockets']
                    hypervisor_info['number-of-cpus'] = n_cpus
            ret, content = self._update_pserver(cloud_owner,
                                                cloud_region_id,
                                                hypervisor_info)
            if ret != 0:
                # failed to update image
                self._logger.debug(
                    "failed to populate pserver info into AAI:"
                    " %s, hostname: %s, ret:%s"
                    % (vimid, hypervisor_info['hostname'], ret))
                return ret, "fail to update pserver to AAI:%s" % content
        return 0, "succeed"
    except VimDriverNewtonException as e:
        self._logger.error(
            "VimDriverNewtonException: status:%s, response:%s"
            % (e.http_status, e.content))
        return (e.http_status, e.content)
    except HttpError as e:
        self._logger.error("HttpError: status:%s, response:%s"
                           % (e.http_status, e.response.json()))
        return (e.http_status, e.response.json())
    except Exception as e:
        self._logger.error(traceback.format_exc())
        return (11, str(e))
def _discover_tenants(self, vimid="", session=None, viminfo=None):
    """Discover keystone projects and populate them into AAI as tenants.

    If listing projects is forbidden (HTTP 403, e.g. a non-admin
    credential), falls back to the single project embedded in the cached
    token response from the session.

    :param vimid: vim id in the form <cloud_owner>_<cloud_region_id>
    :param session: authenticated OpenStack session used for the query
    :param viminfo: vim registration data
    :return: (0, "succeed") on success, or (error_code, message) tuple
    """
    try:
        # iterate all projects and populate them into AAI
        cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
        for tenant in self._get_list_resources(
                "projects", "identity", session,
                viminfo, vimid, "projects"):
            tenant_info = {
                'tenant-id': tenant['id'],
                'tenant-name': tenant['name'],
            }
            # failures of individual tenant updates are ignored here
            self._update_resoure(cloud_owner, cloud_region_id,
                                 tenant['id'], tenant_info, "tenant")
        return 0, "succeed"
    except VimDriverNewtonException as e:
        self._logger.error(
            "VimDriverNewtonException: status:%s, response:%s"
            % (e.http_status, e.content))
        return (e.http_status, e.content)
    except HttpError as e:
        if e.http_status == status.HTTP_403_FORBIDDEN:
            # the credential cannot list all projects — fall back to the
            # single project carried by the token cached in the session
            ### get the tenant information from the token response
            try:
                ### get tenant info from the session
                tmp_auth_state = VimDriverUtils.get_auth_state(session)
                tmp_auth_info = json.loads(tmp_auth_state)
                tmp_auth_data = tmp_auth_info['body']
                tenant = tmp_auth_data['token']['project']
                tenant_info = {
                    'tenant-id': tenant['id'],
                    'tenant-name': tenant['name'],
                }
                self._update_resoure(cloud_owner, cloud_region_id,
                                     tenant['id'], tenant_info, "tenant")
                return 0, "succeed"
            except Exception as ex:
                self._logger.error(traceback.format_exc())
                return (11, str(ex))
        else:
            self._logger.error("HttpError: status:%s, response:%s"
                               % (e.http_status, e.response.json()))
            return (e.http_status, e.response.json())
    except Exception as e:
        self._logger.error(traceback.format_exc())
        return (11, str(e))
def head(self, request, vimid="", servicetype="", requri=""):
    """Proxy a HEAD request to the backend OpenStack service endpoint.

    Resolves the vim's session from the caller's token, forwards the
    HEAD to the requested service/resource, and relays the backend's
    body (if any) and status, echoing the token in X-Subject-Token.

    :param request: incoming DRF request
    :param vimid: vim id in the form <cloud_owner>_<cloud_region_id>
    :param servicetype: OpenStack service type to address (e.g. compute)
    :param requri: resource path to forward to the backend service
    :return: DRF Response mirroring the backend status and JSON body
    """
    self._logger.info("vimid, servicetype, requri> %s,%s,%s"
                      % (vimid, servicetype, requri))
    self._logger.debug("META, data> %s , %s"
                       % (request.META, request.data))
    token = self._get_token(request)
    try:
        vim = VimDriverUtils.get_vim_info(vimid)
        auth_state, metadata_catalog = VimDriverUtils.get_token_cache(token)
        sess = VimDriverUtils.get_session(vim, auth_state=auth_state)
        req_resource = ''
        if requri and requri != '':
            # NOTE(review): when requri starts with '//' the whole path
            # collapses to '/', and otherwise no leading '/' is added
            # (''+ requri is a no-op) — confirm this normalization is
            # intentional; it differs from the other proxy methods.
            req_resource = "/" if re.match(r'//', requri) else ''+ requri
        cloud_owner, regionid = extsys.decode_vim_id(vimid)
        interface = 'public'
        # prefer the native OpenStack region id when the vim carries one
        service = {'service_type': servicetype,
                   'interface': interface,
                   'region_name': vim['openstack_region_id']
                   if vim.get('openstack_region_id')
                   else vim['cloud_region_id']
                   }
        self._logger.info("service head request with uri %s, %s"
                          % (req_resource, service))
        resp = sess.head(req_resource, endpoint_filter=service)
        self._logger.info("service head response status %s"
                          % (resp.status_code))
        # HEAD responses usually have no body; guard before parsing JSON
        content = resp.json() if resp.content else None
        self._logger.debug("service head response: %s" % (content))
        return Response(headers={'X-Subject-Token': token},
                        data=content, status=resp.status_code)
    except VimDriverNewtonException as e:
        self._logger.error("Plugin exception> status:%s,error:%s"
                           % (e.status_code, e.content))
        return Response(data={'error': e.content}, status=e.status_code)
    except HttpError as e:
        self._logger.error("HttpError: status:%s, response:%s"
                           % (e.http_status, e.response.json()))
        return Response(data=e.response.json(), status=e.http_status)
    except Exception as e:
        self._logger.error(traceback.format_exc())
        return Response(data={'error': str(e)},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def workload_detail(self, vimid, workloadid, request):
    """Proxy a workload status query to the multicloud-k8s service.

    Builds the multicloud-k8s infra_workload URL from the decoded vim
    id, appends the workload id and any query string carried by the
    incoming request, forwards a GET, and relays the backend's JSON body
    and HTTP status back to the caller unchanged.
    """
    query_part = VimDriverUtils.get_query_part(request)
    payload = request.data
    base_url = "%s://%s:%s/api/multicloud-k8s/v1" % (
        settings.MSB_SERVICE_PROTOCOL,
        settings.MSB_SERVICE_ADDR,
        settings.MSB_SERVICE_PORT)

    # forward infra_workload API requests with queries
    owner, region = extsys.decode_vim_id(vimid)
    url = base_url + "/%s/%s/infra_workload" % (owner, region)
    if workloadid:
        url += "/%s" % workloadid
    if query_part:
        url += "?%s" % query_part

    # should we forward headers ? TBD
    logger.debug("request with url,content: %s,%s" % (url, payload))
    backend_resp = requests.get(url, verify=False)
    logger.debug("response status,content: %s,%s"
                 % (backend_resp.status_code, backend_resp.content))

    return Response(data=json.loads(backend_resp.content),
                    status=backend_resp.status_code)
def workload_create(self, vimid, workload_data, project_idorname=None):
    '''
    Instantiate a stack over target cloud region (OpenStack instance)
    The template for workload will be fetched from sdc client
    :param vimid:
    :param workload_data:
    :param project_idorname: tenant id or name
    :return: result code, status enum, status reason
        result code: 0-ok, otherwise error
        status enum: "CREATE_IN_PROGRESS", "CREATE_FAILED"
        status reason: message to explain the status enum
    '''

    # step 2: normalize the input: xxx_directives
    data = workload_data
    vf_module_model_customization_id = data.get(
        "vf-module-model-customization-id", None)
    vf_module_id = data.get("vf-module-id", "")
    user_directive = data.get("user_directives", {})
    oof_directive = data.get("oof_directives", {})
    sdnc_directive = data.get("sdnc_directives", {})
    template_type = data.get("template_type", None)
    template_data = data.get("template_data", {})
    # resp_template = None
    if not template_type or "heat" != template_type.lower():
        # FIX: the original wrote "... % template_type or ''" — the %
        # binds tighter than "or", so the intended None-fallback never
        # applied; parenthesize to substitute "" when template_type is falsy.
        return status.HTTP_400_BAD_REQUEST, "CREATE_FAILED", \
            "Bad parameters: template type %s is not heat" %\
            (template_type or "")

    # retrieve the template data
    template_data = self.openstack_template_update(
        template_data, vf_module_model_customization_id)

    # update the parameter in order of reverse precedence
    parameters = template_data.get("parameters", {})
    parameters = self.param_update_sdnc_directives(parameters,
                                                   sdnc_directive)
    parameters = self.param_update_oof_directives(parameters,
                                                  oof_directive)
    parameters = self.param_update_user_directives(parameters,
                                                   user_directive)
    template_data["parameters"] = parameters

    # reset to make sure "files" are empty
    template_data["files"] = {}

    template_data["stack_name"] =\
        template_data.get("stack_name", vf_module_id)

    # authenticate
    cloud_owner, regionid = extsys.decode_vim_id(vimid)
    # should go via multicloud proxy so that
    # the selflink is updated by multicloud
    retcode, v2_token_resp_json, os_status = \
        helper.MultiCloudIdentityHelper(
            settings.MULTICLOUD_API_V1_PREFIX,
            cloud_owner, regionid, "/v2.0/tokens",
            {"Project": project_idorname}
        )
    if retcode > 0 or not v2_token_resp_json:
        errmsg = "authenticate fails:%s,%s, %s" %\
            (cloud_owner, regionid, v2_token_resp_json)
        logger.error(errmsg)
        return (os_status, "CREATE_FAILED", errmsg)

    # tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
    service_type = "orchestration"
    resource_uri = "/stacks"
    self._logger.info("create stack resources, URI:%s" % resource_uri)
    retcode, content, os_status = \
        helper.MultiCloudServiceHelper(cloud_owner, regionid,
                                       v2_token_resp_json,
                                       service_type, resource_uri,
                                       template_data, "POST")
    if retcode == 0:
        stack1 = content.get('stack', None)
        # stackid = stack1["id"] if stack1 else ""
        return 0, "CREATE_IN_PROGRESS", stack1
    else:
        self._logger.info("workload_create fails: %s" % content)
        return os_status, "CREATE_FAILED", content
def _discover_availability_zones(self, vimid="",
                                 session=None, viminfo=None):
    """Discover availability zones and their hosts, populate both into AAI.

    Queries "/os-availability-zone/detail", writes each zone as an AAI
    availability-zone, then writes every host of the zone as a pserver
    and links it to its az and cloud region.

    :param vimid: vim id in the form <cloud_owner>_<cloud_region_id>
    :param session: authenticated OpenStack session used for the query
    :param viminfo: vim registration data
    :return: (0, az_pserver_info) on success, where az_pserver_info maps
        az name -> list of host names; (error_code, message) on failure
    """
    try:
        az_pserver_info = {}
        cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
        for az in self._get_list_resources(
                "/os-availability-zone/detail", "compute", session,
                viminfo, vimid, "availabilityZoneInfo"):
            az_info = {
                'availability-zone-name': az['zoneName'],
                'operational-status': az['zoneState']['available']
                if az.get('zoneState') else '',
                'hypervisor-type': '',
            }
            # filter out the default az: "internal" and "nova"
            azName = az.get('zoneName', None)
            # comment it for test the registration process only
            # if azName == 'nova':
            #     continue
            if azName == 'internal':
                continue

            # get list of host names
            pservers_info = [k for (k, v) in list(az['hosts'].items())]
            # set the association between az and pservers
            az_pserver_info[azName] = pservers_info

            az_info['hypervisor-type'] = 'QEMU'  # default for OpenStack

            ret, content = self._update_resoure(
                cloud_owner, cloud_region_id, az['zoneName'], az_info,
                "availability-zone")
            if ret != 0:
                # failed to update image
                # a failed az does not abort discovery — skip its hosts
                self._logger.debug(
                    "failed to populate az info into AAI: "
                    "%s, az name: %s, ret:%s"
                    % (vimid, az_info['availability-zone-name'], ret))
                # return (
                #     ret,
                #     "fail to popluate az info into AAI:%s" % content
                # )
                continue

            # populate pservers:
            for hostname in pservers_info:
                if hostname == "":
                    continue
                # pserver name is namespaced with the vim id to avoid
                # collisions between clouds
                pservername = vimid + "_" + hostname
                selflink = ""
                # if self.proxy_prefix[3:] == "/v1":
                #     selflink = "%s/%s/%s/compute/os-hypervisors/detail?hypervisor_hostname_pattern=%s" %\
                #         (self.proxy_prefix, cloud_owner, cloud_region_id, hostname)
                # else:
                #     selflink = "%s/%s/compute/os-hypervisors/detail?hypervisor_hostname_pattern=%s" % \
                #         (self.proxy_prefix, vimid, hostname)
                pinfo = {
                    "hostname": pservername,
                    "server-selflink": selflink,
                    "pserver-id": hostname
                }
                self._update_pserver(cloud_owner, cloud_region_id, pinfo)
                self._update_pserver_relation_az(cloud_owner,
                                                 cloud_region_id, pinfo,
                                                 azName)
                self._update_pserver_relation_cloudregion(
                    cloud_owner, cloud_region_id, pinfo)
        return (0, az_pserver_info)
    except VimDriverNewtonException as e:
        self._logger.error("VimDriverNewtonException: status:%s,"
                           " response:%s" % (e.http_status, e.content))
        return (e.http_status, e.content)
    except HttpError as e:
        self._logger.error("HttpError: status:%s, response:%s"
                           % (e.http_status, e.response.json()))
        return (e.http_status, e.response.json())
    except Exception as e:
        self._logger.error(traceback.format_exc())
        return (11, str(e))
def _discover_images(self, vimid="", session=None, viminfo=None):
    """Discover glance images and populate them into AAI.

    Queries the image service "/v2/images", writes each image into AAI
    via self._update_resoure, and additionally fetches each image's
    schema document (currently only logged; parsing is TBD).

    :param vimid: vim id in the form <cloud_owner>_<cloud_region_id>
    :param session: authenticated OpenStack session used for the queries
    :param viminfo: vim registration data (supplies the region name)
    :return: (0, "succeed") on success, (error_code, message) on failure
    """
    try:
        cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
        for image in self._get_list_resources(
                "/v2/images", "image", session, viminfo,
                vimid, "images"):
            image_info = {
                'image-id': image['id'],
                'image-name': image['name'],
                'image-selflink': image['self'],
                'image-os-distro': image.get('os_distro') or 'Unknown',
                'image-os-version': image.get('os_version') or 'Unknown',
                'application': image.get('application'),
                'application-vendor': image.get('application_vendor'),
                'application-version': image.get('application_version'),
                'image-architecture': image.get('architecture'),
            }

            # FIX: _update_resoure returns a (retcode, content) tuple, but
            # the original bound the whole tuple to a single name, so the
            # "ret != 0" failure check was always true and every image was
            # logged as failed and skipped.
            ret, content = self._update_resoure(
                cloud_owner, cloud_region_id, image['id'], image_info,
                "image")
            if ret != 0:
                # failed to update image
                self._logger.debug(
                    "failed to populate image info into AAI: %s,"
                    " image id: %s, ret:%s"
                    % (vimid, image_info['image-id'], ret))
                continue

            schema = image['schema']
            if schema:
                req_resource = schema
                service = {
                    'service_type': "image",
                    'interface': 'public',
                    'region_name': viminfo['openstack_region_id']
                    if viminfo.get('openstack_region_id')
                    else viminfo['cloud_region_id']
                }
                self._logger.info("making request with URI:%s"
                                  % req_resource)
                resp = session.get(req_resource, endpoint_filter=service)
                self._logger.info("request returns with status %s"
                                  % resp.status_code)
                if resp.status_code == status.HTTP_200_OK:
                    self._logger.debug("with content:%s" % resp.json())
                content = resp.json()
                # if resp.status_code == status.HTTP_200_OK:
                #     parse the schema? TBD
                #     self.update_image(cloud_owner, cloud_region_id, image_info)
                #     metadata_info = {}
        return (0, "succeed")
    except VimDriverNewtonException as e:
        self._logger.error("VimDriverNewtonException:"
                           " status:%s, response:%s"
                           % (e.http_status, e.content))
        return (e.http_status, e.content)
    except HttpError as e:
        self._logger.error("HttpError: status:%s, response:%s"
                           % (e.http_status, e.response.json()))
        return (e.http_status, e.response.json())
    except Exception as e:
        self._logger.error(traceback.format_exc())
        return (11, str(e))
def _discover_flavors(self, vimid="", session=None, viminfo=None):
    """Discover nova flavors (with HPA capabilities) into AAI.

    Queries "/flavors/detail"; for flavors whose name starts with
    "onap." it additionally reads the flavor's extra specs and derives
    HPA capabilities via hpa_discovery.HPADiscovery.

    :param vimid: vim id in the form <cloud_owner>_<cloud_region_id>
    :param session: authenticated OpenStack session used for the queries
    :param viminfo: vim registration data (supplies "version")
    :return: (0, "succeed") on success, (error_code, message) on failure
    """
    try:
        cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
        for flavor in self._get_list_resources(
                "/flavors/detail", "compute", session,
                viminfo, vimid, "flavors"):
            flavor_info = {
                'flavor-id': flavor['id'],
                'flavor-name': flavor['name'],
                'flavor-vcpus': flavor['vcpus'],
                'flavor-ram': flavor['ram'],
                'flavor-disk': flavor['disk'],
                'flavor-ephemeral': flavor['OS-FLV-EXT-DATA:ephemeral'],
                'flavor-swap': flavor['swap'],
                'flavor-is-public': flavor['os-flavor-access:is_public'],
                'flavor-disabled': flavor['OS-FLV-DISABLED:disabled'],
            }

            if flavor.get('links') and len(flavor['links']) > 0:
                flavor_info['flavor-selflink'] =\
                    flavor['links'][0]['href'] or 'http://0.0.0.0'
            else:
                flavor_info['flavor-selflink'] = 'http://0.0.0.0'

            # add hpa capabilities
            hpa_capabilities = []
            # only ONAP-prefixed flavors carry HPA information
            if (flavor['name'].find('onap.') == 0):
                req_resouce = "/flavors/%s/os-extra_specs" % flavor['id']
                extraResp = self._get_list_resources(
                    req_resouce, "compute", session, viminfo,
                    vimid, "extra_specs")
                vimtype = viminfo['version']
                hpa = hpa_discovery.HPADiscovery()
                # NOTE(review): each iteration passes the FULL extraResp
                # list (extra_spec itself is unused), so the same
                # capability set is appended once per extra spec —
                # confirm whether a single call was intended.
                for extra_spec in extraResp:
                    data = {
                        "flavor": flavor,
                        "extra_specs": extraResp,
                        "viminfo": viminfo,
                        "vimtype": vimtype
                    }
                    hpa_capability = hpa.get_hpa_capabilities(data)
                    hpa_capabilities.append(hpa_capability)
                logger.info("hpa_capabilities:%s" % hpa_capabilities)
                flavor_info['hpa-capabilities'] = \
                    {'hpa-capability': hpa_capabilities}

            # NOTE(review): retcode is not checked — a failed flavor
            # update is silently ignored (unlike images/snapshots).
            retcode, content = self._update_resoure(
                cloud_owner, cloud_region_id, flavor['id'], flavor_info,
                "flavor")
        return (0, "succeed")
    except VimDriverNewtonException as e:
        self._logger.error(
            "VimDriverNewtonException: status:%s, response:%s"
            % (e.http_status, e.content))
        return (e.http_status, e.content)
    except HttpError as e:
        self._logger.error("HttpError: status:%s, response:%s"
                           % (e.http_status, e.response.json()))
        return (e.http_status, e.response.json())
    except Exception as e:
        self._logger.error(traceback.format_exc())
        return (11, str(e))
def update_catalog_dnsaas(vimid, catalog, multicould_namespace, viminfo):
    '''
    append DNSaaS delegate endpoints to catalog
    :param vimid:
    :param catalog: service catalog to be updated
    :param multicould_namespace: multicloud namespace prefix to replace
        the real one in catalog endpoints url
    :param viminfo: vim information
    :return: updated catalog (unchanged when no delegate is configured
        or on any error)
    '''
    try:
        # the delegate config lives in the JSON-encoded cloud_extra_info
        delegate = None
        extra_json = viminfo.get('cloud_extra_info')
        if extra_json:
            delegate = json.loads(extra_json).get("dns-delegate")

        # DNSaaS delegate was not configured yet
        if not delegate \
                or not delegate.get("cloud-owner") \
                or not delegate.get("cloud-region-id"):
            return catalog

        delegate_region = delegate.get("cloud-region-id")

        # v0 URLs are keyed by the composite vimid; v1+ by owner/region
        if multicould_namespace[-3:] == "/v0":
            url = multicould_namespace + "/%s/dns-delegate" % vimid
        else:
            owner, region = extsys.decode_vim_id(vimid)
            url = multicould_namespace + \
                "/%s/%s/dns-delegate" % (owner, region)

        catalog.append({
            "name": "dns-delegate",
            "type": "dns",
            "id": str(uuid.uuid1()),
            "endpoints": [{
                "interface": "public",
                "region": delegate_region,
                "region_id": delegate_region,
                "id": str(uuid.uuid1()),
                "url": url,
            }]
        })
        return catalog

    except Exception:
        logger.error(traceback.format_exc())
        return catalog
def heatbridge_update(self, request, vimid, stack_id):
    '''
    update heat resource to AAI for the specified cloud region and tenant
    The resources includes: vserver, vserver/l-interface,
    :param request: incoming request (unused beyond being forwarded)
    :param vimid: vim id in the form <cloud_owner>_<cloud_region_id>
    :param stack_id: heat stack whose resources are mirrored into AAI
    :return: dict {"transactions": [...]} describing the AAI PUTs made,
        or None when authentication fails
    '''

    cloud_owner, regionid = extsys.decode_vim_id(vimid)
    # should go via multicloud proxy so that the selflink is updated by multicloud
    retcode, v2_token_resp_json, os_status = \
        helper.MultiCloudIdentityHelper(settings.MULTICLOUD_API_V1_PREFIX,
                                        cloud_owner, regionid,
                                        "/v2.0/tokens")
    if retcode > 0:
        logger.error("authenticate fails:%s, %s, %s"
                     % (cloud_owner, regionid, v2_token_resp_json))
        return None

    tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
    # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]

    # common prefix
    aai_cloud_region = \
        "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \
        % (cloud_owner, regionid, tenant_id)

    # get stack resource
    service_type = "orchestration"
    resource_uri = "/stacks/%s/resources" % (stack_id)
    self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
    retcode, content, os_status = \
        helper.MultiCloudServiceHelper(cloud_owner, regionid,
                                       v2_token_resp_json,
                                       service_type, resource_uri,
                                       None, "GET")
    resources = content.get('resources', []) \
        if retcode == 0 and content else []

    # find and update resources
    # first pass: vservers (OS::Nova::Server), second pass: vports
    transactions = []
    for resource in resources:
        # only CREATE_COMPLETE resources are mirrored
        if resource.get('resource_status', None) != "CREATE_COMPLETE":
            continue
        if resource.get('resource_type', None) == 'OS::Nova::Server':
            # retrieve vserver details
            service_type = "compute"
            resource_uri = "/servers/%s" \
                % (resource['physical_resource_id'])
            self._logger.info("retrieve vserver detail, URI:%s"
                              % resource_uri)
            retcode, content, os_status = \
                helper.MultiCloudServiceHelper(cloud_owner, regionid,
                                               v2_token_resp_json,
                                               service_type,
                                               resource_uri, None, "GET")
            self._logger.debug(" resp data:%s" % content)
            vserver_detail = content.get('server', None) \
                if retcode == 0 and content else None
            if vserver_detail:
                # compose inventory entry for vserver
                # the 'self' link becomes the AAI vserver-selflink
                vserver_link = ""
                for link in vserver_detail['links']:
                    if link['rel'] == 'self':
                        vserver_link = link['href']
                        break
                    pass

                # note: relationship-list to flavor/image is not be update yet
                # note: volumes is not updated yet
                # note: relationship-list to vnf will be handled somewhere else
                aai_resource = {
                    'body': {
                        'vserver-name': vserver_detail['name'],
                        'vserver-name2': vserver_detail['name'],
                        "vserver-id": vserver_detail['id'],
                        "vserver-selflink": vserver_link,
                        "prov-status": vserver_detail['status']
                    },
                    "uri": aai_cloud_region +
                           "/vservers/vserver/%s" % (vserver_detail['id'])
                }
                try:
                    # then update the resource
                    retcode, content, status_code = \
                        restcall.req_to_aai(aai_resource['uri'], "PUT",
                                            content=aai_resource['body'])
                    if retcode == 0 and content:
                        content = json.JSONDecoder().decode(content)
                        self._logger.debug("AAI update %s response: %s"
                                           % (aai_resource['uri'],
                                              content))
                except Exception:
                    # best-effort: a failed AAI PUT is logged but the
                    # transaction is still recorded below
                    self._logger.error(traceback.format_exc())
                    pass

                aai_resource_transactions = {"put": [aai_resource]}
                transactions.append(aai_resource_transactions)
                # self._logger.debug("aai_resource :%s" % aai_resource_transactions)
                pass

    for resource in resources:
        if resource.get('resource_status', None) != "CREATE_COMPLETE":
            continue
        if resource.get('resource_type', None) == 'OS::Neutron::Port':
            # retrieve vport details
            service_type = "network"
            resource_uri = "/v2.0/ports/%s" \
                % (resource['physical_resource_id'])
            self._logger.info("retrieve vport detail, URI:%s"
                              % resource_uri)
            retcode, content, os_status = \
                helper.MultiCloudServiceHelper(cloud_owner, regionid,
                                               v2_token_resp_json,
                                               service_type,
                                               resource_uri, None, "GET")
            self._logger.debug(" resp data:%s" % content)

            vport_detail = content.get('port', None) \
                if retcode == 0 and content else None
            if vport_detail:
                # compose inventory entry for vport
                # note: l3-interface-ipv4-address-list,
                # l3-interface-ipv6-address-list are not updated yet
                # note: network-name is not update yet since the detail
                # coming with network-id
                aai_resource = {
                    "body": {
                        "interface-name": vport_detail['name'],
                        "interface-id": vport_detail['id'],
                        "macaddr": vport_detail['mac_address']
                    },
                    'uri':
                        aai_cloud_region +
                        "/vservers/vserver/%s/l-interfaces/l-interface/%s"
                        % (vport_detail['device_id'],
                           vport_detail['name'])
                }
                try:
                    # then update the resource
                    retcode, content, status_code = \
                        restcall.req_to_aai(aai_resource['uri'], "PUT",
                                            content=aai_resource['body'])
                    if retcode == 0 and content:
                        content = json.JSONDecoder().decode(content)
                        self._logger.debug("AAI update %s response: %s"
                                           % (aai_resource['uri'],
                                              content))
                except Exception:
                    self._logger.error(traceback.format_exc())
                    pass

                aai_resource_transactions = {"put": [aai_resource]}
                transactions.append(aai_resource_transactions)
                # self._logger.debug("aai_resource :%s" % aai_resource_transactions)
                pass

    aai_transactions = {"transactions": transactions}
    self._logger.debug("aai_transactions :%s" % aai_transactions)
    return aai_transactions
def heatbridge_delete(self, request, vimid, stack_id):
    '''
    Remove the heat-stack-derived inventory from AAI for the specified
    cloud region and tenant.

    The resources removed include: vserver and vserver/l-interface entries
    that correspond to OS::Nova::Server resources of the given stack.

    :param request: incoming HTTP request (unused beyond logging context)
    :param vimid: composite cloud id, decoded into (cloud_owner, region)
    :param stack_id: heat stack whose resources should be de-registered
    :return: None in all cases (errors are logged, not raised)
    '''

    # enumerate the resources
    cloud_owner, regionid = extsys.decode_vim_id(vimid)
    # should go via multicloud proxy so that the selflink is updated by multicloud
    retcode, v2_token_resp_json, os_status = \
        helper.MultiCloudIdentityHelper(settings.MULTICLOUD_API_V1_PREFIX,
                                        cloud_owner, regionid, "/v2.0/tokens")
    if retcode > 0:
        logger.error("authenticate fails:%s, %s, %s" %
                     (cloud_owner, regionid, v2_token_resp_json))
        return None

    # tenant scoping comes from the token response, not from the caller
    tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
    # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]

    # common prefix for every AAI resource touched below
    aai_cloud_region = \
        "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \
        % (cloud_owner, regionid, tenant_id)

    # get stack resource list from heat to learn which vservers belong to it
    service_type = "orchestration"
    resource_uri = "/stacks/%s/resources" % (stack_id)
    self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
    retcode, content, os_status = \
        helper.MultiCloudServiceHelper(cloud_owner, regionid,
                                       v2_token_resp_json,
                                       service_type, resource_uri,
                                       None, "GET")
    resources = content.get('resources', []) \
        if retcode == 0 and content else []

    # physical_resource_id of each nova server == AAI vserver-id
    vserver_list = [resource['physical_resource_id'] for resource in resources
                    if resource.get('resource_type', None) == 'OS::Nova::Server']

    try:
        # get list of vservers (depth=all so l-interfaces come inline)
        vserver_list_url = aai_cloud_region + "/vservers?depth=all"
        retcode, content, status_code = \
            restcall.req_to_aai(vserver_list_url, "GET")
        if retcode > 0 or not content:
            self._logger.debug("AAI get %s response: %s"
                               % (vserver_list_url, content))
            return None

        content = json.JSONDecoder().decode(content)
        vservers = content['vserver']
        for vserver in vservers:
            # only delete vservers that belong to this stack
            if vserver['vserver-id'] not in vserver_list:
                continue

            try:
                # iterate vport, except will be raised if no l-interface exist
                for vport in vserver['l-interfaces']['l-interface']:
                    # delete vport; resource-version is required by AAI
                    vport_delete_url = \
                        aai_cloud_region + \
                        "/vservers/vserver/%s/l-interfaces/l-interface/%s?resource-version=%s" \
                        % (vserver['vserver-id'], vport['interface-name'],
                           vport['resource-version'])
                    restcall.req_to_aai(vport_delete_url, "DELETE")
            except Exception:
                # best-effort: a vserver with no l-interfaces raises KeyError;
                # deliberately swallowed so the vserver itself still gets deleted
                pass

            try:
                # delete vserver
                vserver_delete_url = \
                    aai_cloud_region + \
                    "/vservers/vserver/%s?resource-version=%s" \
                    % (vserver['vserver-id'], vserver['resource-version'])
                restcall.req_to_aai(vserver_delete_url, "DELETE")
            except Exception:
                # best-effort: move on to the next vserver on failure
                continue
    except Exception:
        self._logger.error(traceback.format_exc())
        return None
    pass
def post(self, request, vimid=""):
    """Check whether the cloud region behind *vimid* can satisfy a demand.

    The request body carries the demand: 'vCPU', 'Memory' (MB) and
    'Storage' (GB). The available capacity is the minimum of the tenant's
    quota headroom and the hypervisors' free resources.

    :param request: DRF request; ``request.data`` holds the resource demand
    :param vimid: composite cloud id, decoded into (cloud_owner, region)
    :return: Response ``{'result': bool}`` on success; on error the payload
        also carries an ``error`` field plus the last known ``result``.
    """
    self._logger.info("CapacityCheck--post::vimid, data> %s, %s" %
                      (vimid, request.data))
    self._logger.debug("CapacityCheck--post::META> %s" % request.META)

    hasEnoughResource = False
    try:
        resource_demand = request.data
        tenant_name = None
        vim = VimDriverUtils.get_vim_info(vimid)
        sess = VimDriverUtils.get_session(vim, tenant_name)

        # get token:
        cloud_owner, regionid = extsys.decode_vim_id(vimid)
        interface = 'public'
        service = {
            'service_type': 'compute',
            'interface': interface,
            'region_name': vim['openstack_region_id']
            if vim.get('openstack_region_id')
            else vim['cloud_region_id']
        }

        # get compute limit (quota) for this tenant
        req_resouce = "/limits"
        resp = sess.get(req_resouce, endpoint_filter=service)
        content = resp.json()
        compute_limits = content['limits']['absolute']

        # get total resource of this cloud region
        try:
            req_resouce = "/os-hypervisors/statistics"
            self._logger.info(
                "check os-hypervisors statistics> URI:%s" % req_resouce)
            resp = sess.get(req_resouce, endpoint_filter=service)
            self._logger.info(
                "check os-hypervisors statistics> status:%s"
                % resp.status_code)
            content = resp.json()
            hypervisor_statistics = content['hypervisor_statistics']
            self._logger.debug(
                "check os-hypervisors statistics> resp data:%s" % content)
        except HttpError as e:
            if e.http_status == status.HTTP_403_FORBIDDEN:
                # Non-administrator accounts cannot read hypervisor data;
                # fabricate statistics that exactly satisfy the demand so
                # the check falls back to quota-only limits.
                conVCPUS = int(resource_demand['vCPU'])
                conFreeRamMB = int(resource_demand['Memory'])
                conFreeDiskGB = int(resource_demand['Storage'])
                self._logger.info(
                    "Non administator forbidden to access hypervisor statistics data"
                )
                hypervisor_statistics = {
                    'vcpus_used': 0,
                    'vcpus': conVCPUS,
                    'free_ram_mb': conFreeRamMB,
                    'free_disk_gb': conFreeDiskGB
                }
            else:
                # non-forbidden exceptions are handled by the outer handlers
                raise e

        # get storage limit for this tenant
        service['service_type'] = 'volumev2'
        req_resouce = "/limits"
        resp = sess.get(req_resouce, endpoint_filter=service)
        content = resp.json()
        storage_limits = content['limits']['absolute']

        # actual available = min(quota headroom, hypervisor free capacity)
        remainVCPU = compute_limits['maxTotalCores'] - compute_limits[
            'totalCoresUsed']
        remainHypervisorVCPU = hypervisor_statistics[
            'vcpus'] - hypervisor_statistics['vcpus_used']
        if remainVCPU > remainHypervisorVCPU:
            remainVCPU = remainHypervisorVCPU

        remainMEM = compute_limits['maxTotalRAMSize'] - compute_limits[
            'totalRAMUsed']
        remainHypervisorMEM = hypervisor_statistics['free_ram_mb']
        if remainMEM > remainHypervisorMEM:
            remainMEM = remainHypervisorMEM

        remainStorage = storage_limits[
            'maxTotalVolumeGigabytes'] - storage_limits[
                'totalGigabytesUsed']
        remainHypervisorStorage = hypervisor_statistics['free_disk_gb']
        if remainStorage > remainHypervisorStorage:
            remainStorage = remainHypervisorStorage

        # compare resource demanded with available
        if int(resource_demand['vCPU']) > remainVCPU:
            hasEnoughResource = False
        elif int(resource_demand['Memory']) > remainMEM:
            hasEnoughResource = False
        elif int(resource_demand['Storage']) > remainStorage:
            hasEnoughResource = False
        else:
            hasEnoughResource = True

        return Response(data={'result': hasEnoughResource},
                        status=status.HTTP_200_OK)
    except VimDriverNewtonException as e:
        return Response(data={
            'result': hasEnoughResource,
            'error': e.content
        },
                        status=e.status_code)
    except HttpError as e:
        self._logger.error("HttpError: status:%s, response:%s" %
                           (e.http_status, e.response.json()))
        resp = e.response.json()
        resp.update({'result': hasEnoughResource})
        # bugfix: return the payload augmented with 'result'; previously the
        # raw e.response.json() was returned and the update was discarded
        return Response(data=resp, status=e.http_status)
    except Exception as e:
        self._logger.error(traceback.format_exc())
        return Response(data={
            'result': hasEnoughResource,
            'error': str(e)
        },
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def post(self, request, vimid=""):
    """Return the state of all servers in the cloud region behind *vimid*.

    :param request: DRF request (body is logged but otherwise unused)
    :param vimid: composite cloud id, decoded into (cloud_owner, region)
    :return: Response ``{'result': [vmstate, ...]}``; the list is empty when
        the region has no servers. Error responses include the partial
        ``result`` plus an ``error``/upstream payload.
    """
    self._logger.info("vimid, data> %s, %s" % (vimid, request.data))
    self._logger.debug("META> %s" % request.META)

    # bugfix: bind before the try so the except handlers below can always
    # reference it (previously UnboundLocalError when an early call failed)
    resp_vmstate = []
    try:
        resource_demand = request.data
        tenant_name = None
        vim = VimDriverUtils.get_vim_info(vimid)
        sess = VimDriverUtils.get_session(vim, tenant_name)

        # get token:
        cloud_owner, regionid = extsys.decode_vim_id(vimid)
        interface = 'public'
        service = {
            'service_type': 'compute',
            'interface': interface,
            'region_name': vim['openstack_region_id']
            if vim.get('openstack_region_id')
            else vim['cloud_region_id']
        }

        # get servers detail info
        req_resouce = "/servers/detail"
        self._logger.info("check servers detail> URI:%s" % req_resouce)
        resp = sess.get(req_resouce, endpoint_filter=service)
        self._logger.info("check servers detail> status:%s"
                          % resp.status_code)
        content = resp.json()
        self._logger.debug("check servers detail> resp data:%s" % content)

        # extract server status info; iterate directly rather than by index
        for server in content.get('servers', []):
            vmstate = {
                'name': server['name'],
                'state': server['OS-EXT-STS:vm_state'],
                'power_state': server['OS-EXT-STS:power_state'],
                'launched_at': server['OS-SRV-USG:launched_at'],
                'id': server['id'],
                'host': server['OS-EXT-SRV-ATTR:host'],
                'availability_zone': server['OS-EXT-AZ:availability_zone'],
                'tenant_id': server['tenant_id']
            }
            resp_vmstate.append(vmstate)

        # bugfix: always return a Response; previously an empty server list
        # fell through the function and returned None
        self._logger.info("RESP with data> result:%s" % resp_vmstate)
        return Response(data={'result': resp_vmstate},
                        status=status.HTTP_200_OK)
    except VimDriverNewtonException as e:
        self._logger.error("Plugin exception> status:%s,error:%s"
                           % (e.status_code, e.content))
        return Response(data={
            'result': resp_vmstate,
            'error': e.content
        },
                        status=e.status_code)
    except HttpError as e:
        self._logger.error("HttpError: status:%s, response:%s" %
                           (e.http_status, e.response.json()))
        resp = e.response.json()
        resp.update({'result': resp_vmstate})
        # bugfix: return the payload augmented with 'result'; previously the
        # raw e.response.json() was returned and the update was discarded
        return Response(data=resp, status=e.http_status)
    except Exception as e:
        self._logger.error(traceback.format_exc())
        return Response(data={
            'result': resp_vmstate,
            'error': str(e)
        },
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def workload_update(self, vimid, stack_id, otherinfo=None, project_idorname=None):
    '''
    update heat resource to AAI for the specified cloud region and tenant
    The resources includes: vserver, vserver/l-interface,
    :param vimid: composite cloud id, decoded into (cloud_owner, region)
    :param stack_id: id of the created stack in OpenStack instance
    :param otherinfo: unused, kept for interface compatibility
    :param project_idorname: optional project scoping for authentication
    :return: result code, status enum, status reason
        result code: 0-ok, otherwise error
        status enum: "UPDATE_COMPLETE", "UPDATE_FAILED"
        status reason: message to explain the status enum
    '''

    cloud_owner, regionid = extsys.decode_vim_id(vimid)
    # should go via multicloud proxy so that the selflink is updated by multicloud
    retcode, v2_token_resp_json, os_status = \
        helper.MultiCloudIdentityHelper(
            settings.MULTICLOUD_API_V1_PREFIX,
            cloud_owner, regionid, "/v2.0/tokens",
            {"Project": project_idorname})

    if retcode > 0:
        errmsg = "authenticate fails:%s, %s, %s" %\
            (cloud_owner, regionid, v2_token_resp_json)
        logger.error(errmsg)
        return os_status, "UPDATE_FAILED", errmsg

    tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
    # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]

    # common prefix for every AAI resource below
    aai_cloud_region = \
        "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \
        % (cloud_owner, regionid, tenant_id)

    # get stack resource
    service_type = "orchestration"
    resource_uri = "/stacks/%s/resources" % (stack_id)
    self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
    retcode, content, os_status = \
        helper.MultiCloudServiceHelper(cloud_owner, regionid,
                                       v2_token_resp_json,
                                       service_type, resource_uri,
                                       None, "GET")
    resources = content.get('resources', []) if retcode == 0 and content else []
    if retcode > 0:
        errmsg = "stack:%s, query fails: %s" %\
            (resource_uri, content)
        logger.error(errmsg)
        return os_status, "UPDATE_FAILED", content

    # find and update resources: pass 1 registers vservers in AAI
    for resource in resources:
        if resource.get('resource_status', None) != "CREATE_COMPLETE":
            # this resource is not ready yet, abort the whole update
            errmsg = "stack: %s, resource not ready :%s" % \
                (resource_uri, resource)
            logger.info(errmsg)
            return status.HTTP_206_PARTIAL_CONTENT, "UPDATE_FAILED", errmsg
        if resource.get('resource_type', None) == 'OS::Nova::Server':
            # retrieve vserver details
            service_type = "compute"
            resource_uri = "/servers/%s" % (
                resource['physical_resource_id'])
            self._logger.info("retrieve vserver detail, URI:%s"
                              % resource_uri)
            retcode, content, os_status = \
                helper.MultiCloudServiceHelper(cloud_owner, regionid,
                                               v2_token_resp_json,
                                               service_type, resource_uri,
                                               None, "GET")
            self._logger.debug(" resp data:%s" % content)
            if retcode > 0:
                errmsg = "stack resource:%s, query fails: %s" % \
                    (resource_uri, content)
                logger.error(errmsg)
                return os_status, "UPDATE_FAILED", content
            vserver_detail = content.get(
                'server', None) if retcode == 0 and content else None
            if vserver_detail:
                # compose inventory entry for vserver
                vserver_link = ""
                for link in vserver_detail['links']:
                    if link['rel'] == 'self':
                        vserver_link = link['href']
                        break

                # note: relationship-list to flavor/image is not updated yet
                # note: volumes is not updated yet
                # note: relationship-list to vnf will be handled somewhere else
                aai_resource = {
                    'body': {
                        'vserver-name': vserver_detail['name'],
                        'vserver-name2': vserver_detail['name'],
                        "vserver-id": vserver_detail['id'],
                        "vserver-selflink": vserver_link,
                        "prov-status": vserver_detail['status']
                    },
                    "uri": aai_cloud_region + "/vservers/vserver/%s"
                           % (vserver_detail['id'])
                }
                try:
                    # then update the resource
                    retcode, content, status_code = \
                        restcall.req_to_aai(aai_resource['uri'],
                                            "PUT", content=aai_resource['body'])
                    if retcode == 0 and content:
                        content = json.JSONDecoder().decode(content)
                        self._logger.debug("AAI update %s response: %s" %
                                           (aai_resource['uri'], content))
                except Exception as e:
                    # bugfix: Exception has no .message in Python 3; str(e)
                    # works on both Python 2 and 3
                    self._logger.error(str(e))
                    return status.HTTP_500_INTERNAL_SERVER_ERROR, \
                        "UPDATE_FAILED", str(e)

    # pass 2 registers neutron ports as l-interfaces under their vservers
    for resource in resources:
        if resource.get('resource_status', None) != "CREATE_COMPLETE":
            continue
        if resource.get('resource_type', None) == 'OS::Neutron::Port':
            # retrieve vport details
            service_type = "network"
            resource_uri = "/v2.0/ports/%s" % (
                resource['physical_resource_id'])
            self._logger.info("retrieve vport detail, URI:%s" % resource_uri)
            retcode, content, os_status = \
                helper.MultiCloudServiceHelper(cloud_owner, regionid,
                                               v2_token_resp_json,
                                               service_type, resource_uri,
                                               None, "GET")
            self._logger.debug(" resp data:%s" % content)
            if retcode > 0:
                errmsg = "stack resource:%s, query fails: %s" % \
                    (resource_uri, content)
                logger.error(errmsg)
                return os_status, "UPDATE_FAILED", content
            vport_detail = content.get(
                'port', None) if retcode == 0 and content else None
            if vport_detail:
                # compose inventory entry for vport
                # note: l3-interface-ipv4-address-list,
                # l3-interface-ipv6-address-list are not updated yet
                # note: network-name is not updated yet since the detail
                # coming with network-id
                aai_resource = {
                    "body": {
                        "interface-name": vport_detail['name'],
                        "interface-id": vport_detail['id'],
                        "macaddr": vport_detail['mac_address']
                    },
                    'uri':
                        aai_cloud_region +
                        "/vservers/vserver/%s/l-interfaces/l-interface/%s"
                        % (vport_detail['device_id'], vport_detail['name'])
                }
                try:
                    # then update the resource
                    retcode, content, status_code = \
                        restcall.req_to_aai(aai_resource['uri'], "PUT",
                                            content=aai_resource['body'])
                    if retcode == 0 and content:
                        content = json.JSONDecoder().decode(content)
                        self._logger.debug("AAI update %s response: %s" %
                                           (aai_resource['uri'], content))
                except Exception as e:
                    # bugfix: str(e) instead of the Python-2-only e.message
                    self._logger.error(str(e))
                    return status.HTTP_500_INTERNAL_SERVER_ERROR, \
                        "UPDATE_FAILED", str(e)

    return 0, "UPDATE_COMPLETE", "succeed"
def workload_delete(self, vimid, stack_id, otherinfo=None, project_idorname=None):
    '''
    remove heat resource from AAI for the specified cloud region and tenant
    The resources includes: vserver, vserver/l-interface,
    :param vimid: composite cloud id, decoded into (cloud_owner, region)
    :param stack_id: id of the created stack in OpenStack instance
    :param otherinfo: unused, kept for interface compatibility
    :param project_idorname: optional project scoping for authentication
    :return: result code, status enum, status reason
        result code: 0-ok, otherwise error
        status enum: "DELETE_COMPLETE", "DELETE_FAILED"
        status reason: message to explain the status enum
    '''

    # enumerate the resources
    cloud_owner, regionid = extsys.decode_vim_id(vimid)
    # should go via multicloud proxy so that the selflink is updated by multicloud
    retcode, v2_token_resp_json, os_status = \
        helper.MultiCloudIdentityHelper(
            settings.MULTICLOUD_API_V1_PREFIX,
            cloud_owner, regionid, "/v2.0/tokens",
            {"Project": project_idorname})
    if retcode > 0:
        errmsg = "authenticate fails:%s, %s, %s" %\
            (cloud_owner, regionid, v2_token_resp_json)
        logger.error(errmsg)
        return os_status, "DELETE_FAILED", errmsg

    tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
    # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]

    # common prefix for every AAI resource below
    aai_cloud_region = \
        "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \
        % (cloud_owner, regionid, tenant_id)

    # get stack resource
    service_type = "orchestration"
    resource_uri = "/stacks/%s/resources" % (stack_id)
    self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
    retcode, content, os_status = \
        helper.MultiCloudServiceHelper(cloud_owner, regionid,
                                       v2_token_resp_json,
                                       service_type, resource_uri,
                                       None, "GET")
    resources = content.get('resources', []) \
        if retcode == 0 and content else []

    # physical_resource_id of each nova server == AAI vserver-id
    vserver_list = [
        resource['physical_resource_id'] for resource in resources
        if resource.get('resource_type', None) == 'OS::Nova::Server'
    ]

    try:
        # get list of vservers (depth=all so l-interfaces come inline)
        vserver_list_url = aai_cloud_region + "/vservers?depth=all"
        retcode, content, status_code = \
            restcall.req_to_aai(vserver_list_url, "GET")
        if retcode > 0 or not content:
            self._logger.debug("AAI get %s response: %s" %
                               (vserver_list_url, content))
            # bugfix: the failure here is the AAI query, not authentication;
            # previously the message wrongly claimed "authenticate fails"
            return (status_code, "DELETE_FAILED",
                    "AAI get fails:%s, %s" % (vserver_list_url, content))

        content = json.JSONDecoder().decode(content)
        vservers = content['vserver']
        for vserver in vservers:
            # only delete vservers that belong to this stack
            if vserver['vserver-id'] not in vserver_list:
                continue

            try:
                # iterate vport; KeyError is raised (and swallowed) when the
                # vserver has no l-interfaces
                for vport in vserver['l-interfaces']['l-interface']:
                    # delete vport; resource-version is required by AAI
                    vport_delete_url = \
                        aai_cloud_region + \
                        "/vservers/vserver/%s/l-interfaces/l-interface/%s?resource-version=%s" \
                        % (vserver['vserver-id'], vport['interface-name'],
                           vport['resource-version'])
                    restcall.req_to_aai(vport_delete_url, "DELETE")
            except Exception:
                # best-effort: still try to delete the vserver itself
                pass

            try:
                # delete vserver
                vserver_delete_url = \
                    aai_cloud_region + \
                    "/vservers/vserver/%s?resource-version=%s" \
                    % (vserver['vserver-id'], vserver['resource-version'])
                restcall.req_to_aai(vserver_delete_url, "DELETE")
            except Exception:
                # best-effort: move on to the next vserver on failure
                continue

        return 0, "DELETE_COMPLETE", "succeed"
    except Exception as e:
        # bugfix: str(e) instead of the Python-2-only e.message, which
        # raised AttributeError inside the handler on Python 3
        self._logger.error(str(e))
        return status.HTTP_500_INTERNAL_SERVER_ERROR, "DELETE_FAILED", str(e)
def workload_create(self, vimid, workload_data, project_idorname=None):
    '''
    Instantiate a stack over target cloud region (OpenStack instance)
    :param vimid: composite cloud id, decoded into (cloud_owner, region)
    :param workload_data: dict with "template_type" ("heat"),
        "template_data" (heat request body) and optional "oof_directives"
    :param project_idorname: optional project scoping for authentication
    :return: result code, status enum, status reason
        result code: 0-ok, otherwise error
        status enum: "CREATE_IN_PROGRESS", "CREATE_FAILED"
        status reason: message to explain the status enum
    '''
    data = workload_data
    oof_directive = data.get("oof_directives", {})
    template_type = data.get("template_type", None)
    template_data = data.get("template_data", {})

    if not template_type or "heat" != template_type.lower():
        # bugfix: '%' binds tighter than 'or', so the original
        # `"..." % template_type or ""` never applied the "" fallback;
        # the guard must wrap the operand instead
        return status.HTTP_400_BAD_REQUEST, "CREATE_FAILED", \
            "Bad parameters: template type %s is not heat" % \
            (template_type or "")

    # update heat parameters from oof_directive
    parameters = template_data.get("parameters", {})
    for directive in oof_directive.get("directives", []):
        if directive["type"] == "vnfc":
            for directive2 in directive.get("directives", []):
                if directive2["type"] in [
                        "flavor_directives",
                        "sriovNICNetwork_directives"
                ]:
                    for attr in directive2.get("attributes", []):
                        flavor_label = attr.get("attribute_name", None)
                        flavor_value = attr.get("attribute_value", None)
                        if flavor_label in parameters:
                            parameters[flavor_label] = flavor_value
                        else:
                            self._logger.warn(
                                "There is no parameter exist: %s" %
                                flavor_label)

    # update parameters
    template_data["parameters"] = parameters

    # reset to make sure "files" are empty
    template_data["files"] = {}

    # authenticate
    cloud_owner, regionid = extsys.decode_vim_id(vimid)
    # should go via multicloud proxy so that
    # the selflink is updated by multicloud
    retcode, v2_token_resp_json, os_status = \
        helper.MultiCloudIdentityHelper(
            settings.MULTICLOUD_API_V1_PREFIX,
            cloud_owner, regionid, "/v2.0/tokens",
            {"Project": project_idorname}
        )

    if retcode > 0 or not v2_token_resp_json:
        errmsg = "authenticate fails:%s,%s, %s" %\
            (cloud_owner, regionid, v2_token_resp_json)
        logger.error(errmsg)
        return (os_status, "CREATE_FAILED", errmsg)

    # tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]

    # create the stack via heat
    service_type = "orchestration"
    resource_uri = "/stacks"
    self._logger.info("create stack resources, URI:%s" % resource_uri)
    retcode, content, os_status = \
        helper.MultiCloudServiceHelper(cloud_owner, regionid,
                                       v2_token_resp_json,
                                       service_type, resource_uri,
                                       template_data, "POST")
    if retcode == 0:
        stack1 = content.get('stack', None)
        # stackid = stack1["id"] if stack1 else ""
        return 0, "CREATE_IN_PROGRESS", stack1
    else:
        self._logger.info("workload_create fail: %s" % content)
        return os_status, "CREATE_FAILED", content
def delete(self, request, vimid="", requri=""):
    """Delete the heat stack identified by *requri* (workload id).

    Only stacks in CREATE_COMPLETE state are deleted; for others the call
    is a no-op that still reports the upstream status.

    :param request: DRF request
    :param vimid: composite cloud id, decoded into (cloud_owner, region)
    :param requri: the workload (stack) id; must be non-empty
    :return: Response whose status mirrors the OpenStack response;
        error payloads carry an 'error' field
    """
    self._logger.info("vimid,requri: %s, %s" % (vimid, requri))
    self._logger.debug("META: %s" % request.META)

    try:
        if requri == "":
            raise VimDriverNewtonException(
                message="workload_id is not specified",
                content="workload_id must be specified to delete the workload",
                status_code=400)

        # assume the workload_type is heat
        stack_id = requri
        cloud_owner, regionid = extsys.decode_vim_id(vimid)
        # should go via multicloud proxy so that
        # the selflink is updated by multicloud
        retcode, v2_token_resp_json, os_status = \
            helper.MultiCloudIdentityHelper(
                settings.MULTICLOUD_API_V1_PREFIX,
                cloud_owner, regionid, "/v2.0/tokens")

        if retcode > 0 or not v2_token_resp_json:
            errmsg = "authenticate fails:%s, %s, %s" % \
                (cloud_owner, regionid, v2_token_resp_json)
            logger.error(errmsg)
            # bugfix: was a bare `return` (None) — a DRF view must
            # return a Response object
            return Response(data={'error': errmsg},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)

        # tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]
        # tenant_name = v2_token_resp_json["access"]["token"]["tenant"]["name"]

        # get stack status
        service_type = "orchestration"
        resource_uri = "/stacks?id=%s" % stack_id if stack_id else "/stacks"
        self._logger.info("retrieve stack resources, URI:%s" % resource_uri)
        retcode, content, os_status = \
            helper.MultiCloudServiceHelper(cloud_owner, regionid,
                                           v2_token_resp_json,
                                           service_type, resource_uri,
                                           None, "GET")

        stacks = content.get('stacks', []) \
            if retcode == 0 and content else []
        # assume there is at most 1 stack returned
        # since it was filtered by id
        stack1 = stacks[0] if stacks else None
        stack_status = ""

        if stack1 and 'CREATE_COMPLETE' == stack1['stack_status']:
            # delete the stack
            resource_uri = "/stacks/%s/%s" % \
                (stack1['stack_name'], stack1['id'])
            self._logger.info("delete stack, URI:%s" % resource_uri)
            retcode, content, os_status = \
                helper.MultiCloudServiceHelper(cloud_owner, regionid,
                                               v2_token_resp_json,
                                               service_type, resource_uri,
                                               None, "DELETE")
            # if retcode == 0:
            #     stack_status = "DELETE_IN_PROCESS"
            #     # and update AAI inventory by heatbridge-delete
            #     self.heatbridge_delete(request, vimid, stack1['id'])

        # stub response
        resp_template = {
            "template_type": "HEAT",
            "workload_id": stack_id,
            "workload_status": stack_status
        }

        if retcode > 0:
            resp_template["workload_response"] = content
        self._logger.info("RESP with data> result:%s" % resp_template)
        return Response(status=os_status)
    except VimDriverNewtonException as e:
        self._logger.error("Plugin exception> status:%s,error:%s"
                           % (e.status_code, e.content))
        return Response(data={'error': e.content}, status=e.status_code)
    except HttpError as e:
        self._logger.error("HttpError: status:%s, response:%s" %
                           (e.http_status, e.response.json()))
        return Response(data=e.response.json(), status=e.http_status)
    except Exception as e:
        self._logger.error(traceback.format_exc())
        return Response(data={'error': str(e)},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def _do_action(self, action, request, vim_id, servicetype, requri):
    '''
    Forward one HTTP request to the backing OpenStack service endpoint.

    :param action: lower-case HTTP verb: "get"/"post"/"put"/"patch"/"delete"
    :param request: incoming DRF request whose body/query are proxied
    :param vim_id: composite cloud id, decoded into (cloud_owner, region)
    :param servicetype: OpenStack service type used for endpoint selection
    :param requri: resource path relative to the service endpoint
    :return: Response mirroring the upstream status, with the original
        token echoed back in the X-Subject-Token header
    '''
    tmp_auth_token = self._get_token(request)
    try:
        # special handling of compute/v2 request from APPC,
        # temp solution for A release: rewrite v2/... to v2.1/...
        if servicetype == 'compute':
            tmp_pattern = re.compile(r'^v2/(.+)')
            requri = tmp_pattern.sub(r'v2.1/' + r'\1', requri)

        vim = VimDriverUtils.get_vim_info(vim_id)
        # fetch the auth_state out of cache
        auth_state, metadata_catalog = VimDriverUtils.get_token_cache(
            tmp_auth_token)
        req_resource, metadata_catalog = self._get_resource_and_metadata(
            servicetype, metadata_catalog, requri)
        sess = VimDriverUtils.get_session(vim, auth_state=auth_state)

        cloud_owner, regionid = extsys.decode_vim_id(vim_id)
        interface = 'public'
        # prefer the real OpenStack region id when configured, otherwise
        # fall back to the cloud_region_id
        service = {'service_type': servicetype,
                   'interface': interface,
                   'region_name': vim['openstack_region_id']
                   if vim.get('openstack_region_id')
                   else vim['cloud_region_id']
                   }

        # propagate the original query string, if any
        querystr = VimDriverUtils.get_query_part(request)
        if querystr:
            req_resource += "?" + querystr

        self._logger.info("service " + action + " request with uri %s,%s"
                          % (req_resource, service))
        # dispatch on the verb; an unknown verb leaves `resp` unbound and
        # is surfaced as a 500 by the generic handler below
        if(action == "get"):
            resp = sess.get(req_resource, endpoint_filter=service,
                            headers={"Content-Type": "application/json",
                                     "Accept": "application/json"})
        elif(action == "post"):
            resp = sess.post(req_resource,
                             data=json.JSONEncoder().encode(request.data),
                             endpoint_filter=service,
                             headers={"Content-Type": "application/json",
                                      "Accept": "application/json"})
        elif(action == "put"):
            resp = sess.put(req_resource,
                            data=json.JSONEncoder().encode(request.data),
                            endpoint_filter=service,
                            headers={"Content-Type": "application/json",
                                     "Accept": "application/json"})
        elif(action == "patch"):
            resp = sess.patch(req_resource,
                              data=json.JSONEncoder().encode(request.data),
                              endpoint_filter=service,
                              headers={"Content-Type": "application/json",
                                       "Accept": "application/json"})
        elif (action == "delete"):
            resp = sess.delete(req_resource, endpoint_filter=service,
                               headers={"Content-Type": "application/json",
                                        "Accept": "application/json"})

        # upstream may return an empty body (e.g. 204)
        content = resp.json() if resp.content else None
        self._logger.info("service " + action + " response status: %s"
                          % (resp.status_code))
        self._logger.debug("service " + action + " response content: %s"
                           % (content))

        if (action == "delete"):
            self._logger.info("RESP with status> %s" % resp.status_code)
            return Response(headers={'X-Subject-Token': tmp_auth_token},
                            status=resp.status_code)
        else:
            # rewrite endpoint URLs in the payload so clients keep talking
            # through the multicloud proxy
            content = ProxyUtils.update_prefix(metadata_catalog, content)
            if (action == "get"):
                if requri == '/v3/auth/catalog' and content \
                        and content.get("catalog"):
                    # inject the delegated DNSaaS endpoint into the catalog
                    content['catalog'] = ProxyUtils.update_catalog_dnsaas(
                        vim_id, content['catalog'], self.proxy_prefix, vim)
            self._logger.info("RESP with status> %s" % resp.status_code)
            return Response(headers={'X-Subject-Token': tmp_auth_token},
                            data=content, status=resp.status_code)
    except VimDriverNewtonException as e:
        self._logger.error("Plugin exception> status:%s,error:%s"
                           % (e.status_code, e.content))
        return Response(data={'error': e.content}, status=e.status_code)
    except HttpError as e:
        self._logger.error("HttpError: status:%s, response:%s"
                           % (e.http_status, e.response.json()))
        return Response(data=e.response.json(), status=e.http_status)
    except Exception as e:
        self._logger.error(traceback.format_exc())
        return Response(data={'error': str(e)},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def post(self, request, vimid="", requri=""):
    """Create a heat workload: apply OOF directives and POST the stack.

    Only ``template_type == "heat"`` is supported; any other type yields a
    500 with an error message.

    :param request: DRF request; body holds "template_type",
        "template_data" and optional "oof_directives"
    :param vimid: composite cloud id, decoded into (cloud_owner, region)
    :param requri: unused, kept for URL-routing compatibility
    :return: Response with the created workload id and the heat response
    """
    self._logger.info("vimid: %s" % (vimid))
    self._logger.info("data: %s" % (request.data))
    self._logger.debug("META: %s" % request.META)

    try:
        data = request.data
        oof_directive = data.get("oof_directives", {})
        template_type = data.get("template_type", None)
        template_data = data.get("template_data", {})
        resp_template = None
        if template_type and "heat" == template_type.lower():
            # update heat parameters from oof_directive
            parameters = template_data.get("parameters", {})

            for directive in oof_directive.get("directives", []):
                if directive["type"] == "vnfc":
                    for directive2 in directive.get("directives", []):
                        if directive2["type"] in ["flavor_directives",
                                                  "sriovNICNetwork_directives"]:
                            for attr in directive2.get("attributes", []):
                                flavor_label = attr.get("attribute_name", None)
                                flavor_value = attr.get("attribute_value", None)
                                if flavor_label in parameters:
                                    parameters[flavor_label] = flavor_value
                                else:
                                    self._logger.warn(
                                        "There is no parameter exist: %s" %
                                        flavor_label)

            # update parameters
            template_data["parameters"] = parameters

            # reset to make sure "files" are empty
            template_data["files"] = {}

            # authenticate
            cloud_owner, regionid = extsys.decode_vim_id(vimid)
            # should go via multicloud proxy so that
            # the selflink is updated by multicloud
            retcode, v2_token_resp_json, os_status = \
                helper.MultiCloudIdentityHelper(
                    settings.MULTICLOUD_API_V1_PREFIX,
                    cloud_owner, regionid, "/v2.0/tokens")

            if retcode > 0 or not v2_token_resp_json:
                errmsg = "authenticate fails:%s,%s, %s" % \
                    (cloud_owner, regionid, v2_token_resp_json)
                logger.error(errmsg)
                # bugfix: was a bare `return` (None) — a DRF view must
                # return a Response object
                return Response(
                    data={'error': errmsg},
                    status=status.HTTP_500_INTERNAL_SERVER_ERROR)

            # tenant_id = v2_token_resp_json["access"]["token"]["tenant"]["id"]

            service_type = "orchestration"
            resource_uri = "/stacks"
            self._logger.info("retrieve stack resources, URI:%s"
                              % resource_uri)
            retcode, content, os_status = \
                helper.MultiCloudServiceHelper(cloud_owner, regionid,
                                               v2_token_resp_json,
                                               service_type, resource_uri,
                                               template_data, "POST")

            stack1 = content.get('stack', None) \
                if retcode == 0 and content else None

            resp_template = {
                "template_type": template_type,
                "workload_id": stack1["id"] if stack1 else "",
                "template_response": content
            }
            self._logger.info("RESP with data> result:%s" % resp_template)

            return Response(data=resp_template, status=os_status)
        else:
            msg = "The template type %s is not supported" % (template_type)
            self._logger.warn(msg)
            return Response(data={"error": msg},
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
    except VimDriverNewtonException as e:
        self._logger.error("Plugin exception> status:%s,error:%s"
                           % (e.status_code, e.content))
        return Response(data={'error': e.content}, status=e.status_code)
    except HttpError as e:
        self._logger.error("HttpError: status:%s, response:%s" %
                           (e.http_status, e.response.json()))
        return Response(data=e.response.json(), status=e.http_status)
    except Exception as e:
        self._logger.error(traceback.format_exc())
        return Response(data={'error': str(e)},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def _do_action(self, action, request, vim_id, servicetype, requri):
    '''
    Forward a DNSaaS request to the cloud region configured as the
    DNS delegate of *vim_id*.

    Resolution chain: validate the caller's cached token, read the
    "dns-delegate" entry from the region's cloud_extra_info, then proxy
    the request to the delegated region's endpoint with the caller's
    tenant name.

    :param action: lower-case HTTP verb: "get"/"post"/"put"/"patch"/"delete"
    :param request: incoming DRF request whose body/query are proxied
    :param vim_id: composite cloud id of the region being asked for DNSaaS
    :param servicetype: OpenStack service type used for endpoint selection
    :param requri: resource path relative to the delegated endpoint
    :return: Response mirroring the upstream status; 404 with an 'error'
        payload for any failed validation step above
    '''
    tmp_auth_token = self._get_token(request)

    try:
        # fetch the auth_state out of cache
        auth_state_str, metadata_catalog_str = \
            VimDriverUtils.get_token_cache(tmp_auth_token)
        if not auth_state_str:
            # invalid token
            msg = {
                'error': "request token %s is not valid" % (tmp_auth_token)
            }
            self._logger.warn("RESP with status, msg> %s , %s"
                              % (status.HTTP_404_NOT_FOUND, msg))
            return Response(data=msg, status=status.HTTP_404_NOT_FOUND)

        # get project name from auth_state
        auth_state = json.loads(auth_state_str)
        if not auth_state:
            # invalid token
            msg = {
                'error': "request token %s is broken" % (tmp_auth_token)
            }
            self._logger.warn("RESP with status, msg> %s , %s"
                              % (status.HTTP_404_NOT_FOUND, msg))
            return Response(data=msg, status=status.HTTP_404_NOT_FOUND)

        tenant_name = auth_state['body']['token']['project']['name']
        # tenant_id = auth_state['body']['token']['project']['id']

        # find out the delegated DNSaaS provider
        viminfo = VimDriverUtils.get_vim_info(vim_id)
        if not viminfo:
            msg = {
                'error': "vimid %s is not found" % (vim_id)
            }
            self._logger.warn("RESP with status, msg> %s , %s"
                              % (status.HTTP_404_NOT_FOUND, msg))
            return Response(data=msg, status=status.HTTP_404_NOT_FOUND)

        # cloud_extra_info is stored as a JSON string on the vim record
        cloud_dns_delegate_info = None
        cloud_extra_info_str = viminfo.get('cloud_extra_info')
        if cloud_extra_info_str:
            cloud_extra_info = json.loads(cloud_extra_info_str)
            cloud_dns_delegate_info = cloud_extra_info.get("dns-delegate")

        if not cloud_dns_delegate_info \
                or not cloud_dns_delegate_info.get("cloud-owner") \
                or not cloud_dns_delegate_info.get("cloud-region-id"):
            msg = {
                'error': "dns-delegate for vimid %s is not configured"
                         % (vim_id)
            }
            self._logger.warn("RESP with status, msg> %s , %s"
                              % (status.HTTP_404_NOT_FOUND, msg))
            return Response(data=msg, status=status.HTTP_404_NOT_FOUND)

        # delegated vimid is "<cloud-owner>_<cloud-region-id>"
        vimid_delegate = cloud_dns_delegate_info.get("cloud-owner") \
            + "_" \
            + cloud_dns_delegate_info.get("cloud-region-id")

        # now forward request to delegated DNS service endpoint
        vim = VimDriverUtils.get_vim_info(vimid_delegate)
        if not vim:
            msg = {
                'error': "delegated vimid %s is not found"
                         % (vimid_delegate)
            }
            self._logger.warn("RESP with status, msg> %s , %s"
                              % (status.HTTP_404_NOT_FOUND, msg))
            return Response(data=msg, status=status.HTTP_404_NOT_FOUND)

        # authenticate against the DELEGATE region with the caller's tenant
        sess = VimDriverUtils.get_session(vim, tenant_name=tenant_name)

        cloud_owner, regionid = extsys.decode_vim_id(vimid_delegate)
        interface = 'public'
        # prefer the real OpenStack region id when configured, otherwise
        # fall back to the cloud_region_id
        service = {'service_type': servicetype,
                   'interface': interface,
                   'region_name': vim['openstack_region_id']
                   if vim.get('openstack_region_id')
                   else vim['cloud_region_id']
                   }

        req_resource = requri
        # propagate the original query string, if any
        querystr = VimDriverUtils.get_query_part(request)
        if querystr:
            req_resource += "?" + querystr

        self._logger.info("service " + action + " request with uri %s"
                          % (req_resource))
        # dispatch on the verb; an unknown verb leaves `resp` unbound and
        # is surfaced as a 500 by the generic handler below
        if(action == "get"):
            resp = sess.get(req_resource, endpoint_filter=service,
                            headers={"Content-Type": "application/json",
                                     "Accept": "application/json"})
        elif(action == "post"):
            resp = sess.post(req_resource,
                             data=json.JSONEncoder().encode(request.data),
                             endpoint_filter=service,
                             headers={"Content-Type": "application/json",
                                      "Accept": "application/json"})
        elif(action == "put"):
            resp = sess.put(req_resource,
                            data=json.JSONEncoder().encode(request.data),
                            endpoint_filter=service,
                            headers={"Content-Type": "application/json",
                                     "Accept": "application/json"})
        elif(action == "patch"):
            resp = sess.patch(req_resource,
                              data=json.JSONEncoder().encode(request.data),
                              endpoint_filter=service,
                              headers={"Content-Type": "application/json",
                                       "Accept": "application/json"})
        elif (action == "delete"):
            resp = sess.delete(req_resource, endpoint_filter=service,
                               headers={"Content-Type": "application/json",
                                        "Accept": "application/json"})

        self._logger.info("service " + action + " response status> %s"
                          % (resp.status_code))
        # upstream may return an empty body (e.g. 204)
        content = resp.json() if resp.content else None
        self._logger.debug("service " + action + " response content> %s"
                           % (content))

        self._logger.info("RESP with status> %s" % resp.status_code)
        if (action == "delete"):
            return Response(headers={'X-Subject-Token': tmp_auth_token},
                            status=resp.status_code)
        else:
            # content = ProxyUtils.update_dnsaas_project_id(content, tenant_id)
            return Response(headers={'X-Subject-Token': tmp_auth_token},
                            data=content, status=resp.status_code)
    except VimDriverNewtonException as e:
        self._logger.error("Plugin exception> status:%s,error:%s"
                           % (e.status_code, e.content))
        return Response(data={'error': e.content}, status=e.status_code)
    except HttpError as e:
        self._logger.error("HttpError: status:%s, response:%s"
                           % (e.http_status, e.response.json()))
        return Response(data=e.response.json(), status=e.http_status)
    except Exception as e:
        self._logger.error(traceback.format_exc())
        return Response(data={'error': str(e)},
                        status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def registryV0(self, vimid="", project_idorname=None):
    '''
    Extend base registryV0: discover all OpenStack regions behind this
    VIM, create/register one AAI cloud region per discovered region
    (when multi-region discovery is enabled), then register the
    specified/primary region itself.

    :param vimid: composed vim id "<cloud_owner>_<cloud_region_id>"
    :param project_idorname: tenant id or name to authenticate with;
        falls back to the default tenant stored in the VIM info
    :return: (10, errmsg) when the cloud region is unknown,
        (11, errmsg) when the base registration fails,
        otherwise the base class' return value
    '''
    viminfo = VimDriverUtils.get_vim_info(vimid)
    if not viminfo:
        return (10, "Cloud Region not found in AAI: %s" % vimid)

    # extra info carries the optional region pinning / discovery flags
    cloud_extra_info = viminfo.get("cloud_extra_info_json", {})
    region_specified = cloud_extra_info.get("openstack-region-id", None)
    multi_region_discovery = cloud_extra_info.get(
        "multi-region-discovery", None)

    sess = None
    if project_idorname:
        # try the project hint first as a tenant id, then as a tenant name
        try:
            sess = VimDriverUtils.get_session(
                viminfo, tenant_name=None, tenant_id=project_idorname)
        except Exception:
            pass
        if not sess:
            try:
                sess = VimDriverUtils.get_session(
                    viminfo, tenant_name=project_idorname, tenant_id=None)
            except Exception:
                pass
    if not sess:
        # fall back to the default tenant since there is
        # no usable tenant info in the request
        sess = VimDriverUtils.get_session(
            viminfo, tenant_name=viminfo.get('tenant', None))

    # discover the regions; expected to always return a list (maybe empty)
    cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
    region_ids = self._discover_regions(vimid, sess, viminfo)
    if len(region_ids) == 0:
        self._logger.warn("failed to get region id")

    # pick the primary region: prefer the pinned one, then the one
    # matching the AAI cloud region id, otherwise the first discovered.
    if region_specified in region_ids:
        pass
    elif cloud_region_id in region_ids:
        region_specified = cloud_region_id
    else:
        # assume the first region is the primary region since we have
        # no other way to determine it; guard against an empty
        # discovery result (pop(0) on [] would raise IndexError) by
        # falling back to the AAI cloud region id.
        region_specified = region_ids.pop(0) if region_ids \
            else cloud_region_id

    # update cloud region and discover/register resource
    if multi_region_discovery:
        # register every other discovered region under a composed
        # AAI cloud region id "<cloud_region_id>_<regionid>"
        for regionid in region_ids:
            # the specified region is handled separately below
            if region_specified == regionid:
                continue
            gen_cloud_region_id = cloud_region_id + "_" + regionid
            self._logger.info("create a cloud region: %s,%s,%s"
                              % (cloud_owner, gen_cloud_region_id,
                                 regionid))
            try:
                self._update_cloud_region(
                    cloud_owner, gen_cloud_region_id, regionid,
                    viminfo, sess)
            except Exception as e:
                self._logger.debug(
                    "update cloud region fails %s" % str(e))
            try:
                new_vimid = extsys.encode_vim_id(
                    cloud_owner, gen_cloud_region_id)
                super(RegistryHelper, self).registryV0(
                    new_vimid, project_idorname)
                # update k8s connectivity
                try:
                    newviminfo = VimDriverUtils.get_vim_info(new_vimid)
                    sess = VimDriverUtils.get_session(
                        newviminfo,
                        tenant_name=newviminfo.get('tenant', None))
                    self._update_k8s_info(
                        cloud_owner, gen_cloud_region_id,
                        newviminfo, sess)
                except Exception as e:
                    # fixed: was self.__logger (name-mangled attribute
                    # that does not exist) — use self._logger
                    self._logger.debug(
                        "update k8s info failes for cloud region:%s,%s, %s"
                        % (cloud_owner, gen_cloud_region_id, str(e)))
                    # continue the registration without reporting error
            except Exception as e:
                self._logger.debug("registryV0 fails %s" % str(e))

    # update the specified region
    try:
        self._update_cloud_region(
            cloud_owner, cloud_region_id, region_specified,
            viminfo, sess)
        # re-fetch viminfo
        viminfo = VimDriverUtils.get_vim_info(vimid)
    except Exception as e:
        self._logger.debug(
            "update cloud region fails for cloud region: %s,%s, %s"
            % (cloud_owner, cloud_region_id, str(e)))

    # update k8s connectivity
    try:
        self._update_k8s_info(cloud_owner, cloud_region_id,
                              viminfo, sess)
    except Exception as e:
        # fixed: was self.__logger — use self._logger
        self._logger.debug("update k8s info failes %s" % str(e))
        # continue the registration without reporting error

    try:
        return super(RegistryHelper, self).registryV0(
            vimid, project_idorname)
    except Exception as e:
        errmsg = "registryV0 fails %s" % str(e)
        self._logger.debug(errmsg)
        return 11, errmsg
def _update_proxy_identity_endpoint(self, vimid):
    '''
    Update the cloud region's identity-url in AAI so that it points at
    this multicloud proxy instead of the real keystone endpoint.

    :param vimid: composed vim id "<cloud_owner>_<cloud_region_id>"
    :return: (0, "succeed") on success,
        (10, errmsg) when the vimid cannot be decoded,
        (retcode, content) when the AAI GET fails,
        (status, content) for known exception types,
        (11, errmsg) for any other error
    '''
    try:
        cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)
        if cloud_owner and cloud_region_id:
            resource_url = \
                "/cloud-infrastructure/cloud-regions" \
                "/cloud-region/%s/%s" \
                % (cloud_owner, cloud_region_id)

            # get cloud-region (nocache so the resource-version is fresh)
            retcode, content, status_code = \
                restcall.req_to_aai(resource_url, "GET", nocache=True)

            # PUT back with resource-version intact and the proxied
            # identity-url injected
            if retcode == 0 and content:
                viminfo = json.JSONDecoder().decode(content)
                # API v0 addresses the region by composed vimid; v1+
                # by "<cloud_owner>/<cloud_region_id>" path segments
                # (reuse the already-decoded pair instead of decoding
                # the vimid a second time)
                viminfo['identity-url'] =\
                    self.proxy_prefix + "/%s/identity/v2.0" % vimid \
                    if self.proxy_prefix[-3:] == "/v0" \
                    else self.proxy_prefix +\
                    "/%s/%s/identity/v2.0"\
                    % (cloud_owner, cloud_region_id)

                retcode, content, status_code = \
                    restcall.req_to_aai(
                        "/cloud-infrastructure/cloud-regions"
                        "/cloud-region/%s/%s"
                        % (cloud_owner, cloud_region_id),
                        "PUT", content=viminfo)

                self._logger.debug(
                    "update_proxy_identity_endpoint,vimid:"
                    "%s req_to_aai: %s, return %s, %s, %s"
                    % (vimid, viminfo['identity-url'],
                       retcode, content, status_code))
                return 0, "succeed"
            else:
                self._logger.debug(
                    "failure: update_proxy_identity_endpoint,vimid:"
                    "%s req_to_aai: return %s, %s, %s"
                    % (vimid, retcode, content, status_code))
                return retcode, content
        else:
            return (10, "Cloud Region not found: %s" % vimid)
    except VimDriverNewtonException as e:
        # fixed: VimDriverNewtonException carries status_code/content
        # (see the handlers elsewhere in this file); it has no
        # http_status attribute — that belongs to keystone HttpError
        self._logger.error(
            "VimDriverNewtonException: status:%s, response:%s"
            % (e.status_code, e.content))
        return (e.status_code, e.content)
    except HttpError as e:
        self._logger.error("HttpError: status:%s, response:%s"
                           % (e.http_status, e.response.json()))
        return (e.http_status, e.response.json())
    except Exception as e:
        self._logger.error(traceback.format_exc())
        return (11, str(e))
def update_catalog(vimid, catalog, multicould_namespace):
    '''
    Replace the original endpoints with multicloud's proxy endpoints.

    Returns the catalog with updated endpoints, and a metadata catalog
    recording the prefix and suffix of each original endpoint:
    {
        'compute': {
            'prefix': 'http://ip:port',
            'proxy_prefix': 'http://another_ip: another_port',
            'suffix': 'v2.1/53a4ab9015c84ee892e46d294f3b8b2d',
        },
        'network': {
            'prefix': 'http://ip:port',
            'proxy_prefix': 'http://another_ip: another_port',
            'suffix': '',
        },
    }

    :param vimid: composed vim id "<cloud_owner>_<cloud_region_id>"
    :param catalog: service catalog to be updated (mutated in place)
    :param multicould_namespace: multicloud namespace prefix to replace
        the real one in catalog endpoint urls
    :return: (updated catalog, metadata_catalog) tuple, or a single
        None when the input catalog is empty/None (NOTE: callers must
        expect the bare None, not a tuple, in that case)
    '''
    import re

    if not catalog:
        return None

    # hoisted out of the endpoint loop: the prefix patterns are
    # constant, so compile them once per call instead of re-importing
    # re and recompiling per endpoint.  First try "scheme://host:port",
    # then fall back to "scheme://host" without a port.
    pattern_with_port = re.compile(
        r'^(http[s]?://[0-9.]+:[0-9]+)(/([0-9a-zA-Z/._-]+)$)?')
    pattern_no_port = re.compile(
        r'^(http[s]?://[0-9.]+)(/([0-9a-zA-Z/._-]+)$)?')
    # API v0 addresses the region by composed vimid; v1+ by
    # "<cloud_owner>/<cloud_region_id>" path segments
    is_api_v0 = multicould_namespace[-3:] == "/v0"

    metadata_catalog = {}
    # filter and replace endpoints of catalogs
    for item in catalog:
        one_catalog = {}
        metadata_catalog[item['type']] = one_catalog

        endpoints = item['endpoints']
        item['endpoints'] = []
        for endpoint in endpoints:
            # only public endpoints are exposed through the proxy
            if endpoint.get('interface', None) != 'public':
                continue

            # replace the endpoint with MultiCloud's proxy
            endpoint_url = endpoint["url"]
            m = pattern_with_port.search(endpoint_url) \
                or pattern_no_port.search(endpoint_url)
            real_prefix = m.group(1) if m else None
            real_suffix = m.group(3) if m else None

            if real_prefix:
                # populate metadata_catalog
                one_catalog['prefix'] = real_prefix
                one_catalog['suffix'] = \
                    real_suffix if real_suffix else ''

                if is_api_v0:
                    one_catalog['proxy_prefix'] = \
                        multicould_namespace + "/%s" % vimid
                    endpoint_url = \
                        multicould_namespace + "/%s" % vimid
                else:
                    # api v1 or future
                    cloud_owner, cloud_region_id = \
                        extsys.decode_vim_id(vimid)
                    one_catalog['proxy_prefix'] = \
                        multicould_namespace + "/%s/%s" % (
                            cloud_owner, cloud_region_id)
                    endpoint_url = \
                        multicould_namespace + "/%s/%s" % (
                            cloud_owner, cloud_region_id)

                # append the service type unless the original suffix
                # already starts with it
                tmp_pattern = re.compile(item["type"])
                if not real_suffix or not re.match(
                        tmp_pattern, real_suffix):
                    one_catalog['proxy_prefix'] += "/" + item["type"]
                    endpoint_url += '/' + item["type"]
                if real_suffix:
                    endpoint_url += "/" + real_suffix

                # the identity service always proxies to the fixed
                # v3 path, regardless of the original suffix
                if item["type"] == "identity":
                    if is_api_v0:
                        endpoint_url = multicould_namespace \
                            + "/%s/identity/v3" % vimid
                    else:
                        # api v1 or future
                        cloud_owner, cloud_region_id = \
                            extsys.decode_vim_id(vimid)
                        endpoint_url = multicould_namespace \
                            + "/%s/%s/identity/v3" % (
                                cloud_owner, cloud_region_id)
            else:
                # something wrong: unrecognized url shape — keep the
                # original endpoint untouched
                pass

            endpoint["url"] = endpoint_url
            item['endpoints'].append(endpoint)

    return catalog, metadata_catalog
def unregistryV0(self, vimid):
    '''
    Remove a cloud region and all of its child resources from AAI:
    tenants (with their vservers and l-interfaces), flavors (with
    hpa-capabilities), images, availability zones (relationships
    first), snapshots, and finally the cloud region itself.

    :param vimid: composed vim id "<cloud_owner>_<cloud_region_id>"
    :return: (10, errmsg) when the cloud region is unknown, otherwise
        (retcode, content) of the final cloud-region DELETE
    '''
    # prepare request resource to vim instance
    # get token:
    viminfo = VimDriverUtils.get_vim_info(vimid)
    if not viminfo:
        # fixed: the format string was missing its %s placeholder,
        # which raised TypeError instead of returning the message
        return (10, "Cloud Region not found: %s" % vimid)

    cloud_owner, cloud_region_id = extsys.decode_vim_id(vimid)

    # get the resource first, depth=all so every child resource
    # (and its resource-version) comes back in one call
    resource_url = (
        "/cloud-infrastructure/cloud-regions/"
        "cloud-region/%(cloud_owner)s/%(cloud_region_id)s?depth=all"
        % {
            "cloud_owner": cloud_owner,
            "cloud_region_id": cloud_region_id,
        })

    # get cloud-region
    retcode, content, status_code = \
        restcall.req_to_aai(resource_url, "GET", nocache=True)

    # add resource-version
    cloudregiondata = {}
    if retcode == 0 and content:
        cloudregiondata = json.JSONDecoder().decode(content)
    else:
        return (
            10,
            "Cloud Region not found: %s, %s"
            % (cloud_owner, cloud_region_id))

    # step 1. remove all tenants
    tenants = cloudregiondata.get("tenants", None)
    for tenant in tenants.get("tenant", []) if tenants else []:
        # common prefix
        aai_cloud_region = \
            "/cloud-infrastructure/cloud-regions/cloud-region/%s/%s/tenants/tenant/%s" \
            % (cloud_owner, cloud_region_id, tenant['tenant-id'])

        # remove all vservers
        try:
            # get list of vservers
            vservers = tenant.get('vservers', {}).get('vserver', [])
            for vserver in vservers:
                try:
                    # iterate vport, except will be raised
                    # if no l-interface exist
                    for vport in vserver['l-interfaces']['l-interface']:
                        # delete vport
                        vport_delete_url =\
                            aai_cloud_region + \
                            "/vservers/vserver/%s/l-interfaces/l-interface/%s?resource-version=%s" \
                            % (vserver['vserver-id'],
                               vport['interface-name'],
                               vport['resource-version'])
                        restcall.req_to_aai(vport_delete_url, "DELETE")
                except Exception:
                    # no l-interfaces on this vserver: best-effort
                    pass

                try:
                    # delete vserver
                    vserver_delete_url =\
                        aai_cloud_region +\
                        "/vservers/vserver/%s?resource-version=%s" \
                        % (vserver['vserver-id'],
                           vserver['resource-version'])
                    restcall.req_to_aai(vserver_delete_url, "DELETE")
                except Exception:
                    continue
        except Exception:
            self._logger.error(traceback.format_exc())

        resource_url = (
            "/cloud-infrastructure/cloud-regions/"
            "cloud-region/%(cloud_owner)s/%(cloud_region_id)s/"
            "%(resource_type)ss/%(resource_type)s/%(resource_id)s/"
            "?resource-version=%(resource-version)s"
            % {
                "cloud_owner": cloud_owner,
                "cloud_region_id": cloud_region_id,
                "resource_type": "tenant",
                "resource_id": tenant["tenant-id"],
                "resource-version": tenant["resource-version"]
            })
        # remove tenant
        retcode, content, status_code = \
            restcall.req_to_aai(resource_url, "DELETE")

    # remove all flavors
    flavors = cloudregiondata.get("flavors", None)
    for flavor in flavors.get("flavor", []) if flavors else []:
        # iterate hpa-capabilities: children must go before the flavor
        hpa_capabilities = flavor.get("hpa-capabilities", None)
        for hpa_capability in hpa_capabilities.get("hpa-capability", [])\
                if hpa_capabilities else []:
            resource_url = (
                "/cloud-infrastructure/cloud-regions/"
                "cloud-region/%(cloud_owner)s/%(cloud_region_id)s/"
                "%(resource_type)ss/%(resource_type)s/%(resource_id)s/"
                "hpa-capabilities/hpa-capability/%(hpa-capability-id)s/"
                "?resource-version=%(resource-version)s"
                % {
                    "cloud_owner": cloud_owner,
                    "cloud_region_id": cloud_region_id,
                    "resource_type": "flavor",
                    "resource_id": flavor["flavor-id"],
                    "hpa-capability-id":
                        hpa_capability["hpa-capability-id"],
                    "resource-version":
                        hpa_capability["resource-version"]
                })
            # remove hpa-capability
            retcode, content, status_code = \
                restcall.req_to_aai(resource_url, "DELETE")

        # remove flavor
        resource_url = (
            "/cloud-infrastructure/cloud-regions/"
            "cloud-region/%(cloud_owner)s/%(cloud_region_id)s/"
            "%(resource_type)ss/%(resource_type)s/%(resource_id)s/"
            "?resource-version=%(resource-version)s"
            % {
                "cloud_owner": cloud_owner,
                "cloud_region_id": cloud_region_id,
                "resource_type": "flavor",
                "resource_id": flavor["flavor-id"],
                "resource-version": flavor["resource-version"]
            })
        retcode, content, status_code = \
            restcall.req_to_aai(resource_url, "DELETE")

    # remove all images
    images = cloudregiondata.get("images", None)
    for image in images.get("image", []) if images else []:
        resource_url = (
            "/cloud-infrastructure/cloud-regions/"
            "cloud-region/%(cloud_owner)s/%(cloud_region_id)s/"
            "%(resource_type)ss/%(resource_type)s/%(resource_id)s/"
            "?resource-version=%(resource-version)s"
            % {
                "cloud_owner": cloud_owner,
                "cloud_region_id": cloud_region_id,
                "resource_type": "image",
                "resource_id": image["image-id"],
                "resource-version": image["resource-version"]
            })
        # remove image
        retcode, content, status_code = \
            restcall.req_to_aai(resource_url, "DELETE")

    # remove all az
    azs = cloudregiondata.get("availability-zones", None)
    for az in azs.get("availability-zone", []) if azs else []:
        # delete az relationship first
        resource_url = (
            "/cloud-infrastructure/cloud-regions/"
            "cloud-region/%(cloud_owner)s/%(cloud_region_id)s/"
            "%(resource_type)ss/%(resource_type)s/%(resource_id)s"
            % {
                "cloud_owner": cloud_owner,
                "cloud_region_id": cloud_region_id,
                "resource_type": "availability-zone",
                "resource_id": az["availability-zone-name"]
            })
        # fixed: the default for "relationship-list" must be a dict;
        # the original used [] and [].get(...) raises AttributeError
        rs = az.get("relationship-list", {}).get("relationship", [])
        for r in rs:
            retcode, content, status_code = \
                restcall.req_to_aai(
                    resource_url + "/relationship-list/relationship",
                    "DELETE", content=r)

        # delete az
        resource_url2 = (resource_url
                         + "?resource-version=%(resource-version)s"
                         % {
                             "resource-version": az["resource-version"]
                         })
        retcode, content, status_code = \
            restcall.req_to_aai(resource_url2, "DELETE")

    # remove all vg

    # remove all snapshots
    snapshots = cloudregiondata.get("snapshots", None)
    for snapshot in snapshots.get("snapshot", []) if snapshots else []:
        resource_url = (
            "/cloud-infrastructure/cloud-regions/"
            "cloud-region/%(cloud_owner)s/%(cloud_region_id)s/"
            "%(resource_type)ss/%(resource_type)s/%(resource_id)s/"
            "?resource-version=%(resource-version)s"
            % {
                "cloud_owner": cloud_owner,
                "cloud_region_id": cloud_region_id,
                "resource_type": "snapshot",
                "resource_id": snapshot["snapshot-id"],
                "resource-version": snapshot["resource-version"]
            })
        # remove snapshot
        retcode, content, status_code = \
            restcall.req_to_aai(resource_url, "DELETE")

    # remove all server groups
    # remove all pservers

    # remove cloud region itself
    resource_url = (
        "/cloud-infrastructure/cloud-regions/"
        "cloud-region/%(cloud_owner)s/%(cloud_region_id)s"
        "?resource-version=%(resource-version)s"
        % {
            "cloud_owner": cloud_owner,
            "cloud_region_id": cloud_region_id,
            "resource-version": cloudregiondata["resource-version"]
        })
    # remove cloud region
    retcode, content, status_code = \
        restcall.req_to_aai(resource_url, "DELETE")
    return retcode, content