def request_msg(self, method, path, data=None, headers=None, **kwargs):
    req_headers = self._update_and_get_headers(headers, False)

    # set default request timeout
    kwargs['timeout'] = kwargs.get('timeout', self.__request_time_out)
    rsp = self._request(method, path, data, headers=req_headers, **kwargs)
    if rsp.status_code in self.STATUS_NO_AUTH:
        LOG.info('token may have expired, fetch again.')
        req_headers = self._update_and_get_headers(headers, True)
        rsp = self._request(method, path, data,
                            headers=req_headers, **kwargs)

    # raise if the response reports an error status
    self._raise_if_not_in_status_ok(rsp)
    ret_data = {'response': rsp, 'data': None}
    if rsp.text:
        try:
            ret_data['data'] = rsp.json()
        #ignore pylint:disable=W0703
        except Exception as excp:
            LOG.warn(_('failed to load json response data, %s'), excp)
            ret_data['data'] = rsp.text
    if kwargs.get('need_response', False):
        return ret_data
    return ret_data['data']
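# A hedged usage sketch (not part of the driver source) showing the two
# return shapes of request_msg: the decoded body by default, or the full
# {'response', 'data'} dict with need_response=True. `client` stands for
# any instance of this REST client class and the path is illustrative.
def _request_msg_usage_sketch(client):
    # default: only the decoded body (dict on valid JSON, raw text otherwise)
    body = client.request_msg('GET', '/service/sites', timeout=30)
    # need_response=True: the requests.Response object plus the body
    ret = client.request_msg('GET', '/service/sites', need_response=True)
    return body, ret['response'].status_code, ret['data']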
def ensure_network(self, network_info):
    """
    Ensure the network resource exists on FC.

    :param network_info: network_info from nova, dictionary type
    :return:
    """
    # NOTE: the physical network is only visible to the admin user
    context = nova_ctxt.get_admin_context()

    network = self._get_network_from_neutron(context, network_info)
    LOG.debug(_('get network info from neutron: %s'), network)

    if network['provider:network_type'] == constant.TYPE_VXLAN:
        dvs_name = constant.CONF.fusioncompute.vxlan_dvs_name
    else:
        dvs_name = network['provider:physical_network']
    dvs_id = self._get_dvs_id_by_physnet_name(dvs_name)
    if not dvs_id:
        raise fc_exc.DVSwitchNotFound(
            dvs_id=network['provider:physical_network'])

    pg_adpt = PortGroupQueryAdapter(network, dvs_id)
    pg_data = self.query_port_group(pg_adpt)
    if not pg_data:
        try:
            pg_adpt = PortGroupCreateAdapter(network, dvs_id)
            pg_data = self.create_port_group(dvs_id, pg_adpt)
        except Exception as e:
            # a concurrent request may have created the port group
            # first, so query it again before giving up
            LOG.warn(_('create pg failed (%s), will check it again'), e)
            pg_adpt = PortGroupQueryAdapter(network, dvs_id)
            pg_data = self.query_port_group(pg_adpt)

    return pg_data['urn'] if pg_data else None
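# A minimal standalone sketch of the create-then-requery pattern that
# ensure_network relies on: when concurrent workers race to create the
# same resource, the loser treats the creation failure as "already
# exists" and queries again. `query` and `create` are hypothetical
# callables, not FC APIs.
def _ensure_resource_sketch(query, create):
    data = query()
    if not data:
        try:
            data = create()
        except Exception:
            # another worker may have created it first; re-check
            data = query()
    return data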
def detach_interface(self, instance, vif):
    """
    Send an interface-detach request to the FusionCompute virtual machine.

    :param instance:
    :param vif:
    :return: response : {"taskUrn": string, "taskUri": string}
    if the nic does not exist, return {}
    else {"taskUrn": string, "taskUri": string}
    """
    LOG.debug(_("trying to detach interface for vm name: %s,"
                "vm uuid: %s, vif information is %s"),
              instance['name'], instance['uuid'], vif)

    response = {}
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    nics = fc_vm["vmConfig"]["nics"]
    LOG.info("nics in FusionCompute is %s", nics)
    nic_uri = None
    for nic in nics:
        if nic['name'] == vif['id']:
            nic_uri = nic['uri']
            break
    if nic_uri:
        detach_interface_uri = nic_uri.replace("nics", "virtualNics")
        LOG.info("detach_interface_uri is %s", detach_interface_uri)
        response = self.delete(detach_interface_uri,
                               excp=exception.InstanceInvalidState)
    else:
        LOG.warn(_("detach interface for vm name: %s, no such nic."),
                 instance['name'])
    LOG.info(_('send detach interface finished, return is: %s'),
             jsonutils.dumps(response))
    return response
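# A hedged sketch of the URI rewrite above: FC appears to expose nic
# resources under ".../nics/<n>" but to accept detach requests at
# ".../virtualNics/<n>", hence the string replacement. The sample URI is
# invented; note that str.replace rewrites every occurrence of "nics",
# mirroring the source.
def _detach_uri_sketch():
    sample_nic_uri = '/service/sites/1/vms/100/nics/1'  # hypothetical
    return sample_nic_uri.replace('nics', 'virtualNics')
    # -> '/service/sites/1/vms/100/virtualNics/1'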
def delete_vm(self, context, instance, block_device_info=None,
              destroy_disks=True):
    """Delete VM on FC.

    :param context:
    :param instance:
    :param block_device_info:
    :param destroy_disks:
    :return:
    """

    # if reverting or confirming a resize, only stop the vm; during a
    # resize the task state is resize_reverting or resize_confirming
    if instance and (instance.get('task_state') == 'resize_reverting'
                     or instance.get('task_state') == 'resize_confirming'):
        LOG.info(_('revert resize now, here only stop vm.'))
        try:
            self.stop_vm(instance)
        except Exception as e:
            LOG.warn(_('stop vm failed, trigger rollback'))
            raise exception.InstanceFaultRollback(inner_exception=e)
        return

    try:
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
    except exception.InstanceNotFound:
        LOG.warn(_('instance exists no more, ignore this deletion.'))
        return

    # detach volumes created by cinder
    if block_device_info:
        LOG.info(_('now will stop vm before detaching cinder volumes.'))
        self.stop_vm(instance)
        for vol in block_device_info['block_device_mapping']:
            self.detach_volume(vol['connection_info'], instance)

    # if the vm is in fault-resuming or unknown status, stop it before
    # deleting
    if fc_vm.status == constant.VM_STATUS.UNKNOWN \
            or fc_vm.status == constant.VM_STATUS.FAULTRESUMING:
        LOG.debug(_("vm %s status is fault-resuming or unknown, "
                    "stop it before delete."), fc_vm.uri)
        self.stop_vm(instance)

    self._delete_vm_with_fc_vm(fc_vm, destroy_disks)

    # update affinity group info if needed
    try:
        self._update_drs_rules(instance)
        self._update_affinity_groups(context, instance)
    #ignore pylint:disable=W0703
    except Exception as excp:
        utils.log_exception(excp)
        LOG.error(_('update affinity group info failed !'))
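# A hedged sketch of the cleanup ordering delete_vm follows before the
# actual deletion: stop the vm first, then detach each cinder volume.
# `ops` stands for an instance of this class; the block_device_info
# shape mirrors the loop above.
def _detach_volumes_sketch(ops, instance, block_device_info):
    ops.stop_vm(instance)
    for vol in block_device_info['block_device_mapping']:
        ops.detach_volume(vol['connection_info'], instance)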
def audit_pg(self):
    context = nova_ctxt.get_admin_context()
    networks = self._neutron.get_all(context=context)
    if len(self.dvs_mapping) == 0:
        self._init_all_fc_dvs()

    pg_list = self.query_all_pg()
    for pg in pg_list:
        pg_name_ayn_list = []
        try:
            pg_name_ayn_list = re.split('#', pg['name'])
        except Exception:
            pass
        if len(pg_name_ayn_list) != 3:
            continue

        fc_network_name = pg_name_ayn_list[0]
        fc_network_id = pg_name_ayn_list[1]
        fc_dvs_id = pg_name_ayn_list[2]
        pg_id = self._get_pg_id_pg_date(pg)

        if fc_network_name is None \
                or fc_network_id is None \
                or fc_dvs_id is None \
                or pg_id is None:
            continue

        if fc_dvs_id not in self.dvs_mapping.values():
            continue

        pg_user = pg.get('userName')
        if pg_user is None:
            continue
        if pg_user != constant.CONF.fusioncompute.fc_user:
            continue

        # remove the port group only when no neutron network still
        # refers to it
        is_need_remove = True
        for network in networks:
            if network['name'] == fc_network_name \
                    and network['id'] == fc_network_id:
                is_need_remove = False
                break

        if is_need_remove:
            try:
                self.del_port_group(fc_dvs_id, pg_id)
                LOG.warn('port group removed, dvs_id=%s, pg_id=%s',
                         fc_dvs_id, pg_id)
            except Exception:
                LOG.error('error happened while deleting port group, '
                          'dvs_id=%s, pg_id=%s', fc_dvs_id, pg_id)
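# A hedged sketch of the port-group naming convention audit_pg appears to
# rely on: port groups created by this driver seem to be named
# "<network_name>#<network_id>#<dvs_id>", so splitting on '#' yields
# exactly three fields. The sample name below is invented.
def _pg_name_split_sketch():
    sample = 'demo-net#9b0c4d4e-0000#dvs-01'  # hypothetical
    name, net_id, dvs_id = re.split('#', sample)
    return name, net_id, dvs_id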
def get_cluster_stats_by_name(self, cluster_name):
    """Get the aggregate resource stats of a cluster."""
    cpu_info = {'vcpus': 0, 'cores': 0, 'vendor': [], 'model': []}
    mem_info = {'total': 0, 'used': 0}
    mem_total = 0
    mem_used = 0
    cluster_urn = None
    cluster_query_info = {'name': cluster_name}
    cluster_query_uri = utils.build_uri_with_params(self.site.cluster_uri,
                                                    cluster_query_info)
    clusters = self.get(cluster_query_uri)['clusters']
    find_cluster = None
    if clusters:
        for cluster in clusters:
            if cluster['name'] == cluster_name:
                find_cluster = cluster
    if find_cluster:
        cluster_urn = find_cluster['urn']
        hosts = self._query_host_by_scope(cluster_urn)
        for host in hosts:
            if host['status'] == 'normal' and (not host['isMaintaining']):
                cpu_info['vcpus'] += host['cpuQuantity']
                mem_total += host['memResource']['totalSizeMB']
                mem_used += host['memResource']['allocatedSizeMB']
                if 'vendor' in host:
                    cpu_info['vendor'].append(host['vendor'])
                if 'model' in host:
                    cpu_info['model'].append(host['model'])
        mem_info['total'] = mem_total
        mem_info['used'] = mem_used
        computeresource = self._get_cluster_computeresource(find_cluster)
        cpuResource = computeresource["cpuResource"]
        cpu_info["totalSizeMHz"] = cpuResource.get("totalSizeMHz")
        cpu_info["allocatedSizeMHz"] = cpuResource.get("allocatedSizeMHz")
        cpu_info["allocatedVcpus"] = cpuResource.get("allocatedVcpus", 0)
        cpu_usage_monitor_period = \
            constant.CONF.fusioncompute.cpu_usage_monitor_period
        if cpu_usage_monitor_period not in [300, 1800, 3600, 86400]:
            cpu_usage_monitor_period = 3600
        cpu_info["usage"] = self.get_cpu_usage(cpu_usage_monitor_period,
                                               cluster_urn)
        data = {'cpu': cpu_info, 'mem': mem_info}
        return cluster_urn, data
    else:
        LOG.warn(_("get cluster status failed, use default."))
        data = {'cpu': cpu_info, 'mem': mem_info}
        return cluster_urn, data
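# Hedged usage sketch: unpacking the (urn, stats) pair returned above.
# `cluster_ops` stands for an instance of the class holding this method;
# the cluster name is illustrative.
def _cluster_stats_usage_sketch(cluster_ops):
    urn, stats = cluster_ops.get_cluster_stats_by_name('cluster-01')
    free_mb = stats['mem']['total'] - stats['mem']['used']
    return urn, stats['cpu']['vcpus'], free_mb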
def get_fc_os_options(self, os_type, os_version):
    """
    Get FC os options, falling back to the default config when the
    given type or version is not in the mapping.

    :param os_type:
    :param os_version:
    :return:
    """
    try:
        fc_os_type = constant.HUAWEI_OS_TYPE_MAP[os_type.lower()]
        fc_os_version = \
            constant.HUAWEI_OS_VERSION_INT[os_type][os_version.lower()]
    #ignore pylint:disable=W0703
    except Exception as excp:
        LOG.warn(_("use default os type and version, %s."), excp)
        fc_os_type, fc_os_version = constant.DEFAULT_HUAWEI_OS_CONFIG

    return {
        'osType': fc_os_type,
        'osVersion': fc_os_version
    }
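# Hedged sketch of how the lookup degrades: any type/version pair missing
# from the maps raises a KeyError (or AttributeError on None), which is
# caught and replaced by DEFAULT_HUAWEI_OS_CONFIG. `ops` stands for an
# instance of the class above; the argument values are invented.
def _os_options_usage_sketch(ops):
    # unmapped values fall back to the default config instead of raising
    return ops.get_fc_os_options('no-such-type', 'no-such-version')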
def _raise_if_not_in_status_ok(self, rsp):
    """
    If the response status is not normal, raise an exception.

    :param rsp:
    :return:
    """
    if rsp.status_code not in self.STATUS_OK:
        error_info = {}
        try:
            error_info = rsp.json()
        #ignore pylint:disable=W0703
        except Exception as excp:
            LOG.warn('try to get error response content failed: %s',
                     excp)

        LOG.error(_('FC request error: <status_code> %s <reason> '
                    '%s <url> %s <errorcode> %s <errorDes> %s'),
                  rsp.status_code, rsp.reason, rsp.url,
                  error_info.get('errorCode', 'unknown'),
                  error_info.get('errorDes', 'unknown'))
        raise exception.RequestError(
            reason=error_info.get('errorDes'),
            error_code=error_info.get('errorCode'))
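# Hedged sketch: callers can catch the RequestError raised above to
# handle FC-side failures without crashing. The request call and path
# are illustrative; `client` stands for an instance of this REST client.
def _error_handling_sketch(client):
    try:
        return client.request_msg('GET', '/service/sites')
    except exception.RequestError as err:
        LOG.warn('FC request failed: %s', err)
        return None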
def create_vm(self, context, instance, network_info, block_device_info,
              image_meta, injected_files, admin_password, extra_specs):
    """
    Create VM on FC.

    :param context:
    :param instance:
    :param network_info:
    :param block_device_info:
    :param image_meta:
    :param injected_files:
    :param admin_password:
    :param extra_specs:
    :return:
    """
    customization, filtered_injected_files = \
        self._split_injected_files(injected_files)

    # set qos io
    self._volume_ops.set_qos_specs_to_volume(block_device_info)

    # prepare network on FC
    LOG.debug(_('prepare network'))
    vifs = []
    for idx, network_item in enumerate(network_info):
        pg_urn = self._network_ops.ensure_network(network_item['network'])
        enable_dhcp = self._network_ops.\
            is_enable_dhcp(context, network_item['id'])
        vifs.append({
            'sequence_num': idx,
            'pg_urn': pg_urn,
            'enable_dhcp': enable_dhcp,
            'network_info': network_item
        })

    location = self._cluster_ops.\
        get_cluster_urn_by_nodename(instance['node'])

    # initialize the creation object and create the vm
    try:
        LOG.debug(_('begin create vm in fc.'))
        vm_create = vmcreation.get_vm_create(self.fc_client, self.task_ops,
                                             instance, image_meta)
        vm_create(context, self._volume_ops, location, vifs,
                  block_device_info, image_meta, filtered_injected_files,
                  admin_password, extra_specs, customization)
        vm_create.create_and_boot_vm()
    except Exception as exc:
        utils.log_exception(exc)
        msg = _("create and boot vm %s failed.") % instance['name']
        self.delete_vm(context, instance, block_device_info)
        raise exception.InstancePowerOnFailure(msg)

    boot_result = {'result': False}

    def _wait_for_boot():
        """Called at an interval until the VM is running."""
        status = FC_MGR.get_vm_by_uuid(instance).status
        if status == constant.VM_STATUS.RUNNING:
            LOG.debug(_("vm %s create success."), instance['name'])
            boot_result['result'] = True
            raise loopingcall.LoopingCallDone()
        elif status == constant.VM_STATUS.STOPPED:
            LOG.debug(_("create vm %s success, but start failed."),
                      instance['name'])
            raise loopingcall.LoopingCallDone()
        else:
            LOG.debug(_("vm %s is still in creating state."),
                      instance['name'])

    timer = loopingcall.FixedIntervalLoopingCall(_wait_for_boot)
    timer.start(interval=1).wait()

    if not boot_result['result']:
        self.delete_vm(context, instance, block_device_info)
        msg = _("create vm %s success, but start failed.") % \
            instance['name']
        raise exception.InstancePowerOnFailure(msg)

    try:
        urn = FC_MGR.get_vm_by_uuid(instance).urn
        instance.system_metadata.update({'fc_vm_id': urn.split(':')[-1]})
        instance.save()
    except Exception as exc:
        utils.log_exception(exc)
        LOG.warn(_("update sys metadata for %s failed."),
                 instance['name'])
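# A minimal standalone sketch of the polling pattern used in create_vm:
# a FixedIntervalLoopingCall runs the callback at a fixed interval until
# the callback raises LoopingCallDone, and start(...).wait() blocks the
# caller until then. `predicate` is a hypothetical zero-argument callable.
def _wait_until_sketch(predicate, interval=1):
    result = {'ok': False}

    def _poll():
        if predicate():
            result['ok'] = True
            raise loopingcall.LoopingCallDone()

    loopingcall.FixedIntervalLoopingCall(_poll).start(
        interval=interval).wait()
    return result['ok']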