def create_drs_rules(self, cluster, rule_name, rule_type):
    """Create a DRS rule in the given cluster.

    :param cluster:
    :param rule_name:
    :param rule_type:
    :return:
    """
    rule = self._get_drs_rules_from_cluster(cluster, rule_name, rule_type)
    if rule:
        LOG.debug(_("drs rule %s already exists"), rule_name)
        return

    body = {
        'drsSetting': {
            'drsRules': [{
                'operationType': constant.DRS_RULES_OP_TYPE_MAP['create'],
                'ruleName': rule_name,
                'ruleType': rule_type
            }]
        }
    }
    self._modify_cluster(cluster, body)
    LOG.debug(_("create drs rule %s succeeded"), rule_name)
def inner(*args, **kwargs):
    """Inner function of the task decorator.

    :param args: positional arguments of the decorated function
    :param kwargs: keyword arguments of the decorated function
    :return:
    """
    try:
        resp = func(*args, **kwargs)
    except fc_exc.RequestError as req_exc:
        if exc:
            raise exc(str(req_exc.kwargs['reason']))
        raise req_exc

    if isinstance(resp, dict) and resp.get('taskUri'):
        success, reason = task_ops.wait_task_done(resp['taskUri'])
        if not success:
            LOG.error(_('task failed: %s'), reason)
            if exc:
                raise exc(str(reason))
            raise fc_exc.FusionComputeTaskException(reason=reason)
    return resp
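# A minimal sketch of the decorator factory that `inner` is presumably
# defined inside; the factory name `fc_task_decorator` and its exact
# signature are assumptions, while `func`, `exc` and `task_ops` match
# the closure names used above:
import functools

def fc_task_decorator(task_ops, exc=None):
    def wrapper(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            # body exactly as in `inner` above: call func, map
            # RequestError to exc, then wait on any returned taskUri
            return func(*args, **kwargs)
        return inner
    return wrapper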
def delete_drs_rules(self, cluster, rule_name, rule_type):
    """Delete a DRS rule from the given cluster.

    :param cluster:
    :param rule_name:
    :param rule_type:
    :return:
    """
    rule = self._get_drs_rules_from_cluster(cluster, rule_name, rule_type)
    if rule is None:
        LOG.debug(_("drs rule %s does not exist"), rule_name)
        return

    body = {
        'drsSetting': {
            'drsRules': [{
                'operationType': constant.DRS_RULES_OP_TYPE_MAP['delete'],
                'ruleIndex': rule['ruleIndex']
            }]
        }
    }
    self._modify_cluster(cluster, body)
    LOG.debug(_("delete drs rule %s succeeded"), rule_name)
def __init__(self, virtapi):
    LOG.info(_("begin to init FusionComputeDriver ..."))
    super(FusionComputeDriver, self).__init__(virtapi)

    self._client = FCBaseClient(
        constant.CONF.fusioncompute.fc_ip,
        constant.CONF.fusioncompute.fc_user,
        crypt.decrypt(constant.CONF.fusioncompute.fc_pwd),
        constant.FC_DRIVER_JOINT_CFG['user_type'],
        ssl=True,
        port=constant.FC_DRIVER_JOINT_CFG['fc_port'],
        api_version=constant.FC_DRIVER_JOINT_CFG['api_version'],
        request_time_out=constant.FC_DRIVER_JOINT_CFG['request_time_out'])
    self._client.set_default_site()

    # task ops is needed by the other ops, so init it first
    self.task_ops = taskops.TaskOperation(self._client)
    FC_MGR.set_client(self._client)

    self.network_ops = networkops.NetworkOps(self._client, self.task_ops)
    self.volume_ops = volumeops.VolumeOps(self._client, self.task_ops)
    self.cluster_ops = fc_cluster.ClusterOps(self._client, self.task_ops)
    self.compute_ops = computeops.ComputeOps(self._client, self.task_ops,
                                             self.network_ops,
                                             self.volume_ops,
                                             self.cluster_ops)
def attach_volume(self, connection_info, instance, mountpoint):
    """Attach volume for vm.

    :param connection_info:
    :param instance:
    :param mountpoint:
    :return:
    """
    LOG.info(_("trying to attach vol for vm: %s.") % instance['name'])
    # 0. set qos io
    self._volume_ops.set_qos_specs_to_volume(connection_info)

    # 1. volume can only be attached when vm is running or stopped
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    if fc_vm.status not in [constant.VM_STATUS.RUNNING,
                            constant.VM_STATUS.STOPPED]:
        reason = _("vm status is not running or stopped!")
        raise fc_exc.InstanceAttachvolFailure(reason=reason)

    # 2. ignore this op when vm already has this volume
    vol_urn = self._get_vol_urn_from_connection(connection_info)
    if self._check_if_vol_in_instance(fc_vm, vol_urn):
        LOG.info(_("vm %s already has vol %s, consider it success"),
                 fc_vm.name, vol_urn)
        return

    # 3. attach this volume
    self._volume_action(self._volume_ops.attach_volume,
                        vol_urn, fc_vm, mountpoint)
def detach_volume(self, connection_info, instance):
    """Detach volume for vm.

    :param connection_info:
    :param instance:
    :return:
    """
    LOG.info(_("trying to detach vol for vm: %s.") % instance['name'])

    # 1. volume can only be detached when vm is running or stopped
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    if fc_vm.status not in [constant.VM_STATUS.RUNNING,
                            constant.VM_STATUS.STOPPED]:
        reason = _("vm status is not running or stopped!")
        raise fc_exc.InstanceDetachvolFailure(reason=reason)

    # 2. ignore this op when vm does not have this volume
    vol_urn = self._get_vol_urn_from_connection(connection_info)
    if not self._check_if_vol_in_instance(fc_vm, vol_urn):
        LOG.info(_("vol %s is not in vm %s, consider it success"),
                 vol_urn, fc_vm.name)
        return

    # 3. detach this volume
    self._volume_action(self._volume_ops.detach_volume, vol_urn, fc_vm)
def get_disks_info(self):
    """get image disk detail info"""
    LOG.debug(_('prepare volume'))
    disks_info = []

    # sys vol info
    sys_disk_info = {
        'sequenceNum': 1,
        'quantityGB': self._instance['root_gb'],
        'isThin': constant.FC_DRIVER_JOINT_CFG['volume_is_thin']
    }
    disks_info.append(sys_disk_info)

    # user vol info
    for disk in self._volume_ops.ensure_volume(self._block_device_info):
        user_disk_info = {
            'pciType': disk['pci'],
            'volumeUrn': disk['urn'],
            'sequenceNum':
                constant.MOUNT_DEVICE_SEQNUM_MAP[disk['mount_device']],
            'isThin': constant.FC_DRIVER_JOINT_CFG['volume_is_thin']
        }
        disks_info.append(user_disk_info)
    return disks_info
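# Illustrative return value of get_disks_info() for a VM with an 8 GB
# system disk and one user volume; the pciType, URN and sequence number
# values below are made up for the example:
#
#   [{'sequenceNum': 1, 'quantityGB': 8, 'isThin': True},
#    {'pciType': 'IDE', 'volumeUrn': 'urn:sites:1:volumes:99',
#     'sequenceNum': 2, 'isThin': True}]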
def _request(self, method, path, data=None, headers=None, **kwargs):
    """send request msg

    :param method:
    :param path:
    :param data:
    :param headers:
    :param kwargs:
    :return:
    """
    url = self._to_url(path)

    if not data:
        data = jsonutils.dumps({})
    elif isinstance(data, dict) or isinstance(data, list):
        data = jsonutils.dumps(data)

    try:
        data_for_log = copy.deepcopy(jsonutils.loads(data))
        utils.drop_password_key(data_for_log)
        LOG.debug(_('request: %s %s %s'), method, url,
                  jsonutils.dumps(data_for_log))
    except Exception:
        LOG.debug(_('request: %s %s'), method, url)

    rsp = requests.request(method, url, data=data, headers=headers,
                           verify=False, **kwargs)
    return rsp
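# utils.drop_password_key is used above to scrub credentials before
# logging; a minimal standalone sketch of such a scrubber (an
# assumption, not the driver's actual implementation):
def drop_password_key(data):
    """Recursively remove password-like keys from a dict, in place."""
    for key in list(data.keys()):
        if 'password' in key.lower() or key in ('pwd', 'fc_pwd'):
            del data[key]
        elif isinstance(data[key], dict):
            drop_password_key(data[key])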
def modify_drs_rules(self, cluster, rule_name, rule_type, vms):
    """Modify a DRS rule in the given cluster.

    :param cluster:
    :param rule_name:
    :param rule_type:
    :param vms:
    :return:
    """
    rule = self._get_drs_rules_from_cluster(cluster, rule_name, rule_type)
    if rule is None:
        msg = _("Cannot find drs rule: name=%s, type=%d") % \
              (rule_name, rule_type)
        raise fc_exc.AffinityGroupException(reason=msg)

    body = {
        'drsSetting': {
            'drsRules': [{
                'operationType': constant.DRS_RULES_OP_TYPE_MAP['modify'],
                'ruleIndex': rule['ruleIndex'],
                'ruleName': rule_name,
                'ruleType': rule_type,
                'vms': vms
            }]
        }
    }
    self._modify_cluster(cluster, body)
    LOG.debug(_("modify drs rule %s succeeded"), rule_name)
def detach_interface(self, instance, vif):
    """Detach a virtual NIC from the FusionCompute virtual machine.

    :param instance:
    :param vif:
    :return: response: {"taskUrn": string, "taskUri": string};
             if the NIC does not exist, returns {} instead
    """
    LOG.debug(_("trying to detach interface for vm name: %s,"
                "vm uuid: %s, vif information is %s"),
              instance['name'], instance['uuid'], vif)

    response = {}
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    nics = fc_vm["vmConfig"]["nics"]
    LOG.info("nics in FusionCompute is %s", nics)
    nic_uri = None
    for nic in nics:
        if nic['name'] == vif['id']:
            nic_uri = nic['uri']
            break

    if nic_uri:
        detach_interface_uri = nic_uri.replace("nics", "virtualNics")
        LOG.info("detach_interface_uri is %s", detach_interface_uri)
        response = self.delete(detach_interface_uri,
                               excp=exception.InstanceInvalidState)
    else:
        LOG.warn(_("detach interface for vm name: %s, nic does not "
                   "exist."), instance['name'])

    LOG.info(_('send detach interface finished, return is: %s'),
             jsonutils.dumps(response))
    return response
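# Illustration of the URI rewrite above; the exact path layout is an
# assumption for the example's sake:
#
#   nic_uri              = '.../vms/123/nics/1'
#   detach_interface_uri = '.../vms/123/virtualNics/1'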
def finish_revert_migration(self, instance, power_on=True):
    """Revert a migration: restore the old flavor, clear group info,
    and power the vm back on.

    :param instance:
    :param power_on:
    :return:
    """
    LOG.info(_("begin finish_revert_migration ..."))

    # 1. get flavor info from fc
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    # ignore pylint:disable=W0612
    old_flavor, new_flavor = self._get_flavor_from_group(fc_vm.group)

    # 2. revert cpu and mem changes
    location = self._cluster_ops.\
        get_cluster_urn_by_nodename(instance['node'])
    data = self._generate_vm_spec_info(location=location,
                                       flavor=old_flavor)
    self.modify_vm(fc_vm, vm_config=data)
    LOG.info(_("modify cpu and mem success."))

    # 3. clear vm group info
    self._reset_vm_group(fc_vm)

    # 4. power on vm if needed
    if power_on:
        self.start_vm(instance)
def _modify_boot_option_if_needed(self, instance, fc_vm):
    """Sync the boot option from instance metadata to the FC vm.

    :param instance: OpenStack instance object
    :param fc_vm: FusionCompute vm object
    :return:
    """
    new_boot_option = utils.get_boot_option_from_metadata(
        instance.get('metadata'))

    old_boot_option = None
    if 'vmConfig' in fc_vm:
        vm_property = fc_vm['vmConfig'].get('properties')
        old_boot_option = vm_property.get('bootOption') if vm_property \
            else None

    if new_boot_option and old_boot_option and \
            new_boot_option != old_boot_option:
        LOG.info(_("trying to modify boot option from %s to %s") %
                 (old_boot_option, new_boot_option))
        body = {
            'properties': {
                'bootOption': new_boot_option
            }
        }
        try:
            self.modify_vm(instance, vm_config=body)
        except Exception as msg:
            LOG.error(_("modify boot option has exception: %s") % msg)
def get_cpu_usage(self, monitor_period, cluster_urn):
    end_time = self.get_fc_current_time()
    start_time = end_time - (monitor_period * 2)

    body = [{
        "startTime": str(start_time),
        "endTime": str(end_time),
        "interval": str(monitor_period),
        "metricId": "cpu_usage",
        "urn": cluster_urn
    }]
    LOG.debug("get_cpu_usage body:%s", json.dumps(body))
    response = self.fc_client.post(self.site.metric_curvedata_uri,
                                   data=body)
    LOG.debug("get_cpu_usage body:%s response:%s",
              json.dumps(body), json.dumps(response))

    if response:
        if len(response["items"]) > 0:
            metric_value = response["items"][0]["metricValue"]
            if len(metric_value) > 0:
                value = metric_value[0]["value"]
                # `is` tests identity, not equality; use == for ints
                if len(metric_value) == 2:
                    if metric_value[1]["value"] is not None:
                        value = metric_value[1]["value"]
                return value
    return None
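# The nested indexing above assumes a metric-curve response shaped
# roughly like this (an illustration, not a documented FC payload):
#
#   {"items": [{"urn": "<cluster urn>",
#               "metricValue": [{"time": 1400000000, "value": 12.5},
#                               {"time": 1400000300, "value": 13.0}]}]}
#
# i.e. up to two samples for the queried window; the newer sample wins
# when it is present and non-null.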
def _generate_vm_spec_info(self, location=None, flavor=None):
    """Generate the vm spec info for cold migration.

    :param location:
    :param flavor:
    :return:
    """
    data = {}
    if location:
        data['location'] = location
    if flavor:
        if flavor.get('vcpus'):
            data['cpu'] = {'quantity': flavor.get('vcpus')}
        if flavor.get('memory_mb'):
            data['memory'] = {'quantityMB': flavor.get('memory_mb')}
        cpu_qos = utils.dict_filter_and_convert(flavor,
                                                constant.CPU_QOS_FC_KEY,
                                                constant.CPU_QOS_FC_KEY)
        if data.get('cpu', None):
            data['cpu'] = utils.dict_add(data['cpu'], cpu_qos)
        else:
            data['cpu'] = cpu_qos

    LOG.debug(_("vm spec data: %s.") % jsonutils.dumps(data))
    return data
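# A hedged example of the spec body produced above for a flavor with
# 2 vcpus and 2048 MB of memory and no CPU QoS keys (the URN value is
# made up):
#
#   {'location': 'urn:sites:1:clusters:2',
#    'cpu': {'quantity': 2},
#    'memory': {'quantityMB': 2048}}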
def request_msg(self, method, path, data=None, headers=None, **kwargs):
    # pop our private flag so it is not forwarded to requests
    need_response = kwargs.pop('need_response', False)

    req_headers = self._update_and_get_headers(headers, False)

    # set default request timeout
    kwargs['timeout'] = kwargs.get('timeout', self.__request_time_out)
    rsp = self._request(method, path, data, headers=req_headers, **kwargs)
    if rsp.status_code in self.STATUS_NO_AUTH:
        LOG.info('token may have expired, fetch it again.')
        req_headers = self._update_and_get_headers(headers, True)
        rsp = self._request(method, path, data,
                            headers=req_headers, **kwargs)

    # raise if the response status is not ok
    self._raise_if_not_in_status_ok(rsp)
    ret_data = {'response': rsp, 'data': None}
    if rsp.text:
        try:
            ret_data['data'] = rsp.json()
        # ignore pylint:disable=W0703
        except Exception as excp:
            LOG.warn(_('failed to load json response data, %s'), excp)
            ret_data['data'] = rsp.text

    if need_response:
        return ret_data
    return ret_data['data']
def ensure_network(self, network_info):
    """Ensure the network resource exists on FC.

    :param network_info: network_info from nova, dictionary type
    :return:
    """
    # NOTE: physical network is only visible to admin users
    context = nova_ctxt.get_admin_context()

    network = self._get_network_from_neutron(context, network_info)
    LOG.debug(_('get network info from neutron: %s'), network)

    if network['provider:network_type'] == constant.TYPE_VXLAN:
        dvs_name = constant.CONF.fusioncompute.vxlan_dvs_name
    else:
        dvs_name = network['provider:physical_network']
    dvs_id = self._get_dvs_id_by_physnet_name(dvs_name)
    if not dvs_id:
        # report the dvs name that was actually looked up (for vxlan
        # networks this is the configured vxlan dvs, not the physnet)
        raise fc_exc.DVSwitchNotFound(dvs_id=dvs_name)

    pg_adpt = PortGroupQueryAdapter(network, dvs_id)
    pg_data = self.query_port_group(pg_adpt)
    if not pg_data:
        try:
            pg_adpt = PortGroupCreateAdapter(network, dvs_id)
            pg_data = self.create_port_group(dvs_id, pg_adpt)
        except Exception as e:
            # race condition: another worker may have created it first
            LOG.warn(_('create pg failed (%s), will check it again'), e)
            pg_adpt = PortGroupQueryAdapter(network, dvs_id)
            pg_data = self.query_port_group(pg_adpt)

    return pg_data['urn'] if pg_data else None
def _reboot_vm(self, fc_vm, reboot_type):
    """reboot vm inner func"""
    body = {'mode': constant.FC_REBOOT_TYPE[reboot_type]}
    self.post(fc_vm.get_vm_action_uri('reboot'), data=body,
              excp=exception.InstanceRebootFailure)
    LOG.debug(_("_reboot_vm %s success"), fc_vm.uri)
def _set_qos_specs_to_volume(self, connection_info):
    """Send the volume QoS IO specs to FC.

    :param connection_info:
    :return:
    """
    qos_para = {'maxReadBytes': 0,
                'maxWriteBytes': 0,
                'maxReadRequest': 0,
                'maxWriteRequest': 0}
    key_cvt_map = {'read_bytes_sec': 'maxReadBytes',
                   'write_bytes_sec': 'maxWriteBytes',
                   'read_iops_sec': 'maxReadRequest',
                   'write_iops_sec': 'maxWriteRequest'}
    tune_opts = ['read_bytes_sec', 'write_bytes_sec',
                 'read_iops_sec', 'write_iops_sec']
    tune_cvt_opts = ['read_bytes_sec', 'write_bytes_sec']

    # extract rate_limit control parameters
    if connection_info is None or 'data' not in connection_info:
        return

    specs = connection_info['data']['qos_specs']
    vol_urn = connection_info.get('vol_urn')
    if vol_urn is None:
        return

    # Because the volume can be detached and then attached to another
    # instance, the qos may have been disassociated from the volume
    # type between those two operations, so if specs is None the
    # default (unlimited) values are sent to FC.
    if specs is not None:
        if isinstance(specs, dict):
            for key, value in specs.iteritems():
                if key in tune_opts:
                    # convert bytes to KB for FC; 0 means unlimited,
                    # and the converted value is at least 1
                    output_value = value
                    if key in tune_cvt_opts:
                        addition = 0
                        if output_value.isdigit():
                            if long(value) % 1024 != 0:
                                addition = 1
                            output_value = long(value) / 1024 \
                                + addition
                    qos_para[key_cvt_map[key]] = output_value
        else:
            LOG.debug(_('Unknown content in connection_info '
                        'qos_specs: %s'), specs)
            return

    qos_specs_uri = utils.generate_uri_from_urn(vol_urn) \
        + constant.VOL_URI_MAP['modio']

    # send qos io specs to VRM with the put method
    self.put(qos_specs_uri, data=qos_para, excp=fc_exc.SetQosIoFailure)
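# A standalone sketch (Python 2, matching the `long` usage above) of
# the bytes-to-KB ceiling conversion performed for the byte-rate keys;
# the helper name is hypothetical and not part of the driver:
def _bytes_to_kb_ceiling(value):
    """Round a byte rate up to whole KB: '10240' -> 10, '10300' -> 11."""
    value = long(value)
    return value / 1024 + (1 if value % 1024 else 0)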
def _get_volume_meta_data(self, context, volume_id):
    """Get volume metadata from cinder.

    :param volume_id:
    :return:
    """
    LOG.debug(_('get_volume_meta_data enter, volume_id:%s.'), volume_id)
    return self._volume_api.get(context, volume_id)
def change_instance_info(self, instance):
    LOG.info(_("trying to change instance display_name = %s"),
             instance['display_name'])

    body = {'name': instance['display_name']}
    try:
        self.modify_vm(instance, vm_config=body)
    except Exception as msg:
        LOG.error(_("change_instance_info has exception, msg = %s")
                  % msg)
def get_available_nodes(self, refresh=True):
    """Returns nodenames of all nodes managed by the compute service."""
    # refresh by default to ensure the node list is up to date
    if refresh:
        self.cluster_ops.update_resources()
    node_list = self.cluster_ops.resources
    LOG.debug(_("The available nodes are: %s") % node_list)
    return node_list
def check_input(self):
    """
    check function input params
    :return:
    """
    os_option = self.get_os_options()
    LOG.debug(_('os option: %s .'), jsonutils.dumps(os_option))
    if not (os_option['osType'] and os_option['osVersion']):
        LOG.error('Invalid os option for vm %s!', self._instance['name'])
        raise fc_exc.InvalidOsOption()
def get_total_vm_numbers(self, **kwargs):
    """Get the total number of VMs on FC.

    :return:
    """
    instances = self._query_vm(limit=1, offset=0, detail=0, **kwargs)
    if not instances or not instances.get('total'):
        return 0
    total = int(instances.get('total'))
    LOG.info(_("total instance number is %d."), total)
    return total
def check_input(self):
    """
    create vm image detail check
    :return:
    """
    super(VmCreateWithImage, self).check_input()

    disk_quantity_gb = self._instance['root_gb']
    image_size = self._get_image_size()
    if image_size > disk_quantity_gb:
        LOG.error(_("image is larger than sys-vol."))
        raise fc_exc.ImageTooLarge
def _delete_vm_with_fc_vm(self, fc_vm, destroy_disks=True):
    """
    delete vm with fc instance, inner function
    :param fc_vm:
    :param destroy_disks:
    :return:
    """
    reserve_disks = {'isReserveDisks': 0 if destroy_disks else 1}
    LOG.info(_('Deleting VM on FC, instance: %s reserve_disks %s'),
             fc_vm.name, jsonutils.dumps(reserve_disks))
    self.delete(utils.build_uri_with_params(fc_vm.uri, reserve_disks))
def confirm_migration(self, instance):
    """Confirm the migration by clearing the vm group info.

    :param instance:
    :return:
    """
    LOG.info(_("begin confirm_migration ..."))

    # clear vm group info
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    self._reset_vm_group(fc_vm)
def audit_pg(self):
    context = nova_ctxt.get_admin_context()
    networks = self._neutron.get_all(context=context)
    if len(self.dvs_mapping) == 0:
        self._init_all_fc_dvs()

    pg_list = self.query_all_pg()
    for pg in pg_list:
        pg_name_ayn_list = []
        try:
            pg_name_ayn_list = re.split('#', pg['name'])
        except Exception:
            pass
        if len(pg_name_ayn_list) != 3:
            continue

        fc_network_name = pg_name_ayn_list[0]
        fc_network_id = pg_name_ayn_list[1]
        fc_dvs_id = pg_name_ayn_list[2]
        pg_id = self._get_pg_id_pg_date(pg)

        if fc_network_name is None \
                or fc_network_id is None \
                or fc_dvs_id is None \
                or pg_id is None:
            continue

        if fc_dvs_id not in self.dvs_mapping.values():
            continue

        pg_user = pg.get('userName')
        if pg_user is None:
            continue
        if pg_user != constant.CONF.fusioncompute.fc_user:
            continue

        is_need_remove = True
        for network in networks:
            if network['name'] == fc_network_name \
                    and network['id'] == fc_network_id:
                is_need_remove = False
                break

        if is_need_remove:
            try:
                self.del_port_group(fc_dvs_id, pg_id)
                LOG.warn('port group removed, dvs_id=%s, pg_id=%s',
                         fc_dvs_id, pg_id)
            except Exception:
                LOG.error('error happened while removing port group, '
                          'dvs_id=%s, pg_id=%s', fc_dvs_id, pg_id)
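# The three-way split above implies port groups created by this driver
# are named "<network name>#<neutron network id>#<dvs id>"; a hedged
# illustration (identifiers made up):
#
#   re.split('#', 'demo-net#6f1a4d8e#dvs-21')
#   -> ['demo-net', '6f1a4d8e', 'dvs-21']
#
# Any port group whose name does not yield exactly three fields is
# skipped by the audit.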
def check_input(self):
    super(VmCreateWithTemplate, self).check_input()
    properties = self._image_meta.get('properties')
    if properties:
        try:
            self._cloned_source_vm_or_tpl = \
                self._get_vm_by_template_url(
                    properties.get(constant.HUAWEI_IMAGE_LOCATION))
            self._validate_template(self._cloned_source_vm_or_tpl)
        except Exception:
            LOG.error(_("Invalid FusionCompute template !"))
            raise fc_exc.InstanceCloneFailure
def get_block_device_meta_data(self, context, block_device_info):
    """Get volume metadata from the first block device mapping.

    :param context:
    :param block_device_info:
    :return:
    """
    LOG.debug('volume info is: %s', block_device_info)
    if len(block_device_info['block_device_mapping']) > 0:
        volume_info = block_device_info['block_device_mapping'][0]
        volume_id = volume_info['connection_info']['serial']
        return self._get_volume_meta_data(context, volume_id)
    return None
def get_cluster_stats_by_name(self, cluster_name):
    """Get the aggregate resource stats of a cluster."""
    cpu_info = {'vcpus': 0, 'cores': 0, 'vendor': [], 'model': []}
    mem_info = {'total': 0, 'used': 0}
    mem_total = 0
    mem_used = 0
    cluster_urn = None
    cluster_query_info = {'name': cluster_name}
    cluster_query_uri = utils.build_uri_with_params(self.site.cluster_uri,
                                                    cluster_query_info)
    clusters = self.get(cluster_query_uri)['clusters']
    find_cluster = None
    if clusters:
        for cluster in clusters:
            if cluster['name'] == cluster_name:
                find_cluster = cluster

    if find_cluster:
        cluster_urn = find_cluster['urn']
        hosts = self._query_host_by_scope(cluster_urn)
        for host in hosts:
            if host['status'] == 'normal' and (not host['isMaintaining']):
                cpu_info['vcpus'] += host['cpuQuantity']
                mem_total += host['memResource']['totalSizeMB']
                mem_used += host['memResource']['allocatedSizeMB']
                if 'vendor' in host:
                    cpu_info['vendor'].append(host['vendor'])
                if 'model' in host:
                    cpu_info['model'].append(host['model'])

        mem_info['total'] = mem_total
        mem_info['used'] = mem_used

        compute_resource = self._get_cluster_computeresource(find_cluster)
        cpu_resource = compute_resource["cpuResource"]
        cpu_info["totalSizeMHz"] = cpu_resource.get("totalSizeMHz")
        cpu_info["allocatedSizeMHz"] = cpu_resource.get("allocatedSizeMHz")
        cpu_info["allocatedVcpus"] = cpu_resource.get("allocatedVcpus", 0)

        cpu_usage_monitor_period = \
            constant.CONF.fusioncompute.cpu_usage_monitor_period
        if cpu_usage_monitor_period not in [300, 1800, 3600, 86400]:
            cpu_usage_monitor_period = 3600
        cpu_info["usage"] = self.get_cpu_usage(cpu_usage_monitor_period,
                                               cluster_urn)

        data = {'cpu': cpu_info, 'mem': mem_info}
        return cluster_urn, data
    else:
        LOG.warn(_("get cluster status failed, use default."))
        data = {'cpu': cpu_info, 'mem': mem_info}
        return cluster_urn, data
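# A hedged sketch of the (cluster_urn, data) tuple returned on success;
# every figure below is illustrative only:
#
#   ('urn:sites:1:clusters:2',
#    {'cpu': {'vcpus': 64, 'cores': 0, 'vendor': ['Intel'],
#             'model': ['Xeon'], 'totalSizeMHz': 96000,
#             'allocatedSizeMHz': 32000, 'allocatedVcpus': 16,
#             'usage': 12.5},
#     'mem': {'total': 262144, 'used': 65536}})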
def inject_files(self):
    """Inject files into the vm via the FC set_vm_data action.

    :return:
    """
    fc_vm = FC_MGR.get_vm_by_uuid(self._instance)
    for (path, contents) in self._injected_files:
        body = {
            'fileName': path,
            'vmData': contents
        }
        self.post(fc_vm.get_vm_action_uri('set_vm_data'), data=body)
        LOG.debug(_('inject file %s succeed.') % path)
def _attach_user_vols(self):
    """Attach user volumes to the vm after creation.

    :return:
    """
    fc_vm = FC_MGR.get_vm_by_uuid(self._instance)
    for disk in self._volume_ops.ensure_volume(self._block_device_info):
        body = {
            'volUrn': disk['urn'],
            'sequenceNum':
                constant.MOUNT_DEVICE_SEQNUM_MAP[disk['mount_device']]
        }
        LOG.debug(_("begin attach user vol: %s"), disk['urn'])
        self._volume_ops.attach_volume(fc_vm, vol_config=body)