def modify_drs_rules(self, cluster, rule_name, rule_type, vms):
    """Update an existing DRS rule on the given cluster.

    :param cluster: cluster data used to locate and modify the rule
    :param rule_name: name of the DRS rule to modify
    :param rule_type: type of the DRS rule
    :param vms: vm list to associate with the rule
    :return: None
    :raise fc_exc.AffinityGroupException: when no such rule exists
    """
    rule = self._get_drs_rules_from_cluster(cluster, rule_name, rule_type)
    if rule is None:
        msg = (_("Can not find drs rules: name=%s,") % rule_name)
        raise fc_exc.AffinityGroupException(reason=msg)

    modify_rule = {
        'operationType': constant.DRS_RULES_OP_TYPE_MAP['modify'],
        'ruleIndex': rule['ruleIndex'],
        'ruleName': rule_name,
        'ruleType': rule_type,
        'vms': vms
    }
    self._modify_cluster(cluster, {'drsSetting': {'drsRules': [modify_rule]}})
    LOG.debug(_("modify drs rules %s succeed"), rule_name)
def _get_fc_vm_by_uuid_and_external_uuid(
        self, vm_info, limit=1, offset=0, detail=2, **kwargs):
    """Look up an FC vm by the given conditions, falling back to uuid.

    The primary query uses the caller-supplied kwargs (external uuid);
    if nothing matches, retry by uuid to cover upgraded instances.

    :param vm_info: identifier used for the fallback query and in errors
    :param limit: query page size
    :param offset: query page offset
    :param detail: query detail level
    :param kwargs: primary query conditions
    :return: FCInstance built from the first matching vm
    :raise exception.InstanceNotFound: when neither query matches
    """
    primary = self._query_vm(limit=limit, offset=offset,
                             detail=detail, **kwargs)
    if primary and primary['vms']:
        return FCInstance(primary['vms'][0])

    # fallback: find vm by uuid for upgrade scenarios
    fallback = self._query_vm(limit=limit, offset=offset,
                              detail=detail, uuid=vm_info)
    if not fallback or not fallback['vms']:
        LOG.error(_("can not find instance %s."), vm_info)
        raise exception.InstanceNotFound(instance_id=vm_info)
    return FCInstance(fallback['vms'][0])
def request_msg(self, method, path, data=None, headers=None, **kwargs):
    """Send an HTTP request to FC, re-authenticating once on auth failure.

    :param method: HTTP method
    :param path: request path
    :param data: request body
    :param headers: extra request headers
    :param kwargs: forwarded to the underlying request; honours
        'timeout' and 'need_response'
    :return: the full {'response', 'data'} dict when need_response is
        truthy, otherwise just the parsed (or raw) body
    """
    req_headers = self._update_and_get_headers(headers, False)

    # set default request time out
    kwargs.setdefault('timeout', self.__request_time_out)

    rsp = self._request(method, path, data, headers=req_headers, **kwargs)
    if rsp.status_code in self.STATUS_NO_AUTH:
        LOG.info('token may expired, fetch again.')
        req_headers = self._update_and_get_headers(headers, True)
        rsp = self._request(method, path, data,
                            headers=req_headers, **kwargs)

    # catch message sending exception
    self._raise_if_not_in_status_ok(rsp)

    ret_data = {'response': rsp, 'data': None}
    if rsp.text:
        try:
            ret_data['data'] = rsp.json()
        # ignore pylint:disable=W0703
        except Exception as excp:
            LOG.warn(_('failed to loads json response data, %s'), excp)
            ret_data['data'] = rsp.text

    return ret_data if kwargs.get('need_response', False) \
        else ret_data['data']
def __init__(self, virtapi):
    """Initialize the FusionCompute driver and its operation helpers."""
    LOG.info(_('begin to init FusionComputeDriver ...'))
    super(FusionComputeDriver, self).__init__(virtapi)

    fc_conf = constant.CONF.fusioncompute
    joint_cfg = constant.FC_DRIVER_JOINT_CFG
    self._client = FCBaseClient(
        fc_conf.fc_ip,
        fc_conf.fc_user,
        fc_conf.fc_pwd,
        joint_cfg['user_type'],
        ssl=True,
        port=joint_cfg['fc_port'],
        api_version=joint_cfg['api_version'],
        request_time_out=joint_cfg['request_time_out'])
    self._client.set_default_site()

    # task ops is need by other ops, init it first
    self.task_ops = taskops.TaskOperation(self._client)
    FC_MGR.set_client(self._client)

    self.network_ops = networkops.NetworkOps(self._client, self.task_ops)
    self.volume_ops = volumeops.VolumeOps(self._client, self.task_ops)
    self.cluster_ops = fc_cluster.ClusterOps(self._client, self.task_ops)
    self.compute_ops = computeops.ComputeOps(
        self._client, self.task_ops, self.network_ops,
        self.volume_ops, self.cluster_ops)
def inner(*args, **kwargs):
    """Invoke the wrapped function and wait for any FC task it starts.

    :param args: the list format args of function that will be decorated
    :param kwargs: the dict format args of function that will be decorated
    :return: the wrapped function's response
    :raise: exc (when set) or the underlying request/task exception
    """
    try:
        resp = func(*args, **kwargs)
    except fc_exc.RequestError as req_exc:
        if exc:
            raise exc(str(req_exc.kwargs['reason']))
        raise req_exc

    task_uri = resp.get('taskUri') if isinstance(resp, dict) else None
    if task_uri:
        # fixedInterval selects the fixed-interval polling variant
        if fixedInterval != 0:
            success, reason = task_ops.wait_task_done(
                task_uri, 3, fixedInterval)
        else:
            success, reason = task_ops.wait_task_done(task_uri)
        if not success:
            LOG.error(_('task failed: %s'), reason)
            if exc:
                raise exc(str(reason))
            raise fc_exc.FusionComputeTaskException(reason=reason)
    return resp
def create_drs_rules(self, cluster, rule_name, rule_type):
    """Create a DRS rule on the cluster; no-op when it already exists.

    :param cluster: cluster data used to locate and modify the rule
    :param rule_name: name of the DRS rule to create
    :param rule_type: type of the DRS rule
    :return: None
    """
    existing = self._get_drs_rules_from_cluster(cluster, rule_name,
                                                rule_type)
    if existing:
        LOG.debug(_("drs rules %s already exists"), rule_name)
        return

    new_rule = {
        'operationType': constant.DRS_RULES_OP_TYPE_MAP['create'],
        'ruleName': rule_name,
        'ruleType': rule_type
    }
    self._modify_cluster(cluster, {'drsSetting': {'drsRules': [new_rule]}})
    LOG.debug(_("create drs rules %s succeed"), rule_name)
def get_cpu_usage(self, monitor_period, cluster_urn):
    """Query the most recent cpu_usage metric value for a cluster.

    :param monitor_period: sampling interval in seconds; the query
        window covers the last two periods
    :param cluster_urn: urn of the cluster to query
    :return: the latest available cpu usage value, or None when the
        metric query returned no data
    """
    end_time = self.get_fc_current_time()
    start_time = end_time - (monitor_period * 2)

    body = [{
        "startTime": str(start_time),
        "endTime": str(end_time),
        "interval": str(monitor_period),
        "metricId": "cpu_usage",
        "urn": cluster_urn
    }]
    LOG.debug("get_cpu_usage body:%s", json.dumps(body))
    response = self.fc_client.post(self.site.metric_curvedata_uri,
                                   data=body)
    LOG.debug("get_cpu_usage body:%s response:%s",
              json.dumps(body), json.dumps(response))

    if response and response["items"]:
        metric_value = response["items"][0]["metricValue"]
        if metric_value:
            value = metric_value[0]["value"]
            # BUG FIX: was "len(metric_value) is 2" — identity
            # comparison on an int is implementation-dependent (and a
            # SyntaxWarning on py3.8+); use equality instead.
            # When two samples came back, prefer the newer one.
            if len(metric_value) == 2 \
                    and metric_value[1]["value"] is not None:
                value = metric_value[1]["value"]
            return value
    return None
def need_del_backup_snapshots(self, snapshot_info_list, volume_urn):
    """Select the snapshots that reference a given volume.

    :param snapshot_info_list: list of snapshot info dicts from FC
    :param volume_urn: volume urn to search for
    :return: the snapshots whose volume list contains the volume, or []
        when none reference it
    :raise fc_exc.InvalidSnapshotInfo: when a matching snapshot is not
        a ready backup/CBTbackup snapshot
    """

    def _is_vol_in_snap(snapshot_info, vol_urn):
        """True when the snapshot's volume uri list contains vol_urn."""
        volume_uri_list = snapshot_info.get('volumeUriList')
        if not isinstance(volume_uri_list, list):
            return False
        return vol_urn in volume_uri_list

    # BUG FIX: was len(filter(...)) — on py3 filter() is a lazy
    # iterator, so len() raises; materialize as a list instead.
    snapshots_with_volume = [snap for snap in snapshot_info_list
                             if _is_vol_in_snap(snap, volume_urn)]

    if not snapshots_with_volume:
        LOG.info("can't find volume %s in snapshot %s"
                 % (volume_urn, snapshot_info_list))
        return []

    for snapshot in snapshots_with_volume:
        # renamed from "type" to avoid shadowing the builtin
        snap_type = snapshot.get('type')
        status = snapshot.get('status')
        if (snap_type != 'backup' and snap_type != 'CBTbackup') \
                or status != 'ready':
            msg = _('snapshot is % s ') % (snap_type)
            LOG.info(msg)
            raise fc_exc.InvalidSnapshotInfo(msg)
    return snapshots_with_volume
def delete_drs_rules(self, cluster, rule_name, rule_type):
    """Remove a DRS rule from the cluster; no-op when it is absent.

    :param cluster: cluster data used to locate and modify the rule
    :param rule_name: name of the DRS rule to delete
    :param rule_type: type of the DRS rule
    :return: None
    """
    rule = self._get_drs_rules_from_cluster(cluster, rule_name, rule_type)
    if rule is None:
        LOG.debug(_("drs rules %s not exists"), rule_name)
        return

    delete_rule = {
        'operationType': constant.DRS_RULES_OP_TYPE_MAP['delete'],
        'ruleIndex': rule['ruleIndex']
    }
    self._modify_cluster(cluster,
                         {'drsSetting': {'drsRules': [delete_rule]}})
    LOG.debug(_("delete drs rules %s succeed"), rule_name)
def attach_interface(self, instance, image_meta, vif):
    """attach_interface

    Attach an interface to a FusionCompute virtual machine; inic
    network interfaces are not considered here.

    :param instance: nova instance
    :param image_meta: image metadata (kept for interface compatibility)
    :param vif: virtual interface to attach
    :return: result of compute_ops.attach_interface
    """
    @utils.func_log_circle(instance)
    @utils.timelimited(constant.CONF.fusioncompute.attach_int_timeout)
    def attach_intf_inner():
        """inner attach interface"""
        extra_specs = self.get_instance_extra_specs(instance)
        return self.compute_ops.attach_interface(instance, vif,
                                                 extra_specs)

    try:
        return attach_intf_inner()
    except Exception as ex:
        LOG.error("Exception %s", ex)
        # BUG FIX: "raise ex" discarded the original traceback;
        # a bare raise re-raises with the traceback intact.
        raise
def _get_volume_meta_data(self, context, volume_id):
    """Fetch volume metadata from cinder.

    :param context: request context forwarded to the volume api
    :param volume_id: id of the volume to look up
    :return: volume data returned by cinder
    """
    LOG.debug(_('get_volume_meta_data enter, volume_id:%s.'), volume_id)
    volume = self._volume_api.get(context, volume_id)
    return volume
def _def_vm_snapshot(snapshot_url):
    """Delete one vm snapshot, tolerating the already-deleted error.

    :param snapshot_url: uri of the snapshot to delete
    :raise fc_exc.InvalidSnapshotInfo: when deletion fails for any
        reason other than "snapshot already deleted"
    """
    try:
        self.delete(snapshot_url)
    except Exception as e:
        # BUG FIX: was "e.message.find('10300109') > 0" — it missed a
        # match at position 0 and e.message does not exist on py3.
        # 10300109 appears to be FC's "already deleted" error code —
        # TODO confirm against the FC API error table.
        if '10300109' in str(e):
            LOG.warn("snapshot %s has been deleted" % snapshot_url)
        else:
            msg = _('del %s snapshot error') % snapshot_url
            raise fc_exc.InvalidSnapshotInfo(msg)
def _reboot_vm_fc():
    """_reboot_vm_fc

    inner reboot vm; failures are logged and deliberately swallowed
    (best effort — the outer caller does not expect an exception)
    :return:
    """
    try:
        self.compute_ops.reboot_vm(instance, reboot_type,
                                   block_device_info)
    except Exception as ex:
        LOG.error(_("reboot_vm exception: %s") % ex)
def list_all_clusters(self):
    """get all cluster info

    :return: list of cluster dicts reported by FC
    """
    LOG.info('list_all_clusters self.site.cluster_uri:%s .'
             % self.site.cluster_uri)
    clusters = self.get(self.site.cluster_uri)['clusters']
    LOG.debug('clusters:%s' % clusters)
    return clusters
def list_instance_uuids(self):
    """list_instance_uuids

    :return: uuids of all FC instances, [] on any failure
    """
    try:
        fc_instances = self._list_instance_uuids()
    except Exception as ex:
        LOG.error(_("list_instance_uuids: %s") % ex)
        return []

    if fc_instances is None:
        LOG.error(_("fc_instances is None"))
        return []

    uuids = []
    for vm in fc_instances:
        uuids.append(vm['uuid'])
    return uuids
def get_available_nodes(self, refresh=True):
    """Returns nodenames of all nodes managed by the compute service."""
    LOG.debug(_("get_available_nodes"))
    node_list = self._get_available_nodes(refresh)
    # node_list is None only when exception is throwed.
    if node_list is None:
        raise nova_exc.HypervisorUnavailable(host='fc-nova-compute')
    return node_list
def ensure_volume(self, volume_info):
    """Ensure volume resource on FC

    :param volume_info: block device info from nova
    :return: list of {'urn', 'mount_device'} dicts, one per mapping
    """
    LOG.debug('volume info is: %s', volume_info)

    volumes = []
    for bdm in volume_info['block_device_mapping']:
        volumes.append({
            'urn': bdm['connection_info']['vol_urn'],
            'mount_device': bdm['mount_device']
        })
    return volumes
def get_total_vm_numbers(self, **kwargs):
    """get_total_vm_numbers

    Get total numbers in fc
    :param kwargs: extra query conditions
    :return: total vm count, 0 when the query yields nothing
    """
    query_result = self._query_vm(limit=1, offset=0, detail=0, **kwargs)
    reported_total = query_result.get('total') if query_result else None
    if not reported_total:
        return 0

    total = int(reported_total)
    LOG.info(_("total instance number is %d."), total)
    return total
def get_available_nodes_without_exception(self, refresh=True):
    """Returns nodenames of all nodes managed by the compute service."""
    LOG.debug(_("get_available_nodes"))
    try:
        nodes = self._get_available_nodes(refresh)
    except Exception as ex:
        LOG.error(_("get_available_nodes: %s") % ex)
        return []
    # swallow the None sentinel as well — callers expect a list
    return nodes if nodes is not None else []
def get_block_device_meta_data(self, context, block_device_info):
    """get volume meta data info from input info

    :param context: request context
    :param block_device_info: block device info from nova
    :return: volume metadata of the first mapping, or None when empty
    """
    LOG.debug('volume info is: %s', block_device_info)

    mappings = block_device_info['block_device_mapping']
    if not mappings:
        return None
    serial = mappings[0]['connection_info']['serial']
    return self._get_volume_meta_data(context, serial)
def audit_pg(self):
    """Remove FC port groups whose neutron network no longer exists.

    Only port groups created by the configured FC user, named
    "<network_name>#<network_id>#<dvs_id>" and belonging to a known
    dvswitch, are candidates for removal.
    """
    context = nova_ctxt.get_admin_context()
    networks = self._neutron.get_all(context=context)
    self._init_all_fc_dvs()

    for pg in self.query_all_pg():
        name_parts = []
        try:
            name_parts = re.split('#', pg['name'])
        except Exception:
            pass
        if len(name_parts) < 3:
            continue

        fc_network_name = name_parts[0]
        fc_network_id = name_parts[1]
        fc_dvs_id = name_parts[2]
        pg_id = self._get_pg_id_pg_date(pg)
        if (fc_network_name is None or fc_network_id is None
                or fc_dvs_id is None or pg_id is None):
            continue

        if fc_dvs_id not in self.dvs_mapping.values():
            continue

        # only touch port groups created by our own FC account
        pg_user = pg.get('userName')
        if pg_user is None \
                or pg_user != constant.CONF.fusioncompute.fc_user:
            continue

        still_in_neutron = False
        for network in networks:
            if network['name'] == fc_network_name \
                    and network['id'] == fc_network_id:
                still_in_neutron = True
                break
        if still_in_neutron:
            continue

        try:
            self.del_port_group(fc_dvs_id, pg_id)
            LOG.warn('port group remove dvs_id=%s,ps_id=%s',
                     fc_dvs_id, pg_id)
        except Exception:
            LOG.error(
                'Error happen while delete port group remove '
                'dvs_id=%s,ps_id=%s', fc_dvs_id, pg_id)
def get_instances_info(self):
    """get_instances_info

    Get all instances info from FusionCompute
    :return: dict of instance info, {} on failure or missing data
    """
    LOG.debug(_("get_instances_info"))
    try:
        instances = self._get_instances_info()
    except Exception as ex:
        LOG.error(_("get_instances_info: %s") % ex)
        return {}
    return instances if instances is not None else {}
def _get_fc_vm(self, vm_info, limit=1, offset=0, detail=2, **kwargs):
    """get fv vm info by conditions

    :param vm_info: identifier used in the error when nothing matches
    :param limit: query page size
    :param offset: query page offset
    :param detail: query detail level
    :param kwargs: query conditions
    :return: FCInstance built from the first matching vm
    :raise exception.InstanceNotFound: when no vm matches
    """
    query_result = self._query_vm(limit=limit, offset=offset,
                                  detail=detail, **kwargs)
    if query_result and query_result['vms']:
        return FCInstance(query_result['vms'][0])

    LOG.error(_("can not find instance %s."), vm_info)
    raise exception.InstanceNotFound(instance_id=vm_info)
def _create_vm():
    """Create the vm on FC with extra specs and configured password.

    :return: None
    """
    extra_specs = self.get_instance_extra_specs(instance)
    LOG.debug(_("extra_specs is %s."), jsonutils.dumps(extra_specs))

    # only pass the admin password through when configured to do so
    if constant.CONF.fusioncompute.use_admin_pass:
        vm_password = admin_password
    else:
        vm_password = None

    # create vm on FC
    self.compute_ops.create_vm(context, instance, network_info,
                               block_device_info, image_meta,
                               injected_files, vm_password, extra_specs)
def _raise_if_not_in_status_ok(self, rsp): """if response is not normal,rasise exception :param rsp: :return: """ if rsp.status_code not in self.STATUS_OK: error_info = {} try: error_info = rsp.json() # ignore pylint:disable=W0703 except Exception as excp: LOG.warn('try to get error response content failed: %s', excp) raise exception.RequestError(reason=error_info.get('errorDes'), error_code=error_info.get('errorCode') )
def init_all_cluster(self):
    """get all cluster info

    Cache the FC clusters whose names appear in the configured cluster
    list into self.clusters (name -> cluster dict).
    :return: None
    """
    LOG.debug('self.site.cluster_uri:%s .' % self.site.cluster_uri)

    cfg_cluster_list = utils.split_strip(
        constant.CONF.fusioncompute.clusters)
    cluster_list = self.get(self.site.cluster_uri)['clusters']
    LOG.debug(
        'clusters:%s, split:%s .' % (constant.CONF.fusioncompute.clusters,
                                     ','.join(cfg_cluster_list)))

    self.clusters = dict((cluster['name'], cluster)
                         for cluster in cluster_list
                         if cluster['name'] in cfg_cluster_list)
def _query_snapshot_volumes(snapshot_url):
    """query all volumes in snapshot and record it in list

    :param snapshot_url: uri of the snapshot to query
    :return: list of volume urns in the snapshot, [] when it has none
    :raise fc_exc.InvalidSnapshotInfo: when the query fails for a
        reason other than "snapshot not exist"
    """
    try:
        rsp = self.get(snapshot_url)
    except Exception as e:
        # BUG FIX: was "e.message.find('10300109') > 0" — it missed a
        # match at position 0 and e.message does not exist on py3.
        # 10300109 appears to be FC's "snapshot not exist" error code —
        # TODO confirm against the FC API error table.
        if '10300109' in str(e):
            rsp = {}
        else:
            msg = _('Query %s snapshot error') % snapshot_url
            raise fc_exc.InvalidSnapshotInfo(msg)

    volsnapshots = rsp.get('volsnapshots')
    if not isinstance(volsnapshots, list):
        LOG.info("snapshot not include any volume, %s" % rsp)
        return []
    # materialize as a list: on py3 map() returns a lazy iterator
    return [vol.get('volumeUrn') for vol in volsnapshots]
def query_volume(self, **kwargs):
    '''query_volume

    Fetch a single volume by id from the volume uri.

    :param kwargs: must contain 'id' of the volume to fetch
    :return: response data of GET <volume_uri>/<id>
    '''
    LOG.debug(_("[VRM-CINDER] start query_volume()"))
    volume_id = kwargs.get('id')
    uri = self.site.volume_uri + '/' + volume_id
    return self.get(uri)
def ensure_network(self, network_info, checksum_enable=False,
                   extra_specs=None):
    """Ensure network resource on FC

    :param network_info: network_info from nova, dictionary type
    :param checksum_enable: whether to use the checksum port-group
        adapter variants
    :param extra_specs: flavor extra specs used to pick the dvswitch
    :return: urn of the existing or newly created port group, or None
    :raise fc_exc.DVSwitchNotFound: when no dvswitch matches
    """
    # NOTE: physical network only visible to admin user
    context = nova_ctxt.get_admin_context()
    network = self._get_network_from_neutron(context, network_info)
    LOG.info(_('get network info from neutron: %s'), network)
    network_info['checksum_enable'] = checksum_enable

    dvs_id = self.get_dvs_id(extra_specs, network)
    if not dvs_id:
        raise fc_exc.DVSwitchNotFound(
            dvs_id=network['provider:physical_network'])

    if checksum_enable is True:
        query_adpt = PortGroupSuffixQueryAdapter(network, dvs_id,
                                                 'checksum')
    else:
        query_adpt = PortGroupQueryAdapter(network, dvs_id)
    pg_data = self.query_port_group(query_adpt)

    if not pg_data:
        try:
            if checksum_enable is True:
                create_adpt = PortGroupSuffixCreateAdapter(
                    network, dvs_id, 'checksum')
            else:
                create_adpt = PortGroupCreateAdapter(network, dvs_id)
            pg_data = self.create_port_group(dvs_id, create_adpt)
        except Exception as e:
            # race condition: another worker may have created it first
            LOG.warn(_('create pg failed (%s), will check it again'), e)
            retry_adpt = PortGroupQueryAdapter(network, dvs_id)
            pg_data = self.query_port_group(retry_adpt)

    return pg_data['urn'] if pg_data else None
def _get_dvs_id_by_dvs_name(self, dvs_name=None): """get dvswitch id from cache according to physical network name :param dvs_name: :return: """ if dvs_name is None: return None LOG.debug(_("physnet_name is %s"), dvs_name) dvs_id = self.dvs_mapping.get(dvs_name) if not dvs_id: self._init_all_fc_dvs() else: if not self._is_dvs_in_hypervisor(dvs_id): self._init_all_fc_dvs() return self.dvs_mapping.get(dvs_name)