def __init__(self, virtapi):
    """Initialize the FusionCompute driver and all of its op helpers.

    :param virtapi: nova virt api handle passed to the base driver
    """
    LOG.info(_('begin to init FusionComputeDriver ...'))
    super(FusionComputeDriver, self).__init__(virtapi)

    fc_conf = constant.CONF.fusioncompute
    joint_cfg = constant.FC_DRIVER_JOINT_CFG
    self._client = FCBaseClient(
        fc_conf.fc_ip,
        fc_conf.fc_user,
        crypt.decrypt(fc_conf.fc_pwd),
        joint_cfg['user_type'],
        ssl=True,
        port=joint_cfg['fc_port'],
        api_version=joint_cfg['api_version'],
        request_time_out=joint_cfg['request_time_out'])
    self._client.set_default_site()

    # task ops is needed by all the other ops, so build it first
    self.task_ops = taskops.TaskOperation(self._client)
    FC_MGR.set_client(self._client)

    self.network_ops = networkops.NetworkOps(self._client, self.task_ops)
    self.volume_ops = volumeops.VolumeOps(self._client, self.task_ops)
    self.cluster_ops = fc_cluster.ClusterOps(self._client, self.task_ops)
    self.compute_ops = computeops.ComputeOps(self._client, self.task_ops,
                                             self.network_ops,
                                             self.volume_ops,
                                             self.cluster_ops)
def __init__(self, virtapi):
    """Initialize the FusionCompute driver and all of its op helpers.

    :param virtapi: nova virt api handle passed to the base driver
    """
    LOG.info(_("begin to init FusionComputeDriver ..."))
    super(FusionComputeDriver, self).__init__(virtapi)

    fc_conf = constant.CONF.fusioncompute
    joint_cfg = constant.FC_DRIVER_JOINT_CFG
    self._client = FCBaseClient(
        fc_conf.fc_ip,
        fc_conf.fc_user,
        crypt.decrypt(fc_conf.fc_pwd),
        joint_cfg["user_type"],
        ssl=True,
        port=joint_cfg["fc_port"],
        api_version=joint_cfg["api_version"],
        request_time_out=joint_cfg["request_time_out"])
    self._client.set_default_site()

    # task ops is needed by all the other ops, so build it first
    self.task_ops = taskops.TaskOperation(self._client)
    FC_MGR.set_client(self._client)

    self.network_ops = networkops.NetworkOps(self._client, self.task_ops)
    self.volume_ops = volumeops.VolumeOps(self._client, self.task_ops)
    self.cluster_ops = fc_cluster.ClusterOps(self._client, self.task_ops)
    self.compute_ops = computeops.ComputeOps(self._client, self.task_ops,
                                             self.network_ops,
                                             self.volume_ops,
                                             self.cluster_ops)
def detach_volume(self, connection_info, instance):
    """Detach a cinder volume from the given vm.

    :param connection_info: volume connection info from cinder
    :param instance: nova instance
    :return: None
    """
    LOG.info(_("trying to detach vol for vm: %s.") % instance['name'])

    # a volume can only be detached while the vm runs or is stopped
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    valid_status = (constant.VM_STATUS.RUNNING, constant.VM_STATUS.STOPPED)
    if fc_vm.status not in valid_status:
        reason = _("vm status is not running or stopped !")
        raise fc_exc.InstanceDetachvolFailure(reason=reason)

    # skip the request if the vm does not carry this volume at all
    vol_urn = self._get_vol_urn_from_connection(connection_info)
    if self._check_if_vol_in_instance(fc_vm, vol_urn) is False:
        LOG.info(_("vol %s is not in vm %s, consider it success"),
                 vol_urn, fc_vm.name)
        return

    # ask FC to detach the volume
    self._volume_action(self._volume_ops.detach_volume, vol_urn, fc_vm)
def get_running_vms(self, cluster_urn):
    """Return vm counts in this cluster.

    :param cluster_urn: urn of the cluster used as the query scope
    :return: total number of vms reported by FC
    """
    total = FC_MGR.get_total_vm_numbers(scope=cluster_urn)
    return total
def detach_volume(self, connection_info, instance):
    """Detach a cinder volume from the given vm.

    :param connection_info: volume connection info from cinder
    :param instance: nova instance
    :return: None
    """
    LOG.info(_("trying to detach vol for vm: %s.") % instance['name'])

    # a volume can only be detached while the vm runs or is stopped
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    valid_status = (constant.VM_STATUS.RUNNING, constant.VM_STATUS.STOPPED)
    if fc_vm.status not in valid_status:
        reason = _("vm status is not running or stopped !")
        raise fc_exc.InstanceDetachvolFailure(reason=reason)

    # skip the request if the vm does not carry this volume at all
    vol_urn = self._get_vol_urn_from_connection(connection_info)
    if self._check_if_vol_in_instance(fc_vm, vol_urn) is False:
        LOG.info(_("vol %s is not in vm %s, consider it success"),
                 vol_urn, fc_vm.name)
        return

    # ask FC to detach the volume
    self._volume_action(self._volume_ops.detach_volume, vol_urn, fc_vm)
def finish_revert_migration(self, instance, power_on=True):
    """Revert a resize by restoring the saved flavor on FC.

    :param instance: nova instance
    :param power_on: whether to start the vm after reverting
    :return: None
    """
    LOG.info(_("begin finish_revert_migration ..."))

    # read the flavor pair stashed in the vm group field on FC
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    #ignore pylint:disable=W0612
    old_flavor, new_flavor = self._get_flavor_from_group(fc_vm.group)

    # re-apply the old cpu/mem spec at the destination location
    location = self._cluster_ops.get_cluster_urn_by_nodename(
        instance['node'])
    data = self._generate_vm_spec_info(location=location,
                                       flavor=old_flavor)
    self.modify_vm(fc_vm, vm_config=data)
    LOG.info(_("modify cpu and mem success."))

    # clear the temporary group info used to carry the flavors
    self._reset_vm_group(fc_vm)

    # start the vm again when requested
    if power_on:
        self.start_vm(instance)
def attach_interface(self, instance, vif):
    """Attach a virtual interface to a FusionCompute vm.

    :param instance: nova instance
    :param vif: vif description from neutron
    :return: response : {"taskUrn": string, "taskUri": string}
    """
    LOG.debug(_("trying to attach interface, vm name: %s,"
                "vm uuid: %s, vif info: %s"),
              instance['name'], instance['uuid'], vif)

    pg_urn = self._network_ops.ensure_network(vif['network'])
    port_id = vif['id']
    vsp_body = {
        'name': port_id,
        'portId': port_id,
        'portGroupUrn': pg_urn,
        'mac': vif['address']
    }
    LOG.info("the vsp information is %s", vsp_body)

    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    nics_uri = fc_vm.get_vm_action_uri('nics')
    response = self.post(nics_uri,
                         data=vsp_body,
                         excp=exception.InterfaceAttachFailed)
    LOG.info('send attach interface finished, return is: %s',
             jsonutils.dumps(response))
    return response
def get_vnc_console(self, instance, get_opt):
    """Query the vnc console access information of an instance.

    :param instance: the instance info
    :param get_opt: when False return the plain ConsoleVNC object
    :return: HuaweiConsoleVNC or ConsoleVNC
    """
    LOG.debug(_("start to get %s vnc console"), instance['name'])
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    vnc_info = fc_vm.vncAcessInfo
    host_ip = vnc_info.get('hostIp', None)
    host_port = vnc_info.get('vncPort', None)

    # without both ip and port the console is unreachable
    if not host_port or not host_ip:
        raise exception.ConsoleNotFoundForInstance(
            instance_uuid=instance['uuid'])

    if get_opt is False:
        LOG.debug(_("return vnc info is host: %s, port:%s,"
                    " internal_access_path: %s"),
                  host_ip, host_port, 'None')
        return ctype.ConsoleVNC(host=host_ip, port=host_port)

    password = vnc_info.get('vncPassword', None)
    LOG.debug(_("return get vnc info is host: %s, port:%s,"
                " internal_access_path: %s"),
              host_ip, host_port, 'None')
    return hwtype.HuaweiConsoleVNC(host_ip, host_port, password, None)
def attach_volume(self, connection_info, instance, mountpoint):
    """Attach a cinder volume to the given vm.

    :param connection_info: volume connection info from cinder
    :param instance: nova instance
    :param mountpoint: device mount point inside the guest
    :return: None
    """
    LOG.info(_("trying to attach vol for vm: %s.") % instance['name'])
    # configure qos io before the volume goes online
    self._volume_ops.set_qos_specs_to_volume(connection_info)

    # a volume can only be attached while the vm runs or is stopped
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    valid_status = (constant.VM_STATUS.RUNNING, constant.VM_STATUS.STOPPED)
    if fc_vm.status not in valid_status:
        reason = _("vm status is not running or stopped !")
        raise fc_exc.InstanceAttachvolFailure(reason=reason)

    # skip the request if the vm already carries this volume
    vol_urn = self._get_vol_urn_from_connection(connection_info)
    if self._check_if_vol_in_instance(fc_vm, vol_urn) is True:
        LOG.info(_("vm %s already has vol %s, consider it success"),
                 fc_vm.name, vol_urn)
        return

    # ask FC to attach the volume
    self._volume_action(self._volume_ops.attach_volume, vol_urn, fc_vm,
                        mountpoint)
def finish_revert_migration(self, instance, power_on=True):
    """Revert a resize by restoring the saved flavor on FC.

    :param instance: nova instance
    :param power_on: whether to start the vm after reverting
    :return: None
    """
    LOG.info(_("begin finish_revert_migration ..."))

    # read the flavor pair stashed in the vm group field on FC
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    #ignore pylint:disable=W0612
    old_flavor, new_flavor = self._get_flavor_from_group(fc_vm.group)

    # re-apply the old cpu/mem spec at the destination location
    location = self._cluster_ops.get_cluster_urn_by_nodename(
        instance['node'])
    data = self._generate_vm_spec_info(location=location,
                                       flavor=old_flavor)
    self.modify_vm(fc_vm, vm_config=data)
    LOG.info(_("modify cpu and mem success."))

    # clear the temporary group info used to carry the flavors
    self._reset_vm_group(fc_vm)

    # start the vm again when requested
    if power_on:
        self.start_vm(instance)
def attach_interface(self, instance, vif):
    """Attach a virtual interface to a FusionCompute vm.

    :param instance: nova instance
    :param vif: vif description from neutron
    :return: response : {"taskUrn": string, "taskUri": string}
    """
    LOG.debug(_("trying to attach interface, vm name: %s,"
                "vm uuid: %s, vif info: %s"),
              instance['name'], instance['uuid'], vif)

    pg_urn = self._network_ops.ensure_network(vif['network'])
    port_id = vif['id']
    vsp_body = {
        'name': port_id,
        'portId': port_id,
        'portGroupUrn': pg_urn,
        'mac': vif['address']
    }
    LOG.info("the vsp information is %s", vsp_body)

    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    nics_uri = fc_vm.get_vm_action_uri('nics')
    response = self.post(nics_uri,
                         data=vsp_body,
                         excp=exception.InterfaceAttachFailed)
    LOG.info('send attach interface finished, return is: %s',
             jsonutils.dumps(response))
    return response
def get_vnc_console(self, instance, get_opt):
    """Query the vnc console access information of an instance.

    :param instance: the instance info
    :param get_opt: when False return the plain ConsoleVNC object
    :return: HuaweiConsoleVNC or ConsoleVNC
    """
    LOG.debug(_("start to get %s vnc console"), instance['name'])
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    vnc_info = fc_vm.vncAcessInfo
    host_ip = vnc_info.get('hostIp', None)
    host_port = vnc_info.get('vncPort', None)

    # without both ip and port the console is unreachable
    if not host_port or not host_ip:
        raise exception.ConsoleNotFoundForInstance(
            instance_uuid=instance['uuid'])

    if get_opt is False:
        LOG.debug(_("return vnc info is host: %s, port:%s,"
                    " internal_access_path: %s"),
                  host_ip, host_port, 'None')
        return ctype.ConsoleVNC(host=host_ip, port=host_port)

    password = vnc_info.get('vncPassword', None)
    LOG.debug(_("return get vnc info is host: %s, port:%s,"
                " internal_access_path: %s"),
              host_ip, host_port, 'None')
    return hwtype.HuaweiConsoleVNC(host_ip, host_port, password, None)
def detach_interface(self, instance, vif):
    """Detach a virtual interface from a FusionCompute vm.

    :param instance: nova instance
    :param vif: vif description from neutron
    :return: response : {"taskUrn": string, "taskUri": string}
    if the nic does not exited, return {} else {"taskUrn": string,
    "taskUri": string}
    """
    LOG.debug(_("trying to detach interface for vm name: %s,"
                "vm uuid: %s, vif information is %s"),
              instance['name'], instance['uuid'], vif)

    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    nics = fc_vm["vmConfig"]["nics"]
    LOG.info("nics in FusionCompute is %s", nics)

    # locate the nic whose name matches the neutron port id
    nic_uri = next((nic['uri'] for nic in nics
                    if nic['name'] == vif['id']), None)

    response = {}
    if nic_uri:
        detach_interface_uri = nic_uri.replace("nics", "virtualNics")
        LOG.info("detach_interface_uri is %s", detach_interface_uri)
        response = self.delete(detach_interface_uri,
                               excp=exception.InstanceInvalidState)
    else:
        LOG.warn(_("detach interface for vm name: %s, not exist nic."),
                 instance['name'])
    LOG.info(_('send detach interface finished, return is: %s'),
             jsonutils.dumps(response))
    return response
def get_running_vms(self, cluster_urn):
    """Return vm counts in this cluster.

    :param cluster_urn: urn of the cluster used as the query scope
    :return: total number of vms reported by FC
    """
    total = FC_MGR.get_total_vm_numbers(scope=cluster_urn)
    return total
def attach_volume(self, connection_info, instance, mountpoint):
    """Attach a cinder volume to the given vm.

    :param connection_info: volume connection info from cinder
    :param instance: nova instance
    :param mountpoint: device mount point inside the guest
    :return: None
    """
    LOG.info(_("trying to attach vol for vm: %s.") % instance['name'])
    # configure qos io before the volume goes online
    self._volume_ops.set_qos_specs_to_volume(connection_info)

    # a volume can only be attached while the vm runs or is stopped
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    valid_status = (constant.VM_STATUS.RUNNING, constant.VM_STATUS.STOPPED)
    if fc_vm.status not in valid_status:
        reason = _("vm status is not running or stopped !")
        raise fc_exc.InstanceAttachvolFailure(reason=reason)

    # skip the request if the vm already carries this volume
    vol_urn = self._get_vol_urn_from_connection(connection_info)
    if self._check_if_vol_in_instance(fc_vm, vol_urn) is True:
        LOG.info(_("vm %s already has vol %s, consider it success"),
                 fc_vm.name, vol_urn)
        return

    # ask FC to attach the volume
    self._volume_action(self._volume_ops.attach_volume, vol_urn, fc_vm,
                        mountpoint)
def live_migration(self, instance_ref, nodename):
    """Live migration of an instance to another host.

    :param instance_ref:
        nova.db.sqlalchemy.models.Instance object
        instance object that is migrated.
    :param nodename: destination node name
    :raises fc_exc.ClusterNotFound: when no cluster matches nodename
    :raises exception.MigrationError: when FC rejects the migration
    """
    LOG.info(_("trying to migrate vm: %s.") % instance_ref['name'])

    # get destination cluster urn
    cluster_urn = self._cluster_ops.get_cluster_urn_by_nodename(nodename)
    if not cluster_urn:
        raise fc_exc.ClusterNotFound(cluster_name=nodename)
    LOG.debug(_("get cluster urn: %s."), cluster_urn)

    # generate migrate url and post msg to FC
    body = {
        'location': cluster_urn
    }
    fc_vm = FC_MGR.get_vm_by_uuid(instance_ref)
    self.post(fc_vm.get_vm_action_uri('migrate'), data=body,
              excp=exception.MigrationError)
    # BUGFIX: the original interpolated inside _() -- the translated
    # message id never matches the catalog template. Translate the
    # template, then let logging interpolate lazily.
    LOG.info(_("migrate vm %s success"), fc_vm.name)
def start_vm(self):
    """Power on the vm bound to this operation.

    :return: None
    """
    fc_vm = FC_MGR.get_vm_by_uuid(self._instance)
    start_uri = fc_vm.get_vm_action_uri('start')
    self.post(start_uri, excp=exception.InstancePowerOnFailure)
def migrate_disk_and_power_off(self, instance, flavor):
    """Stop the vm and record the resize flavors on FC.

    :param instance:
        nova.db.sqlalchemy.models.Instance object
        instance object that is migrated.
    :param flavor: target flavor of the resize
    :return: None
    """
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    # a vm in fault-resuming or unknown state cannot be resized/migrated
    bad_status = (constant.VM_STATUS.UNKNOWN,
                  constant.VM_STATUS.FAULTRESUMING)
    if fc_vm.status in bad_status:
        LOG.debug(_("vm %s status is fault-resuming or unknown, "
                    "can not do migrate or resize."), fc_vm.uri)
        raise exception.InstanceFaultRollback

    LOG.info(_("begin power off vm ..."))

    # 1. stop the vm first
    self.stop_vm(instance)

    # 2. stash the old/new flavor pair inside the vm group field
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    old_flavor = self._gen_old_flavor_for_fc(fc_vm)
    new_flavor = self._gen_new_flavor_for_fc(flavor)
    flavor = {
        'old_flavor': old_flavor,
        'new_flavor': new_flavor
    }
    data = {
        'group': '%s:%s' % (constant.VM_GROUP_FLAG,
                            jsonutils.dumps(flavor))
    }
    self.modify_vm(fc_vm, vm_config=data)
    LOG.info(_("save flavor info success."))

    # 3. apply cpu/mem changes only when the specs actually differ
    flavor = None
    if self._check_if_need_modify_vm_spec(old_flavor, new_flavor):
        flavor = new_flavor

    data = self._generate_vm_spec_info(flavor=flavor)
    self.modify_vm(fc_vm, vm_config=data)
    LOG.info(_("modify cpu and mem success."))
def modify_vm(self, instance, vm_config=None):
    """Modify vm config in FC.

    :param instance: nova instance
    :param vm_config: config fields to update on FC
    :return: None
    """
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    self.put(fc_vm.uri,
             data=vm_config,
             excp=fc_exc.InstanceModifyFailure)
def delete_vm(self, context, instance, block_device_info=None,
              destroy_disks=True):
    """Delete VM on FC.

    :param context: nova context
    :param instance: nova instance
    :param block_device_info: cinder volumes attached to the vm
    :param destroy_disks: whether the disks are destroyed together
    :return: None
    """
    # when reverting/confirming a resize nova asks for a delete too;
    # in that case only stop the vm so the resize flow can finish
    if instance and instance.get('task_state') in ('resize_reverting',
                                                   'resize_confirming'):
        LOG.info(_('revert resize now, here only stop vm.'))
        try:
            self.stop_vm(instance)
        except Exception as e:
            LOG.warn(_('stop vm failed, trigger rollback'))
            raise exception.InstanceFaultRollback(inner_exception=e)
        return

    try:
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
    except exception.InstanceNotFound:
        LOG.warn(_('instance exist no more. ignore this deleting.'))
        return

    # detach volumes created by cinder before the vm goes away
    if block_device_info:
        LOG.info(_('now will stop vm before detach cinder volumes.'))
        self.stop_vm(instance)
        for vol in block_device_info['block_device_mapping']:
            self.detach_volume(vol['connection_info'], instance)

    # a fault-resuming or unknown vm must be stopped before delete
    if fc_vm.status in (constant.VM_STATUS.UNKNOWN,
                        constant.VM_STATUS.FAULTRESUMING):
        LOG.debug(_("vm %s status is fault-resuming or unknown, "
                    "stop it before delete."), fc_vm.uri)
        self.stop_vm(instance)

    self._delete_vm_with_fc_vm(fc_vm, destroy_disks)

    # update affinity group info if needed; best effort only
    try:
        self._update_drs_rules(instance)
        self._update_affinity_groups(context, instance)
    #ignore pylint:disable=W0703
    except Exception as excp:
        utils.log_exception(excp)
        LOG.error(_('update affinity group info failed !'))
def modify_vm(self, instance, vm_config=None):
    """Modify vm config in FC.

    :param instance: nova instance
    :param vm_config: config fields to update on FC
    :return: None
    """
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    self.put(fc_vm.uri,
             data=vm_config,
             excp=fc_exc.InstanceModifyFailure)
def clone_vm(self, instance, vm_config=None):
    """Clone vm in FC.

    :param instance: source nova instance
    :param vm_config: clone body posted to FC
    :return: FC task info of the clone action
    """
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    clone_uri = fc_vm.get_vm_action_uri('clone')
    return self.post(clone_uri,
                     data=vm_config,
                     excp=fc_exc.InstanceCloneFailure)
def migrate_disk_and_power_off(self, instance, flavor):
    """Stop the vm and record the resize flavors on FC.

    :param instance:
        nova.db.sqlalchemy.models.Instance object
        instance object that is migrated.
    :param flavor: target flavor of the resize
    :return: None
    """
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    # a vm in fault-resuming or unknown state cannot be resized/migrated
    bad_status = (constant.VM_STATUS.UNKNOWN,
                  constant.VM_STATUS.FAULTRESUMING)
    if fc_vm.status in bad_status:
        LOG.debug(_("vm %s status is fault-resuming or unknown, "
                    "can not do migrate or resize."), fc_vm.uri)
        raise exception.InstanceFaultRollback

    LOG.info(_("begin power off vm ..."))

    # 1. stop the vm first
    self.stop_vm(instance)

    # 2. stash the old/new flavor pair inside the vm group field
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    old_flavor = self._gen_old_flavor_for_fc(fc_vm)
    new_flavor = self._gen_new_flavor_for_fc(flavor)
    flavor = {
        'old_flavor': old_flavor,
        'new_flavor': new_flavor
    }
    data = {
        'group': '%s:%s' % (constant.VM_GROUP_FLAG,
                            jsonutils.dumps(flavor))
    }
    self.modify_vm(fc_vm, vm_config=data)
    LOG.info(_("save flavor info success."))

    # 3. apply cpu/mem changes only when the specs actually differ
    flavor = None
    if self._check_if_need_modify_vm_spec(old_flavor, new_flavor):
        flavor = new_flavor

    data = self._generate_vm_spec_info(flavor=flavor)
    self.modify_vm(fc_vm, vm_config=data)
    LOG.info(_("modify cpu and mem success."))
def get_info(self, instance):
    """Get vm power state info from instance.

    :param instance: nova instance
    :return: dict with key 'state' holding the nova power state
    """
    fc_vm = FC_MGR.get_vm_state(instance)
    mapping = constant.VM_POWER_STATE_MAPPING
    return {'state': mapping.get(fc_vm.status, power_state.NOSTATE)}
def get_info(self, instance):
    """Get vm power state info from instance.

    :param instance: nova instance
    :return: dict with key 'state' holding the nova power state
    """
    fc_vm = FC_MGR.get_vm_state(instance)
    mapping = constant.VM_POWER_STATE_MAPPING
    return {'state': mapping.get(fc_vm.status, power_state.NOSTATE)}
def clone_vm(self, instance, vm_config=None):
    """Clone vm in FC.

    :param instance: source nova instance
    :param vm_config: clone body posted to FC
    :return: FC task info of the clone action
    """
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    clone_uri = fc_vm.get_vm_action_uri('clone')
    return self.post(clone_uri,
                     data=vm_config,
                     excp=fc_exc.InstanceCloneFailure)
def confirm_migration(self, instance):
    """Finish a resize by dropping the flavor info saved on FC.

    :param instance: nova instance
    :return: None
    """
    LOG.info(_("begin confirm_migration ..."))

    # clear the vm group info that carried the resize flavors
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    self._reset_vm_group(fc_vm)
def confirm_migration(self, instance):
    """Finish a resize by dropping the flavor info saved on FC.

    :param instance: nova instance
    :return: None
    """
    LOG.info(_("begin confirm_migration ..."))

    # clear the vm group info that carried the resize flavors
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    self._reset_vm_group(fc_vm)
def delete_vm(self, context, instance, block_device_info=None,
              destroy_disks=True):
    """Delete VM on FC.

    :param context: nova context
    :param instance: nova instance
    :param block_device_info: cinder volumes attached to the vm
    :param destroy_disks: whether the disks are destroyed together
    :return: None
    """
    # when reverting/confirming a resize nova asks for a delete too;
    # in that case only stop the vm so the resize flow can finish
    if instance and instance.get('task_state') in ('resize_reverting',
                                                   'resize_confirming'):
        LOG.info(_('revert resize now, here only stop vm.'))
        try:
            self.stop_vm(instance)
        except Exception as e:
            LOG.warn(_('stop vm failed, trigger rollback'))
            raise exception.InstanceFaultRollback(inner_exception=e)
        return

    try:
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
    except exception.InstanceNotFound:
        LOG.warn(_('instance exist no more. ignore this deleting.'))
        return

    # detach volumes created by cinder before the vm goes away
    if block_device_info:
        LOG.info(_('now will stop vm before detach cinder volumes.'))
        self.stop_vm(instance)
        for vol in block_device_info['block_device_mapping']:
            self.detach_volume(vol['connection_info'], instance)

    # a fault-resuming or unknown vm must be stopped before delete
    if fc_vm.status in (constant.VM_STATUS.UNKNOWN,
                        constant.VM_STATUS.FAULTRESUMING):
        LOG.debug(_("vm %s status is fault-resuming or unknown, "
                    "stop it before delete."), fc_vm.uri)
        self.stop_vm(instance)

    self._delete_vm_with_fc_vm(fc_vm, destroy_disks)

    # update affinity group info if needed; best effort only
    try:
        self._update_drs_rules(instance)
        self._update_affinity_groups(context, instance)
    #ignore pylint:disable=W0703
    except Exception as excp:
        utils.log_exception(excp)
        LOG.error(_('update affinity group info failed !'))
def _attach_user_vols(self):
    """Attach every user (cinder) volume described in block_device_info.

    :return: None
    """
    fc_vm = FC_MGR.get_vm_by_uuid(self._instance)
    user_vols = self._volume_ops.ensure_volume(self._block_device_info)
    for disk in user_vols:
        body = {
            'volUrn': disk['urn'],
            'sequenceNum':
                constant.MOUNT_DEVICE_SEQNUM_MAP[disk['mount_device']]
        }
        LOG.debug(_("begin attach user vol: %s"), disk['urn'])
        self._volume_ops.attach_volume(fc_vm, vol_config=body)
def inject_files(self):
    """Push each injected file into the vm via FC's set_vm_data action.

    :return: None
    """
    fc_vm = FC_MGR.get_vm_by_uuid(self._instance)
    for path, contents in self._injected_files:
        body = {
            'fileName': path,
            'vmData': contents
        }
        self.post(fc_vm.get_vm_action_uri('set_vm_data'), data=body)
        LOG.debug(_('inject file %s succeed.') % path)
def list_all_fc_instance(self):
    """List all vm info on FC belonging to the local clusters.

    :return: list of vm dicts filtered by local cluster urns
    """
    fc_all_vms = FC_MGR.get_all_vms(isTemplate='false',
                                    group=constant.VM_GROUP_FLAG)
    cluster_urn_list = self._cluster_ops.get_local_cluster_urn_list()
    # keep only the vms hosted by one of the local clusters
    result = [fc_vm for fc_vm in fc_all_vms
              if fc_vm['clusterUrn'] in cluster_urn_list]
    LOG.debug(_("after filtered by clusters, instance number is %d"),
              len(result))
    return result
def change_instance_metadata(self, instance):
    """Sync nova metadata changes (boot option) to the FC vm.

    :param instance: nova instance
    :return: None
    """
    LOG.info(_("trying to change metadata for vm: %s.") % instance['name'])

    # best effort: never let a metadata sync break the caller
    try:
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        self._modify_boot_option_if_needed(instance, fc_vm)
    #ignore pylint:disable=W0703
    except Exception as msg:
        LOG.error(_("change_instance_metadata has exception, "
                    "msg = %s") % msg)
def list_all_fc_instance(self):
    """List all vm info on FC belonging to the local clusters.

    :return: list of vm dicts filtered by local cluster urns
    """
    fc_all_vms = FC_MGR.get_all_vms(isTemplate='false',
                                    group=constant.VM_GROUP_FLAG)
    cluster_urn_list = self._cluster_ops.get_local_cluster_urn_list()
    # keep only the vms hosted by one of the local clusters
    result = [fc_vm for fc_vm in fc_all_vms
              if fc_vm['clusterUrn'] in cluster_urn_list]
    LOG.debug(_("after filtered by clusters, instance number is %d"),
              len(result))
    return result
def change_instance_metadata(self, instance):
    """Sync nova metadata changes (boot option) to the FC vm.

    :param instance: nova instance
    :return: None
    """
    LOG.info(_("trying to change metadata for vm: %s.") % instance['name'])

    # best effort: never let a metadata sync break the caller
    try:
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        self._modify_boot_option_if_needed(instance, fc_vm)
    #ignore pylint:disable=W0703
    except Exception as msg:
        LOG.error(_("change_instance_metadata has exception, "
                    "msg = %s") % msg)
def _wait_for_boot():
    """Called at an interval until the VM is running."""
    status = FC_MGR.get_vm_by_uuid(instance).status
    if status == constant.VM_STATUS.RUNNING:
        # boot finished successfully: record it and stop polling
        LOG.debug(_("vm %s create success."), instance['name'])
        boot_result['result'] = True
        raise loopingcall.LoopingCallDone()

    if status == constant.VM_STATUS.STOPPED:
        # vm was created but failed to start: stop polling anyway
        LOG.debug(_("create vm %s success, but start failed."),
                  instance['name'])
        raise loopingcall.LoopingCallDone()

    LOG.debug(_("vm %s is still in creating state."), instance['name'])
def _wait_for_boot():
    """Called at an interval until the VM is running."""
    status = FC_MGR.get_vm_by_uuid(instance).status
    if status == constant.VM_STATUS.RUNNING:
        # boot finished successfully: record it and stop polling
        LOG.debug(_("vm %s create success."), instance['name'])
        boot_result['result'] = True
        raise loopingcall.LoopingCallDone()

    if status == constant.VM_STATUS.STOPPED:
        # vm was created but failed to start: stop polling anyway
        LOG.debug(_("create vm %s success, but start failed."),
                  instance['name'])
        raise loopingcall.LoopingCallDone()

    LOG.debug(_("vm %s is still in creating state."), instance['name'])
def suspend_vm(self, instance):
    """Suspend vm on FC.

    :param instance: nova.objects.instance.Instance
    :return: None
    """
    LOG.info(_("trying to suspend vm: %s."), instance['name'])
    fc_vm = FC_MGR.get_vm_by_uuid(instance)

    # only a running vm can be suspended
    if fc_vm.status != constant.VM_STATUS.RUNNING:
        LOG.error(_("error vm status: %s.") % fc_vm.status)
        raise exception.InstanceFaultRollback

    self.post(fc_vm.get_vm_action_uri('suspend'),
              excp=exception.InstanceFaultRollback)
    LOG.info(_("suspend vm %s success"), fc_vm.name)
def suspend_vm(self, instance):
    """Suspend vm on FC.

    :param instance: nova.objects.instance.Instance
    :return: None
    """
    LOG.info(_("trying to suspend vm: %s."), instance['name'])
    fc_vm = FC_MGR.get_vm_by_uuid(instance)

    # only a running vm can be suspended
    if fc_vm.status != constant.VM_STATUS.RUNNING:
        LOG.error(_("error vm status: %s.") % fc_vm.status)
        raise exception.InstanceFaultRollback

    self.post(fc_vm.get_vm_action_uri('suspend'),
              excp=exception.InstanceFaultRollback)
    LOG.info(_("suspend vm %s success"), fc_vm.name)
def _get_vm_by_template_url(self, template_url):
    """Resolve a template vm from its url.

    :param template_url: {vrm site id}:{vm id}, e.g. 239d8a8e:i-00000061
    :return: the FC vm when the url is well formed, otherwise None
    """
    vm_id = None
    if template_url:
        parts = re.split(":", template_url.strip())
        # only a "<site id>:<vm id>" pair is considered valid
        if len(parts) == 2:
            vm_id = parts[1]
    if vm_id is not None:
        return FC_MGR.get_vm_by_id(vm_id)
    return None
def unpause_vm(self, instance):
    """Unpause vm on FC.

    :param instance: nova.objects.instance.Instance
    :return: None
    """
    LOG.info(_("trying to unpause vm: %s."), instance['name'])
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    status = fc_vm.status

    if status == constant.VM_STATUS.PAUSED:
        self.post(fc_vm.get_vm_action_uri('unpause'),
                  excp=fc_exc.InstanceUnpauseFailure)
        LOG.info(_("unpause vm %s success"), fc_vm.name)
    elif status == constant.VM_STATUS.RUNNING:
        # already where we want to be
        LOG.info(_("vm status is running, consider it success"))
    else:
        reason = _("vm status is %s and cannot be unpaused.") % status
        raise fc_exc.InstanceUnpauseFailure(reason=reason)
def unpause_vm(self, instance):
    """Unpause vm on FC.

    :param instance: nova.objects.instance.Instance
    :return: None
    """
    LOG.info(_("trying to unpause vm: %s."), instance['name'])
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    status = fc_vm.status

    if status == constant.VM_STATUS.PAUSED:
        self.post(fc_vm.get_vm_action_uri('unpause'),
                  excp=fc_exc.InstanceUnpauseFailure)
        LOG.info(_("unpause vm %s success"), fc_vm.name)
    elif status == constant.VM_STATUS.RUNNING:
        # already where we want to be
        LOG.info(_("vm status is running, consider it success"))
    else:
        reason = _("vm status is %s and cannot be unpaused.") % status
        raise fc_exc.InstanceUnpauseFailure(reason=reason)
def stop_vm(self, instance):
    """Stop vm on FC.

    :param instance: nova.objects.instance.Instance
    :return: None
    """
    LOG.info(_("trying to stop vm: %s."), instance['name'])
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    if fc_vm.status == constant.VM_STATUS.STOPPED:
        LOG.info(_("vm has already stopped."))
        return

    # a safe (guest-assisted) stop is possible only when the vm runs
    # and its pv driver is alive; otherwise force the power off
    can_safe_stop = (fc_vm.status == constant.VM_STATUS.RUNNING
                     and fc_vm['pvDriverStatus'] == 'running')
    body = {'mode': 'safe' if can_safe_stop else 'force'}

    self.post(fc_vm.get_vm_action_uri('stop'), data=body,
              excp=exception.InstancePowerOffFailure)
    LOG.info(_("stop vm %s success"), fc_vm.name)
def snapshot(self, context, instance, image_href, update_task_state):
    """Create sys vol image and upload to glance.

    :param context: nova context
    :param instance: nova instance
    :param image_href: target glance image reference
    :param update_task_state: callback used to report snapshot progress
    :return: None
    """
    if not constant.CONF.fusioncompute.fc_image_path:
        LOG.error(_("config option fc_image_path is None."))
        raise fc_exc.InvalidImageDir()

    # 0. resolve the glance image service and the image id
    snapshot_image_service, image_id = glance.get_remote_image_service(
        context, image_href)

    # 1. export the system volume into the configured nfs dir
    LOG.info(_("begin uploading sys vol to glance ..."))
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    sys_vol = self._get_sys_vol_from_vm_info(fc_vm)
    if not sys_vol:
        raise exception.DiskNotFound(_("can not find sys volume."))

    update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
    self._volume_ops.create_image_from_volume(self.site.volume_uri,
                                              sys_vol,
                                              image_id)

    # 2. push the generated image metadata back to glance
    LOG.info(_("begin update image metadata ..."))
    update_task_state(task_state=task_states.IMAGE_UPLOADING,
                      expected_state=task_states.IMAGE_PENDING_UPLOAD)
    name = snapshot_image_service.show(context, image_id).get('name')
    location = self._generate_image_location(image_id)
    metadata = self._generate_image_metadata(name,
                                             location,
                                             fc_vm,
                                             instance)
    snapshot_image_service.update(context, image_id, metadata)
def finish_migration(self, instance, power_on=True):
    """Apply the destination location to the vm after a migration/resize.

    :param instance: nova instance
    :param power_on: whether to start the vm afterwards
    :return: None
    """
    LOG.info(_("begin finish_migration ..."))

    fc_vm = FC_MGR.get_vm_by_uuid(instance)

    # move the vm to the cluster backing the destination node
    location = self._cluster_ops.get_cluster_urn_by_nodename(
        instance['node'])
    data = self._generate_vm_spec_info(location=location)
    self.modify_vm(fc_vm, vm_config=data)

    # power on vm if needed
    if power_on:
        self.start_vm(instance)

    LOG.info(_("modify location success, new location %s."), location)
def start_vm(self, instance):
    """Start vm on FC.

    :param instance: nova.objects.instance.Instance
    :return: None
    """
    LOG.info(_("trying to start vm: %s.") % instance['name'])
    fc_vm = FC_MGR.get_vm_by_uuid(instance)

    startable = (constant.VM_STATUS.STOPPED, constant.VM_STATUS.SUSPENDED)
    if fc_vm.status in startable:
        self._modify_boot_option_if_needed(instance, fc_vm)
        self.post(fc_vm.get_vm_action_uri('start'),
                  excp=exception.InstancePowerOnFailure)
        LOG.info(_("start vm %s success"), fc_vm.name)
    elif fc_vm.status == constant.VM_STATUS.RUNNING:
        # nothing to do
        LOG.info(_("vm has already running."))
    else:
        reason = _("vm status is %s and cannot be powered on.") % \
            fc_vm.status
        raise exception.InstancePowerOnFailure(reason=reason)
def finish_migration(self, instance, power_on=True):
    """Apply the destination location to the vm after a migration/resize.

    :param instance: nova instance
    :param power_on: whether to start the vm afterwards
    :return: None
    """
    LOG.info(_("begin finish_migration ..."))

    fc_vm = FC_MGR.get_vm_by_uuid(instance)

    # move the vm to the cluster backing the destination node
    location = self._cluster_ops.get_cluster_urn_by_nodename(
        instance['node'])
    data = self._generate_vm_spec_info(location=location)
    self.modify_vm(fc_vm, vm_config=data)

    # power on vm if needed
    if power_on:
        self.start_vm(instance)

    LOG.info(_("modify location success, new location %s."), location)
def stop_vm(self, instance):
    """Stop vm on FC.

    :param instance: nova.objects.instance.Instance
    :return: None
    """
    LOG.info(_("trying to stop vm: %s."), instance['name'])
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    if fc_vm.status == constant.VM_STATUS.STOPPED:
        LOG.info(_("vm has already stopped."))
        return

    # a safe (guest-assisted) stop is possible only when the vm runs
    # and its pv driver is alive; otherwise force the power off
    can_safe_stop = (fc_vm.status == constant.VM_STATUS.RUNNING
                     and fc_vm['pvDriverStatus'] == 'running')
    body = {'mode': 'safe' if can_safe_stop else 'force'}

    self.post(fc_vm.get_vm_action_uri('stop'), data=body,
              excp=exception.InstancePowerOffFailure)
    LOG.info(_("stop vm %s success"), fc_vm.name)
def reboot_vm(self, instance, reboot_type):
    """Reboot a vm, falling back from soft to hard reboot on failure.

    :param instance: nova instance
    :param reboot_type: constant.REBOOT_TYPE.SOFT or .HARD
    :return: None
    """
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    LOG.debug(_("reboot_vm %s, reboot_type %s, fc_vm.status %s."),
              instance['name'], reboot_type, fc_vm.status)

    # if it is fault-resuming, unknown or migrating, do nothing
    if fc_vm.status in (constant.VM_STATUS.UNKNOWN,
                        constant.VM_STATUS.FAULTRESUMING,
                        constant.VM_STATUS.MIGRATING):
        LOG.debug(_("vm %s status is fault-resuming or unknown "
                    "or migrating, just ignore this reboot action."),
                  fc_vm.uri)
        return

    # if it is stopped or suspended, just start it
    if fc_vm.status in (constant.VM_STATUS.STOPPED,
                        constant.VM_STATUS.SUSPENDED):
        LOG.debug(_("vm %s is stopped, will start vm."), fc_vm.uri)
        self.start_vm(instance)
        return

    # if it is paused, first unpause it
    if fc_vm.status == constant.VM_STATUS.PAUSED:
        self.unpause_vm(instance)

    # modify vm boot type if needed
    self._modify_boot_option_if_needed(instance, fc_vm)

    if reboot_type == constant.REBOOT_TYPE.SOFT:
        try:
            self._reboot_vm(fc_vm, reboot_type)
            return
        except exception.InstanceRebootFailure:
            LOG.debug(_("soft reboot vm %s failed, will hard reboot."),
                      instance['name'])

    # hard reboot was requested, or the soft reboot just failed
    self._reboot_vm(fc_vm, constant.REBOOT_TYPE.HARD)
def start_vm(self, instance):
    """Start vm on FC.

    :param instance: nova.objects.instance.Instance
    :return: None
    """
    LOG.info(_("trying to start vm: %s.") % instance['name'])
    fc_vm = FC_MGR.get_vm_by_uuid(instance)

    startable = (constant.VM_STATUS.STOPPED, constant.VM_STATUS.SUSPENDED)
    if fc_vm.status in startable:
        self._modify_boot_option_if_needed(instance, fc_vm)
        self.post(fc_vm.get_vm_action_uri('start'),
                  excp=exception.InstancePowerOnFailure)
        LOG.info(_("start vm %s success"), fc_vm.name)
    elif fc_vm.status == constant.VM_STATUS.RUNNING:
        # nothing to do
        LOG.info(_("vm has already running."))
    else:
        reason = _("vm status is %s and cannot be powered on.") % \
            fc_vm.status
        raise exception.InstancePowerOnFailure(reason=reason)
def reboot_vm(self, instance, reboot_type):
    """Reboot a VM on FusionCompute.

    Skips transitional states, starts stopped/suspended VMs instead of
    rebooting, unpauses paused VMs first, and degrades a failed soft
    reboot into a hard one.
    """
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    LOG.debug(_("reboot_vm %s, reboot_type %s, fc_vm.status %s."),
              instance['name'], reboot_type, fc_vm.status)

    in_transition = fc_vm.status in (constant.VM_STATUS.UNKNOWN,
                                     constant.VM_STATUS.FAULTRESUMING,
                                     constant.VM_STATUS.MIGRATING)
    if in_transition:
        # Rebooting mid-transition would race with FC; do nothing.
        LOG.debug(
            _("vm %s status is fault-resuming or unknown "
              "or migrating, just ignore this reboot action."), fc_vm.uri)
        return

    powered_down = fc_vm.status in (constant.VM_STATUS.STOPPED,
                                    constant.VM_STATUS.SUSPENDED)
    if powered_down:
        # Starting the VM is equivalent to a reboot from these states.
        LOG.debug(_("vm %s is stopped, will start vm."), fc_vm.uri)
        self.start_vm(instance)
        return

    if fc_vm.status == constant.VM_STATUS.PAUSED:
        # A paused guest must resume before a reboot can take effect.
        self.unpause_vm(instance)

    # Align the boot device with the current config before rebooting.
    self._modify_boot_option_if_needed(instance, fc_vm)

    if reboot_type == constant.REBOOT_TYPE.SOFT:
        try:
            self._reboot_vm(fc_vm, reboot_type)
            return
        except exception.InstanceRebootFailure:
            LOG.debug(_("soft reboot vm %s failed, will hard reboot."),
                      instance['name'])

    # Hard reboot requested, or the soft reboot attempt failed.
    self._reboot_vm(fc_vm, constant.REBOOT_TYPE.HARD)
def snapshot(self, context, instance, image_href, update_task_state):
    """Create an image from the instance's system volume in glance.

    :param context: request context
    :param instance: nova.objects.instance.Instance
    :param image_href: glance image reference to populate
    :param update_task_state: callback reporting snapshot progress
    :raises: fc_exc.InvalidImageDir when fc_image_path is unset
    :raises: exception.DiskNotFound when no system volume is found
    :return: None
    """
    # The NFS export dir is mandatory for uploading the volume.
    if not constant.CONF.fusioncompute.fc_image_path:
        LOG.error(_("config option fc_image_path is None."))
        raise fc_exc.InvalidImageDir()

    # 0.get image service and image id
    image_service, image_id = glance.get_remote_image_service(
        context, image_href)

    # 1.import sys vol to nfs dir
    LOG.info(_("begin uploading sys vol to glance ..."))
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    sys_vol = self._get_sys_vol_from_vm_info(fc_vm)
    if not sys_vol:
        raise exception.DiskNotFound(_("can not find sys volume."))
    update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
    self._volume_ops.create_image_from_volume(
        self.site.volume_uri, sys_vol, image_id)

    # 2.update image metadata
    LOG.info(_("begin update image metadata ..."))
    update_task_state(
        task_state=task_states.IMAGE_UPLOADING,
        expected_state=task_states.IMAGE_PENDING_UPLOAD)
    image_name = image_service.show(context, image_id).get('name')
    image_location = self._generate_image_location(image_id)
    image_metadata = self._generate_image_metadata(
        image_name, image_location, fc_vm, instance)
    image_service.update(context, image_id, image_metadata)
def detach_interface(self, instance, vif):
    """Detach a NIC from a FusionCompute virtual machine.

    :param instance: nova.objects.instance.Instance
    :param vif: the virtual interface dict (matched against FC nic name)
    :return: {"taskUrn": string, "taskUri": string} from FC, or {} when
             the nic does not exist on the VM
    :raises: exception.InstanceInvalidState when the FC delete fails
    """
    LOG.debug(
        _("trying to detach interface for vm name: %s,"
          "vm uuid: %s, vif information is %s"), instance['name'],
        instance['uuid'], vif)

    response = {}
    fc_vm = FC_MGR.get_vm_by_uuid(instance)
    nics = fc_vm["vmConfig"]["nics"]
    LOG.info("nics in FusionCompute is %s", nics)

    # Find the FC nic whose name matches the neutron port id.
    nic_uri = None
    for nic in nics:
        if nic['name'] == vif['id']:
            nic_uri = nic['uri']
            break

    if nic_uri:
        # FC exposes the delete action under the virtualNics path.
        detach_interface_uri = (nic_uri.replace("nics", "virtualNics"))
        LOG.info("detach_interface_uri is %s", detach_interface_uri)
        response = self.delete(detach_interface_uri,
                               excp=exception.InstanceInvalidState)
    else:
        # Fix: LOG.warn is a deprecated alias of LOG.warning.
        LOG.warning(_("detach interface for vm name: %s, not exist nic."),
                    instance['name'])
    LOG.info(_('send detach interface finished, return is: %s'),
             jsonutils.dumps(response))
    return response
def live_migration(self, instance_ref, nodename):
    """Live migration of an instance to another host.

    :param instance_ref: nova.db.sqlalchemy.models.Instance object
        instance object that is migrated.
    :param nodename: destination node name
    :raises: fc_exc.ClusterNotFound when nodename maps to no FC cluster
    :raises: exception.MigrationError when the FC migrate action fails
    :return: None
    """
    # Fix: pass args lazily instead of '%'-formatting eagerly, matching
    # the lazy-logging convention used elsewhere in this file.
    LOG.info(_("trying to migrate vm: %s."), instance_ref['name'])

    # get destination cluster urn
    cluster_urn = self._cluster_ops.get_cluster_urn_by_nodename(nodename)
    if not cluster_urn:
        raise fc_exc.ClusterNotFound(cluster_name=nodename)
    LOG.debug(_("get cluster urn: %s."), cluster_urn)

    # generate migrate url and post msg to FC
    body = {'location': cluster_urn}
    fc_vm = FC_MGR.get_vm_by_uuid(instance_ref)
    self.post(fc_vm.get_vm_action_uri('migrate'), data=body,
              excp=exception.MigrationError)
    # Fix: the original called _("migrate vm %s success" % fc_vm.name),
    # formatting BEFORE translation, so the catalog lookup of the msgid
    # "migrate vm %s success" could never match. Translate the template
    # and let the logger interpolate the argument.
    LOG.info(_("migrate vm %s success"), fc_vm.name)
def instance_exists(self, instance):
    """Return True when the instance is known to FusionCompute.

    :param instance: nova.objects.instance.Instance
    :return: bool
    """
    try:
        # Lookup raises InstanceNotFound when FC has no such VM.
        FC_MGR.get_vm_by_uuid(instance)
    except nova_exc.InstanceNotFound:
        return False
    return True
def get_instances_info(self):
    """Fetch information for every VM known to FusionCompute.

    :return: the result of FC_MGR.get_all_vms_info()
    """
    return FC_MGR.get_all_vms_info()