Example #1
    def __init__(self, virtapi):
        LOG.info(_('begin to init FusionComputeDriver ...'))
        super(FusionComputeDriver, self).__init__(virtapi)

        self._client = FCBaseClient(
            constant.CONF.fusioncompute.fc_ip,
            constant.CONF.fusioncompute.fc_user,
            crypt.decrypt(constant.CONF.fusioncompute.fc_pwd),
            constant.FC_DRIVER_JOINT_CFG['user_type'],
            ssl=True,
            port=constant.FC_DRIVER_JOINT_CFG['fc_port'],
            api_version=constant.FC_DRIVER_JOINT_CFG['api_version'],
            request_time_out=constant.FC_DRIVER_JOINT_CFG['request_time_out'])
        self._client.set_default_site()

        # task ops is needed by the other ops, init it first
        self.task_ops = taskops.TaskOperation(self._client)
        FC_MGR.set_client(self._client)

        self.network_ops = networkops.NetworkOps(self._client, self.task_ops)
        self.volume_ops = volumeops.VolumeOps(self._client, self.task_ops)
        self.cluster_ops = fc_cluster.ClusterOps(self._client, self.task_ops)
        self.compute_ops = computeops.ComputeOps(self._client, self.task_ops,
                                                 self.network_ops,
                                                 self.volume_ops,
                                                 self.cluster_ops)
Example #2
    def __init__(self, virtapi):
        LOG.info(_("begin to init FusionComputeDriver ..."))
        super(FusionComputeDriver, self).__init__(virtapi)

        self._client = FCBaseClient(
            constant.CONF.fusioncompute.fc_ip,
            constant.CONF.fusioncompute.fc_user,
            crypt.decrypt(constant.CONF.fusioncompute.fc_pwd),
            constant.FC_DRIVER_JOINT_CFG["user_type"],
            ssl=True,
            port=constant.FC_DRIVER_JOINT_CFG["fc_port"],
            api_version=constant.FC_DRIVER_JOINT_CFG["api_version"],
            request_time_out=constant.FC_DRIVER_JOINT_CFG["request_time_out"],
        )
        self._client.set_default_site()

        # task ops is needed by the other ops, init it first
        self.task_ops = taskops.TaskOperation(self._client)
        FC_MGR.set_client(self._client)

        self.network_ops = networkops.NetworkOps(self._client, self.task_ops)
        self.volume_ops = volumeops.VolumeOps(self._client, self.task_ops)
        self.cluster_ops = fc_cluster.ClusterOps(self._client, self.task_ops)
        self.compute_ops = computeops.ComputeOps(
            self._client, self.task_ops, self.network_ops, self.volume_ops, self.cluster_ops
        )
Example #3
    def attach_interface(self, instance, vif):
        """
        Send message to fusion compute virtual machine

        :param instance:
        :param vif:
        :return: response : {"taskUrn": string, "taskUri": string}
        """
        LOG.debug(
            _("trying to attach interface, vm name: %s,"
              "vm uuid: %s, vif info: %s"), instance['name'], instance['uuid'],
            vif)

        pg_urn = self._network_ops.ensure_network(vif['network'])
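        # build the virtual switch port (vsp) body for the FC 'nics' action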
        vsp_body = {
            'name': vif['id'],
            'portId': vif['id'],
            'portGroupUrn': pg_urn,
            'mac': vif['address']
        }
        LOG.info("the vsp information is %s", vsp_body)

        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        attach_interface_uri = fc_vm.get_vm_action_uri('nics')

        response = self.post(attach_interface_uri,
                             data=vsp_body,
                             excp=exception.InterfaceAttachFailed)
        LOG.info('send attach interface finished, return is: %s',
                 jsonutils.dumps(response))
        return response
Example #4
    def live_migration(self, instance_ref, nodename):
        """Live migration of an instance to another host.

        :param instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param nodename: destination node name

        """
        LOG.info(_("trying to migrate vm: %s.") % instance_ref['name'])

        # get destination cluster urn
        cluster_urn = self._cluster_ops.get_cluster_urn_by_nodename(nodename)
        if not cluster_urn:
            raise fc_exc.ClusterNotFound(cluster_name=nodename)
        LOG.debug(_("get cluster urn: %s."), cluster_urn)

        # generate migrate url and post msg to FC
        body = {
            'location': cluster_urn
        }
        fc_vm = FC_MGR.get_vm_by_uuid(instance_ref)
        self.post(fc_vm.get_vm_action_uri('migrate'), data=body,
                  excp=exception.MigrationError)
        LOG.info(_("migrate vm %s success" % fc_vm.name))
Example #5
    def detach_volume(self, connection_info, instance):
        """
        Detach volume for vm
        :param connection_info:
        :param instance:
        :return:
        """
        LOG.info(_("trying to detach vol for vm: %s.") % instance['name'])

        # 1. volume can only be detached when vm is running or stopped
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        if fc_vm.status not in [
                constant.VM_STATUS.RUNNING, constant.VM_STATUS.STOPPED
        ]:
            reason = _("vm status is not running or stopped !")
            raise fc_exc.InstanceDetachvolFailure(reason=reason)

        # 2. ignore this op when the vm does not have this volume
        vol_urn = self._get_vol_urn_from_connection(connection_info)
        if self._check_if_vol_in_instance(fc_vm, vol_urn) is False:
            LOG.info(_("vol %s is not in vm %s, consider it success"), vol_urn,
                     fc_vm.name)
            return

        # 3. detach this volume
        self._volume_action(self._volume_ops.detach_volume, vol_urn, fc_vm)
Example #6
    def detach_volume(self, connection_info, instance):
        """
        Detach volume for vm
        :param connection_info:
        :param instance:
        :return:
        """
        LOG.info(_("trying to detach vol for vm: %s.") % instance['name'])

        # 1. volume can only be detached when vm is running or stopped
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        if fc_vm.status not in [constant.VM_STATUS.RUNNING,
                                constant.VM_STATUS.STOPPED]:
            reason = _("vm status is not running or stopped !")
            raise fc_exc.InstanceDetachvolFailure(reason=reason)

        # 2. ignore this op when the vm does not have this volume
        vol_urn = self._get_vol_urn_from_connection(connection_info)
        if self._check_if_vol_in_instance(fc_vm, vol_urn) is False:
            LOG.info(_("vol %s is not in vm %s, consider it success"),
                     vol_urn, fc_vm.name)
            return

        # 3. detach this volume
        self._volume_action(self._volume_ops.detach_volume, vol_urn, fc_vm)
Example #7
    def attach_volume(self, connection_info, instance, mountpoint):
        """
        Attach volume for vm
        :param connection_info:
        :param instance:
        :param mountpoint:
        :return:
        """
        LOG.info(_("trying to attach vol for vm: %s.") % instance['name'])
        # 0. set qos io
        self._volume_ops.set_qos_specs_to_volume(connection_info)

        # 1. volume can only be attached when vm is running or stopped
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        if fc_vm.status not in [
                constant.VM_STATUS.RUNNING, constant.VM_STATUS.STOPPED
        ]:
            reason = _("vm status is not running or stopped !")
            raise fc_exc.InstanceAttachvolFailure(reason=reason)

        # 2. ignore this op when vm already has this volume
        vol_urn = self._get_vol_urn_from_connection(connection_info)
        if self._check_if_vol_in_instance(fc_vm, vol_urn) is True:
            LOG.info(_("vm %s already has vol %s, consider it success"),
                     fc_vm.name, vol_urn)
            return

        # 3. attach this volume
        self._volume_action(self._volume_ops.attach_volume, vol_urn, fc_vm,
                            mountpoint)
Example #8
    def request_msg(self, method, path, data=None, headers=None, **kwargs):
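        # the False flag requests the cached auth token; True below forces a refresh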
        req_headers = self._update_and_get_headers(headers, False)

        # set default request time out
        kwargs['timeout'] = kwargs.get('timeout', self.__request_time_out)
        rsp = self._request(method, path, data, headers=req_headers, **kwargs)

        if rsp.status_code in self.STATUS_NO_AUTH:
            LOG.info('token may have expired, fetch again.')
            req_headers = self._update_and_get_headers(headers, True)
            rsp = self._request(method, path, data, headers=req_headers,
                                **kwargs)

        # raise if the response status is not OK
        self._raise_if_not_in_status_ok(rsp)
        ret_data = {'response': rsp, 'data': None}

        if rsp.text:
            try:
                ret_data['data'] = rsp.json()
            #ignore pylint:disable=W0703
            except Exception as excp:
                LOG.warn(_('failed to load json response data, %s'), excp)
                ret_data['data'] = rsp.text

        if kwargs.get('need_response', False):
            return ret_data
        return ret_data['data']
Example #9
    def request_msg(self, method, path, data=None, headers=None, **kwargs):
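        # the False flag requests the cached auth token; True below forces a refresh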
        req_headers = self._update_and_get_headers(headers, False)

        # set default request time out
        kwargs['timeout'] = kwargs.get('timeout', self.__request_time_out)
        rsp = self._request(method, path, data, headers=req_headers, **kwargs)

        if rsp.status_code in self.STATUS_NO_AUTH:
            LOG.info('token may have expired, fetch again.')
            req_headers = self._update_and_get_headers(headers, True)
            rsp = self._request(method,
                                path,
                                data,
                                headers=req_headers,
                                **kwargs)

        # raise if the response status is not OK
        self._raise_if_not_in_status_ok(rsp)
        ret_data = {'response': rsp, 'data': None}

        if rsp.text:
            try:
                ret_data['data'] = rsp.json()
            #ignore pylint:disable=W0703
            except Exception as excp:
                LOG.warn(_('failed to load json response data, %s'), excp)
                ret_data['data'] = rsp.text

        if kwargs.get('need_response', False):
            return ret_data
        return ret_data['data']
Example #10
    def _modify_boot_option_if_needed(self, instance, fc_vm):
        """

        :param instance: OpenStack instance object
        :param fc_vm: FusionCompute vm object
        :return:
        """

        new_boot_option = utils.get_boot_option_from_metadata(
            instance.get('metadata'))

        old_boot_option = None
        if 'vmConfig' in fc_vm:
            vm_property = fc_vm['vmConfig'].get('properties')
            old_boot_option = (vm_property.get('bootOption')
                               if vm_property else None)

        if new_boot_option and old_boot_option and \
           new_boot_option != old_boot_option:
            LOG.info(_("trying to modify boot option from %s to %s") %
                     (old_boot_option, new_boot_option))
            body = {
                'properties': {
                    'bootOption': new_boot_option
                }
            }
            try:
                self.modify_vm(instance, vm_config=body)
            except Exception as msg:
                LOG.error(_("modify boot option has exception: %s") % msg)
Example #11
    def finish_revert_migration(self, instance, power_on=True):
        """

        :param instance:
        :param power_on:
        :return:
        """

        LOG.info(_("begin finish_revert_migration ..."))

        # 1. get flavor info from fc
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        #ignore pylint:disable=W0612
        old_flavor, new_flavor = self._get_flavor_from_group(fc_vm.group)

        # 2. check cpu mem changes
        location = self._cluster_ops.get_cluster_urn_by_nodename(
            instance['node'])
        data = self._generate_vm_spec_info(location=location,
                                           flavor=old_flavor)
        self.modify_vm(fc_vm, vm_config=data)
        LOG.info(_("modify cpu and mem success."))

        # 5. clear vm group info
        self._reset_vm_group(fc_vm)

        # 6. power on vm if needed
        if power_on:
            self.start_vm(instance)
Example #12
    def _modify_boot_option_if_needed(self, instance, fc_vm):
        """

        :param instance: OpenStack instance object
        :param fc_vm: FusionCompute vm object
        :return:
        """

        new_boot_option = utils.get_boot_option_from_metadata(
            instance.get('metadata'))

        old_boot_option = None
        if 'vmConfig' in fc_vm:
            vm_property = fc_vm['vmConfig'].get('properties')
            old_boot_option = (vm_property.get('bootOption')
                               if vm_property else None)

        if new_boot_option and old_boot_option and \
           new_boot_option != old_boot_option:
            LOG.info(
                _("trying to modify boot option from %s to %s") %
                (old_boot_option, new_boot_option))
            body = {'properties': {'bootOption': new_boot_option}}
            try:
                self.modify_vm(instance, vm_config=body)
            except Exception as msg:
                LOG.error(_("modify boot option has exception: %s") % msg)
Example #13
    def attach_volume(self, connection_info, instance, mountpoint):
        """
        Attach volume for vm
        :param connection_info:
        :param instance:
        :param mountpoint:
        :return:
        """
        LOG.info(_("trying to attach vol for vm: %s.") % instance['name'])
        # 0. set qos io
        self._volume_ops.set_qos_specs_to_volume(connection_info)

        # 1. volume can only be attached when vm is running or stopped
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        if fc_vm.status not in [constant.VM_STATUS.RUNNING,
                                constant.VM_STATUS.STOPPED]:
            reason = _("vm status is not running or stopped !")
            raise fc_exc.InstanceAttachvolFailure(reason=reason)

        # 2. ignore this op when vm already has this volume
        vol_urn = self._get_vol_urn_from_connection(connection_info)
        if self._check_if_vol_in_instance(fc_vm, vol_urn) is True:
            LOG.info(_("vm %s already has vol %s, consider it success"),
                     fc_vm.name, vol_urn)
            return

        # 3. attach this volume
        self._volume_action(self._volume_ops.attach_volume,
                            vol_urn, fc_vm, mountpoint)
Example #14
    def attach_interface(self, instance, vif):
        """
        Send message to fusion compute virtual machine

        :param instance:
        :param vif:
        :return: response : {"taskUrn": string, "taskUri": string}
        """
        LOG.debug(_("trying to attach interface, vm name: %s,"
                    "vm uuid: %s, vif info: %s"), instance['name'],
                  instance['uuid'], vif)

        pg_urn = self._network_ops.ensure_network(vif['network'])
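        # build the virtual switch port (vsp) body for the FC 'nics' action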
        vsp_body = {
            'name': vif['id'],
            'portId': vif['id'],
            'portGroupUrn': pg_urn,
            'mac': vif['address']
        }
        LOG.info("the vsp information is %s", vsp_body)

        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        attach_interface_uri = fc_vm.get_vm_action_uri('nics')

        response = self.post(attach_interface_uri,
                             data=vsp_body,
                             excp=exception.InterfaceAttachFailed)
        LOG.info('send attach interface finished, return is: %s',
                 jsonutils.dumps(response))
        return response
Example #15
    def delete_vm(self,
                  context,
                  instance,
                  block_device_info=None,
                  destroy_disks=True):
        """Delete VM on FC

        :param context:
        :param instance:
        :param block_device_info:
        :param destroy_disks:
        :return:
        """

        # when reverting or confirming a resize, only stop the vm; in these
        # cases the task state is resize_reverting or resize_confirming
        if instance and (instance.get('task_state') == 'resize_reverting'
                         or instance.get('task_state') == 'resize_confirming'):
            LOG.info(_('revert resize now, here only stop vm.'))
            try:
                self.stop_vm(instance)
            except Exception as e:
                LOG.warn(_('stop vm failed, trigger rollback'))
                raise exception.InstanceFaultRollback(inner_exception=e)
            return

        try:
            fc_vm = FC_MGR.get_vm_by_uuid(instance)
        except exception.InstanceNotFound:
            LOG.warn(_('instance exists no more, ignore this deletion.'))
            return

        # detach volume created by cinder
        if block_device_info:
            LOG.info(_('will stop vm before detaching cinder volumes.'))
            self.stop_vm(instance)
            for vol in block_device_info['block_device_mapping']:
                self.detach_volume(vol['connection_info'], instance)

        # if vm is in fault-resuming or unknown status, stop it before delete
        if fc_vm.status in (constant.VM_STATUS.UNKNOWN,
                            constant.VM_STATUS.FAULTRESUMING):
            LOG.debug(
                _("vm %s status is fault-resuming or unknown, "
                  "stop it before delete."), fc_vm.uri)
            self.stop_vm(instance)

        self._delete_vm_with_fc_vm(fc_vm, destroy_disks)

        # update affinity group info if needed
        try:
            self._update_drs_rules(instance)
            self._update_affinity_groups(context, instance)
        #ignore pylint:disable=W0703
        except Exception as excp:
            utils.log_exception(excp)
            LOG.error(_('update affinity group info failed!'))
Example #16
    def change_instance_info(self, instance):

        LOG.info(_("trying to change instance display_name = %s"),
                 instance['display_name'])

        body = {'name': instance['display_name']}
        try:
            self.modify_vm(instance, vm_config=body)
        except Exception as msg:
            LOG.error(_("change_instance_info has exception, msg = %s") % msg)
Example #17
    def get_total_vm_numbers(self, **kwargs):
        """
        Get total vm numbers in fc
        :return:
        """
        instances = self._query_vm(limit=1, offset=0, detail=0, **kwargs)
        if not instances or not instances.get('total'):
            return 0
        total = int(instances.get('total'))
        LOG.info(_("total instance number is %d."), total)
        return total
Example #18
    def change_instance_info(self, instance):

        LOG.info(_("trying to change instance display_name = %s"),
                 instance['display_name'])

        body = {'name': instance['display_name']}
        try:
            self.modify_vm(instance, vm_config=body)
        except Exception as msg:
            LOG.error(_("change_instance_info has exception, msg = %s")
                      % msg)
Example #19
    def _delete_vm_with_fc_vm(self, fc_vm, destroy_disks=True):
        """
        delete vm with fc instance, inner function
        :param fc_vm:
        :param destroy_disks:
        :return:
        """
        reserve_disks = {'isReserveDisks': 0 if destroy_disks else 1}
        LOG.info(_('Deleting VM on FC, instance: %s reserve_disks %s'),
                 fc_vm.name, jsonutils.dumps(reserve_disks))

        self.delete(utils.build_uri_with_params(fc_vm.uri, reserve_disks))
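
The helper utils.build_uri_with_params is not shown on this page; a minimal sketch of what it plausibly does, assuming it only joins query parameters onto the URI (hypothetical, URL escaping omitted for brevity):

    def build_uri_with_params(uri, params):
        # hypothetical helper: append query parameters to a URI,
        # e.g. ('/vms/1', {'isReserveDisks': 0}) -> '/vms/1?isReserveDisks=0'
        query = '&'.join('%s=%s' % (k, v) for k, v in params.items())
        return '%s?%s' % (uri, query)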
Example #20
    def delete_vm(self, context, instance, block_device_info=None,
                  destroy_disks=True):
        """Delete VM on FC

        :param context:
        :param instance:
        :param block_device_info:
        :param destroy_disks:
        :return:
        """

        # when reverting or confirming a resize, only stop the vm; in these
        # cases the task state is resize_reverting or resize_confirming
        if instance and (instance.get('task_state') == 'resize_reverting'
                         or instance.get('task_state') == 'resize_confirming'):
            LOG.info(_('revert resize now, here only stop vm.'))
            try:
                self.stop_vm(instance)
            except Exception as e:
                LOG.warn(_('stop vm failed, trigger rollback'))
                raise exception.InstanceFaultRollback(inner_exception=e)
            return

        try:
            fc_vm = FC_MGR.get_vm_by_uuid(instance)
        except exception.InstanceNotFound:
            LOG.warn(_('instance exists no more, ignore this deletion.'))
            return

        # detach volume created by cinder
        if block_device_info:
            LOG.info(_('will stop vm before detaching cinder volumes.'))
            self.stop_vm(instance)
            for vol in block_device_info['block_device_mapping']:
                self.detach_volume(vol['connection_info'], instance)

        # if vm is in fault-resuming or unknown status, stop it before delete
        if fc_vm.status in (constant.VM_STATUS.UNKNOWN,
                            constant.VM_STATUS.FAULTRESUMING):
            LOG.debug(_("vm %s status is fault-resuming or unknown, "
                        "stop it before delete."), fc_vm.uri)
            self.stop_vm(instance)

        self._delete_vm_with_fc_vm(fc_vm, destroy_disks)

        # update affinity group info if needed
        try:
            self._update_drs_rules(instance)
            self._update_affinity_groups(context, instance)
        #ignore pylint:disable=W0703
        except Exception as excp:
            utils.log_exception(excp)
            LOG.error(_('update affinity group info failed!'))
Example #21
    def confirm_migration(self, instance):
        """

        :param instance:
        :return:
        """

        LOG.info(_("begin confirm_migration ..."))

        # clear vm group info
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        self._reset_vm_group(fc_vm)
Example #22
    def change_instance_metadata(self, instance):
        """

        :param instance:
        :return:
        """
        LOG.info(_("trying to change metadata for vm: %s.") % instance['name'])

        try:
            fc_vm = FC_MGR.get_vm_by_uuid(instance)
            self._modify_boot_option_if_needed(instance, fc_vm)
        #ignore pylint:disable=W0703
        except Exception as msg:
            LOG.error(
                _("change_instance_metadata has exception, msg = %s") % msg)
Example #23
    def change_instance_metadata(self, instance):
        """

        :param instance:
        :return:
        """
        LOG.info(_("trying to change metadata for vm: %s.") % instance['name'])

        try:
            fc_vm = FC_MGR.get_vm_by_uuid(instance)
            self._modify_boot_option_if_needed(instance, fc_vm)
        #ignore pylint:disable=W0703
        except Exception as msg:
            LOG.error(_("change_instance_metadata has exception, msg = %s")
                      % msg)
Example #24
    def suspend_vm(self, instance):
        """suspend vm on FC

        :param instance:nova.objects.instance.Instance
        :return:
        """

        LOG.info(_("trying to suspend vm: %s."), instance['name'])
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        if fc_vm.status == constant.VM_STATUS.RUNNING:
            self.post(fc_vm.get_vm_action_uri('suspend'),
                      excp=exception.InstanceFaultRollback)
            LOG.info(_("suspend vm %s success"), fc_vm.name)
        else:
            LOG.error(_("error vm status: %s.") % fc_vm.status)
            raise exception.InstanceFaultRollback
Example #25
    def _update_affinity_groups(self, context, instance):
        """

        :param context:
        :param instance:
        :return:
        """

        groups = affinitygroup_obj.AffinityGroupList().get_all(context)
        for group in groups:
            vm_id = str(instance.get('id'))
            all_vms = group.get_all_vms(context)
            if vm_id in all_vms:
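                # an affinity rule needs at least two members; if removing this
                # vm would leave fewer than two, remove all vms from the group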
                vms_to_del = [vm_id] if len(all_vms) > 2 else all_vms
                for vm_to_del in vms_to_del:
                    LOG.info(_('delete vm %s from affinity group %s'),
                             vm_to_del, group.id)
                    group.delete_vm(context, vm_to_del)
Example #26
    def snapshot(self, context, instance, image_href, update_task_state):
        """
        Create sys vol image and upload to glance
        :param instance:
        :param image_href:
        :param update_task_state:
        :return:
        """

        if not constant.CONF.fusioncompute.fc_image_path:
            LOG.error(_("config option fc_image_path is None."))
            raise fc_exc.InvalidImageDir()

        # 0.get image service and image id
        _image_service = glance.get_remote_image_service(context, image_href)
        snapshot_image_service, image_id = _image_service

        # 1.import sys vol to nfs dir
        LOG.info(_("begin uploading sys vol to glance ..."))
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        sys_vol = self._get_sys_vol_from_vm_info(fc_vm)
        if not sys_vol:
            raise exception.DiskNotFound(_("cannot find sys volume."))

        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
        self._volume_ops.create_image_from_volume(self.site.volume_uri,
                                                  sys_vol,
                                                  image_id)

        # 2.update image metadata
        LOG.info(_("begin update image metadata ..."))
        update_task_state(task_state=task_states.IMAGE_UPLOADING,
                          expected_state=task_states.IMAGE_PENDING_UPLOAD)

        name = snapshot_image_service.show(context, image_id).get('name')
        location = self._generate_image_location(image_id)
        metadata = self._generate_image_metadata(name,
                                                 location,
                                                 fc_vm,
                                                 instance)
        snapshot_image_service.update(context, image_id, metadata)
Example #27
    def finish_migration(self, instance, power_on=True):
        """

        :param instance:
        :param power_on:
        :return:
        """
        LOG.info(_("begin finish_migration ..."))

        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        # update location
        location = self._cluster_ops.get_cluster_urn_by_nodename(
            instance['node'])
        data = self._generate_vm_spec_info(location=location)
        self.modify_vm(fc_vm, vm_config=data)

        # power on vm if needed
        if power_on:
            self.start_vm(instance)

        LOG.info(_("modify location success, new location %s."), location)
Example #28
    def detach_interface(self, instance, vif):
        """
        Send message to fusion compute virtual machine

        :param instance:
        :param vif:
        :return: response : {"taskUrn": string, "taskUri": string}
        if the nic does not exist, return {}, else {"taskUrn": string,
        "taskUri": string}
        """
        LOG.debug(_("trying to detach interface for vm name: %s,"
                    "vm uuid: %s, vif information is %s"), instance['name'],
                  instance['uuid'], vif)

        response = {}
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        nics = fc_vm["vmConfig"]["nics"]
        LOG.info("nics in FusionCompute is %s", nics)
        nic_uri = None
        for nic in nics:
            if nic['name'] == vif['id']:
                nic_uri = nic['uri']
                break

        if nic_uri:
            detach_interface_uri = nic_uri.replace("nics", "virtualNics")
            LOG.info("detach_interface_uri is %s", detach_interface_uri)
            response = self.delete(detach_interface_uri,
                                   excp=exception.InstanceInvalidState)
        else:
            LOG.warn(_("detach interface for vm name: %s, not exist nic."),
                     instance['name'])
        LOG.info(_('send detach interface finished, return is: %s'),
                 jsonutils.dumps(response))
        return response
Example #29
    def snapshot(self, context, instance, image_href, update_task_state):
        """
        Create sys vol image and upload to glance
        :param instance:
        :param image_href:
        :param update_task_state:
        :return:
        """

        if not constant.CONF.fusioncompute.fc_image_path:
            LOG.error(_("config option fc_image_path is None."))
            raise fc_exc.InvalidImageDir()

        # 0.get image service and image id
        _image_service = glance.get_remote_image_service(context, image_href)
        snapshot_image_service, image_id = _image_service

        # 1.import sys vol to nfs dir
        LOG.info(_("begin uploading sys vol to glance ..."))
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        sys_vol = self._get_sys_vol_from_vm_info(fc_vm)
        if not sys_vol:
            raise exception.DiskNotFound(_("cannot find sys volume."))

        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
        self._volume_ops.create_image_from_volume(self.site.volume_uri,
                                                  sys_vol, image_id)

        # 2.update image metadata
        LOG.info(_("begin update image metadata ..."))
        update_task_state(task_state=task_states.IMAGE_UPLOADING,
                          expected_state=task_states.IMAGE_PENDING_UPLOAD)

        name = snapshot_image_service.show(context, image_id).get('name')
        location = self._generate_image_location(image_id)
        metadata = self._generate_image_metadata(name, location, fc_vm,
                                                 instance)
        snapshot_image_service.update(context, image_id, metadata)
Example #30
    def live_migration(self, instance_ref, nodename):
        """Live migration of an instance to another host.

        :param instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param nodename: destination node name

        """
        LOG.info(_("trying to migrate vm: %s.") % instance_ref['name'])

        # get destination cluster urn
        cluster_urn = self._cluster_ops.get_cluster_urn_by_nodename(nodename)
        if not cluster_urn:
            raise fc_exc.ClusterNotFound(cluster_name=nodename)
        LOG.debug(_("get cluster urn: %s."), cluster_urn)

        # generate migrate url and post msg to FC
        body = {'location': cluster_urn}
        fc_vm = FC_MGR.get_vm_by_uuid(instance_ref)
        self.post(fc_vm.get_vm_action_uri('migrate'),
                  data=body,
                  excp=exception.MigrationError)
        LOG.info(_("migrate vm %s success" % fc_vm.name))
Example #31
    def migrate_disk_and_power_off(self, instance, flavor):
        """
        modify the vm spec info
        :param instance:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param flavor:
        :return:
        """

        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        if fc_vm.status in (constant.VM_STATUS.UNKNOWN,
                            constant.VM_STATUS.FAULTRESUMING):
            LOG.debug(_("vm %s status is fault-resuming or unknown, "
                        "cannot do migrate or resize."), fc_vm.uri)
            raise exception.InstanceFaultRollback

        LOG.info(_("begin power off vm ..."))

        # 1.stop vm
        self.stop_vm(instance)

        # 2.save flavor and vol info in vm
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        old_flavor = self._gen_old_flavor_for_fc(fc_vm)
        new_flavor = self._gen_new_flavor_for_fc(flavor)
        flavor = {
            'old_flavor': old_flavor,
            'new_flavor': new_flavor
        }
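        # stash both flavors in the vm's group attribute so that a later revert
        # can read them back (see _get_flavor_from_group in finish_revert_migration)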
        data = {
            'group': '%s:%s' % (constant.VM_GROUP_FLAG,
                                jsonutils.dumps(flavor))
        }
        self.modify_vm(fc_vm, vm_config=data)
        LOG.info(_("save flavor info success."))

        # 3. check cpu mem changes
        flavor = None
        if self._check_if_need_modify_vm_spec(old_flavor, new_flavor):
            flavor = new_flavor

        data = self._generate_vm_spec_info(flavor=flavor)
        self.modify_vm(fc_vm, vm_config=data)
        LOG.info(_("modify cpu and mem success."))
Example #32
    def unpause_vm(self, instance):
        """Unpause vm on FC

        :param instance:nova.objects.instance.Instance
        :return:
        """
        LOG.info(_("trying to unpause vm: %s."), instance['name'])
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        if fc_vm.status == constant.VM_STATUS.PAUSED:
            self.post(fc_vm.get_vm_action_uri('unpause'),
                      excp=fc_exc.InstanceUnpauseFailure)
            LOG.info(_("unpause vm %s success"), fc_vm.name)
        elif fc_vm.status == constant.VM_STATUS.RUNNING:
            LOG.info(_("vm status is running, consider it success"))
        else:
            reason = _("vm status is %s and cannot be unpaused.") % \
                     fc_vm.status
            raise fc_exc.InstanceUnpauseFailure(reason=reason)
Example #33
    def migrate_disk_and_power_off(self, instance, flavor):
        """
        modify the vm spec info
        :param instance:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param flavor:
        :return:
        """

        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        if fc_vm.status in (constant.VM_STATUS.UNKNOWN,
                            constant.VM_STATUS.FAULTRESUMING):
            LOG.debug(
                _("vm %s status is fault-resuming or unknown, "
                  "cannot do migrate or resize."), fc_vm.uri)
            raise exception.InstanceFaultRollback

        LOG.info(_("begin power off vm ..."))

        # 1.stop vm
        self.stop_vm(instance)

        # 2.save flavor and vol info in vm
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        old_flavor = self._gen_old_flavor_for_fc(fc_vm)
        new_flavor = self._gen_new_flavor_for_fc(flavor)
        flavor = {'old_flavor': old_flavor, 'new_flavor': new_flavor}
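        # stash both flavors in the vm's group attribute so that a later revert
        # can read them back (see _get_flavor_from_group in finish_revert_migration)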
        data = {
            'group':
            '%s:%s' % (constant.VM_GROUP_FLAG, jsonutils.dumps(flavor))
        }
        self.modify_vm(fc_vm, vm_config=data)
        LOG.info(_("save flavor info success."))

        # 3. check cpu mem changes
        flavor = None
        if self._check_if_need_modify_vm_spec(old_flavor, new_flavor):
            flavor = new_flavor

        data = self._generate_vm_spec_info(flavor=flavor)
        self.modify_vm(fc_vm, vm_config=data)
        LOG.info(_("modify cpu and mem success."))
Example #34
        def _wait_done():
            """
            wait task result
            """

            task = self.get_task(task_uri)

            if task['status'] == "success":
                LOG.info(_("Task [%s] is successfully." % task_uri))
                ret['success'] = True
                raise loopingcall.LoopingCallDone()
            elif task['status'] == "failed":
                LOG.info(_("Task [%s] is failed, the reason is %s."),
                         task_uri, task['reasonDes'])
                ret['reason'] = task['reasonDes']
                raise loopingcall.LoopingCallDone()
            else:
                LOG.info(_("Task [%s] is running, the progress is %s."),
                         task_uri, task['progress'])
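
_wait_done is the kind of closure that gets handed to a polling loop; a minimal sketch of the surrounding driver code, assuming the usual OpenStack loopingcall idiom (ret is defined before the closure; the 3-second interval is illustrative):

        ret = {'success': False, 'reason': None}
        timer = loopingcall.FixedIntervalLoopingCall(_wait_done)
        # poll the task every 3 seconds until _wait_done raises LoopingCallDone
        timer.start(interval=3).wait()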
Example #35
        def _wait_done():
            """
            wait task result
            """

            task = self.get_task(task_uri)

            if task['status'] == "success":
                LOG.info(_("Task [%s] is successfully." % task_uri))
                ret['success'] = True
                raise loopingcall.LoopingCallDone()
            elif task['status'] == "failed":
                LOG.info(_("Task [%s] is failed, the reason is %s."), task_uri,
                         task['reasonDes'])
                ret['reason'] = task['reasonDes']
                raise loopingcall.LoopingCallDone()
            else:
                LOG.info(_("Task [%s] is running, the progress is %s."),
                         task_uri, task['progress'])
Example #36
    def stop_vm(self, instance):
        """Stop vm on FC

        :param instance:nova.objects.instance.Instance
        :return:
        """
        LOG.info(_("trying to stop vm: %s."), instance['name'])
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        if fc_vm.status == constant.VM_STATUS.STOPPED:
            LOG.info(_("vm has already stopped."))
            return
        elif (fc_vm.status == constant.VM_STATUS.RUNNING
              and fc_vm['pvDriverStatus'] == 'running'):
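            # a safe (guest-cooperative) shutdown needs the pv driver running;
            # otherwise fall back to a forced power-off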
            body = {'mode': 'safe'}
        else:
            body = {'mode': 'force'}

        self.post(fc_vm.get_vm_action_uri('stop'), data=body,
                  excp=exception.InstancePowerOffFailure)
        LOG.info(_("stop vm %s success"), fc_vm.name)
Example #37
    def stop_vm(self, instance):
        """Stop vm on FC

        :param instance:nova.objects.instance.Instance
        :return:
        """
        LOG.info(_("trying to stop vm: %s."), instance['name'])
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        if fc_vm.status == constant.VM_STATUS.STOPPED:
            LOG.info(_("vm has already stopped."))
            return
        elif (fc_vm.status == constant.VM_STATUS.RUNNING
              and fc_vm['pvDriverStatus'] == 'running'):
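            # a safe (guest-cooperative) shutdown needs the pv driver running;
            # otherwise fall back to a forced power-off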
            body = {'mode': 'safe'}
        else:
            body = {'mode': 'force'}

        self.post(fc_vm.get_vm_action_uri('stop'),
                  data=body,
                  excp=exception.InstancePowerOffFailure)
        LOG.info(_("stop vm %s success"), fc_vm.name)
Example #38
    def start_vm(self, instance):
        """Start vm on FC

        :param instance:nova.objects.instance.Instance
        :return:
        """
        LOG.info(_("trying to start vm: %s.") % instance['name'])

        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        if fc_vm.status in [constant.VM_STATUS.STOPPED,
                            constant.VM_STATUS.SUSPENDED]:
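            # re-apply any boot option changed via metadata while the vm was down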
            self._modify_boot_option_if_needed(instance, fc_vm)
            self.post(fc_vm.get_vm_action_uri('start'),
                      excp=exception.InstancePowerOnFailure)
            LOG.info(_("start vm %s success"), fc_vm.name)
        elif fc_vm.status == constant.VM_STATUS.RUNNING:
            LOG.info(_("vm has already running."))
        else:
            reason = _("vm status is %s and cannot be powered on.") % \
                     fc_vm.status
            raise exception.InstancePowerOnFailure(reason=reason)
Example #39
    def start_vm(self, instance):
        """Start vm on FC

        :param instance:nova.objects.instance.Instance
        :return:
        """
        LOG.info(_("trying to start vm: %s.") % instance['name'])

        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        if fc_vm.status in [
                constant.VM_STATUS.STOPPED, constant.VM_STATUS.SUSPENDED
        ]:
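            # re-apply any boot option changed via metadata while the vm was down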
            self._modify_boot_option_if_needed(instance, fc_vm)
            self.post(fc_vm.get_vm_action_uri('start'),
                      excp=exception.InstancePowerOnFailure)
            LOG.info(_("start vm %s success"), fc_vm.name)
        elif fc_vm.status == constant.VM_STATUS.RUNNING:
            LOG.info(_("vm has already running."))
        else:
            reason = _("vm status is %s and cannot be powered on.") % \
                     fc_vm.status
            raise exception.InstancePowerOnFailure(reason=reason)
Example #40
    def detach_interface(self, instance, vif):
        """
        Send message to fusion compute virtual machine

        :param instance:
        :param vif:
        :return: response : {"taskUrn": string, "taskUri": string}
        if the nic does not exist, return {}, else {"taskUrn": string,
        "taskUri": string}
        """
        LOG.debug(
            _("trying to detach interface for vm name: %s,"
              "vm uuid: %s, vif information is %s"), instance['name'],
            instance['uuid'], vif)

        response = {}
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        nics = fc_vm["vmConfig"]["nics"]
        LOG.info("nics in FusionCompute is %s", nics)
        nic_uri = None
        for nic in nics:
            if nic['name'] == vif['id']:
                nic_uri = nic['uri']
                break

        if nic_uri:
            detach_interface_uri = nic_uri.replace("nics", "virtualNics")
            LOG.info("detach_interface_uri is %s", detach_interface_uri)
            response = self.delete(detach_interface_uri,
                                   excp=exception.InstanceInvalidState)
        else:
            LOG.warn(_("detach interface for vm name: %s, not exist nic."),
                     instance['name'])
        LOG.info(_('send detach interface finished, return is: %s'),
                 jsonutils.dumps(response))
        return response
Example #41
    def reconfigure_affinity_group(self,
                                   instances,
                                   affinity_group,
                                   action,
                                   node=None):
        """

        :param instances:
        :param affinity_group:
        :param action:
        :param node:
        :return:
        """

        LOG.info(_("begin reconfigure affinity group ..."))

        # 1. all vms passed in should be in the same cluster
        if node is None and len(instances) > 0:
            node = instances[0].get('node')

        if node is None:
            msg = _("Can not get any node info !")
            raise fc_exc.AffinityGroupException(reason=msg)

        for instance in instances:
            if node != instance.get('node'):
                msg = _("VMs cluster must be same !")
                raise fc_exc.AffinityGroupException(reason=msg)

        # 2. get fc cluster object
        cluster = self._cluster_ops.get_cluster_detail_by_nodename(node)
        if cluster is None:
            raise fc_exc.ClusterNotFound(cluster_name=node)

        # 3. do reconfigure
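        # the nova affinity group maps onto an FC DRS rule keyed by the group id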
        rule_name = str(affinity_group.id)
        rule_type = constant.DRS_RULES_TYPE_MAP.get(affinity_group.type) or \
                    constant.DRS_RULES_TYPE_MAP['affinity']

        if action == 'remove':
            self._cluster_ops.delete_drs_rules(cluster, rule_name, rule_type)
            LOG.info(_("delete affinity group success and return"))
            return

        if action == 'add':
            self._cluster_ops.create_drs_rules(cluster, rule_name, rule_type)
            cluster = self._cluster_ops.get_cluster_detail_by_nodename(node)
            LOG.info(_("create affinity group success"))

        vms = []
        for instance in instances:
            instance['uuid'] = instance['name']
            fc_vm = FC_MGR.get_vm_by_uuid(instance)
            vm_info = {'urn': fc_vm['urn'], 'name': fc_vm['name']}
            vms.append(vm_info)

        try:
            self._cluster_ops.modify_drs_rules(
                cluster, rule_name, rule_type, vms)
        except Exception as exc:
            LOG.error(_("modify drs rules failed !"))
            if action == 'add':
                self._cluster_ops.delete_drs_rules(
                    cluster, rule_name, rule_type)
            raise exc

        LOG.info(_("reconfigure affinity group success"))
Example #42
    def reconfigure_affinity_group(self, instances, affinity_group, action,
                                   node=None):
        """

        :param instances:
        :param affinity_group:
        :param action:
        :param node:
        :return:
        """

        LOG.info(_("begin reconfigure affinity group ..."))

        # 1. all vms passed in should be in the same cluster
        if node is None and len(instances) > 0:
            node = instances[0].get('node')

        if node is None:
            msg = _("Can not get any node info !")
            raise fc_exc.AffinityGroupException(reason=msg)

        for instance in instances:
            if node != instance.get('node'):
                msg = _("VMs cluster must be same !")
                raise fc_exc.AffinityGroupException(reason=msg)

        # 2. get fc cluster object
        cluster = self._cluster_ops.get_cluster_detail_by_nodename(node)
        if cluster is None:
            raise fc_exc.ClusterNotFound(cluster_name=node)

        # 3. do reconfigure
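        # the nova affinity group maps onto an FC DRS rule keyed by the group id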
        rule_name = str(affinity_group.id)
        rule_type = constant.DRS_RULES_TYPE_MAP.get(affinity_group.type) or \
                    constant.DRS_RULES_TYPE_MAP['affinity']

        if action == 'remove':
            self._cluster_ops.delete_drs_rules(cluster, rule_name, rule_type)
            LOG.info(_("delete affinity group success and return"))
            return

        if action == 'add':
            self._cluster_ops.create_drs_rules(cluster, rule_name, rule_type)
            cluster = self._cluster_ops.get_cluster_detail_by_nodename(node)
            LOG.info(_("create affinity group success"))

        vms = []
        for instance in instances:
            instance['uuid'] = instance['name']
            fc_vm = FC_MGR.get_vm_by_uuid(instance)
            vm_info = {
                'urn': fc_vm['urn'],
                'name': fc_vm['name']
            }
            vms.append(vm_info)

        try:
            self._cluster_ops.modify_drs_rules(
                cluster, rule_name, rule_type, vms)
        except Exception as exc:
            LOG.error(_("modify drs rules failed !"))
            if action == 'add':
                self._cluster_ops.delete_drs_rules(
                    cluster, rule_name, rule_type)
            raise exc

        LOG.info(_("reconfigure affinity group success"))