Example #1
0
    def _modify_boot_option_if_needed(self, instance, fc_vm):
        """

        :param instance: OpenStack instance object
        :param fc_vm: FusionCompute vm object
        :return:
        """

        new_boot_option = utils.get_boot_option_from_metadata(
            instance.get('metadata'))

        old_boot_option = None
        if 'vmConfig' in fc_vm:
            vm_property = fc_vm['vmConfig'].get('properties')
            old_boot_option = vm_property.get('bootOption') if vm_property \
                              else None

        if new_boot_option and old_boot_option and \
           new_boot_option != old_boot_option:
            LOG.info(_("trying to modify boot option from %s to %s") %
                     (old_boot_option, new_boot_option))
            body = {
                'properties': {
                    'bootOption': new_boot_option
                }
            }
            try:
                self.modify_vm(instance, vm_config=body)
            except Exception as msg:
                LOG.error(_("modify boot option has exception: %s") % msg)
Example #2
0
    def _modify_boot_option_if_needed(self, instance, fc_vm):
        """

        :param instance: OpenStack instance object
        :param fc_vm: FusionCompute vm object
        :return:
        """

        new_boot_option = utils.get_boot_option_from_metadata(
            instance.get('metadata'))

        old_boot_option = None
        if 'vmConfig' in fc_vm:
            vm_property = fc_vm['vmConfig'].get('properties')
            old_boot_option = vm_property.get('bootOption') if vm_property \
                              else None

        if new_boot_option and old_boot_option and \
           new_boot_option != old_boot_option:
            LOG.info(
                _("trying to modify boot option from %s to %s") %
                (old_boot_option, new_boot_option))
            body = {'properties': {'bootOption': new_boot_option}}
            try:
                self.modify_vm(instance, vm_config=body)
            except Exception as msg:
                LOG.error(_("modify boot option has exception: %s") % msg)
Example #3
0
        def inner(*args, **kwargs):
            """
            inner function

            :param args: positional args of the decorated function
            :param kwargs: keyword args of the decorated function
            :return:
            """
            try:
                resp = func(*args, **kwargs)
            except fc_exc.RequestError as req_exc:
                if exc:
                    raise exc(str(req_exc.kwargs['reason']))
                raise req_exc

            if isinstance(resp, dict) and resp.get('taskUri'):
                success, reason = task_ops.wait_task_done(resp['taskUri'])
                if not success:
                    LOG.error(_('task failed: %s'), reason)
                    if exc:
                        raise exc(str(reason))
                    raise fc_exc.FusionComputeTaskException(reason=reason)

            return resp
Example #4
0
        def inner(*args, **kwargs):
            """
            inner function

            :param args: positional args of the decorated function
            :param kwargs: keyword args of the decorated function
            :return:
            """
            try:
                resp = func(*args, **kwargs)
            except fc_exc.RequestError as req_exc:
                if exc:
                    raise exc(str(req_exc.kwargs['reason']))
                raise req_exc

            if isinstance(resp, dict) and resp.get('taskUri'):
                success, reason = task_ops.wait_task_done(resp['taskUri'])
                if not success:
                    LOG.error(_('task failed: %s'), reason)
                    if exc:
                        raise exc(str(reason))
                    raise fc_exc.FusionComputeTaskException(reason=reason)

            return resp
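Examples #3 and #4 show only the inner closure of a task-tracking decorator; func, exc, and task_ops live in the enclosing scope. A self-contained sketch of how such a decorator could be assembled, with stand-ins for the FC pieces (RequestError, TaskOps, and the decorator name are assumptions patterned on the snippet):

class RequestError(Exception):
    """Stand-in for fc_exc.RequestError, which carries kwargs."""
    def __init__(self, **kwargs):
        super(RequestError, self).__init__(kwargs.get('reason'))
        self.kwargs = kwargs

class TaskOps(object):
    """Stand-in task tracker; the real one polls the FC task URI."""
    def wait_task_done(self, task_uri):
        return True, None

task_ops = TaskOps()

def task_wrapped(exc=None):
    """Translate RequestError and wait on any task the call returns."""
    def decorator(func):
        def inner(*args, **kwargs):
            try:
                resp = func(*args, **kwargs)
            except RequestError as req_exc:
                if exc:
                    raise exc(str(req_exc.kwargs['reason']))
                raise
            if isinstance(resp, dict) and resp.get('taskUri'):
                success, reason = task_ops.wait_task_done(resp['taskUri'])
                if not success:
                    if exc:
                        raise exc(str(reason))
                    raise RuntimeError(reason)
            return resp
        return inner
    return decorator

@task_wrapped(exc=ValueError)
def create_vm():
    return {'taskUri': '/service/sites/1/tasks/100'}

create_vm()  # waits for the task, then returns the response dict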
Example #5
0
    def delete_vm(self,
                  context,
                  instance,
                  block_device_info=None,
                  destroy_disks=True):
        """Delete VM on FC

        :param context:
        :param instance:
        :param block_device_info:
        :param destroy_disks:
        :return:
        """

        # when reverting or confirming a resize, only stop the vm; the
        # task state will be resize_reverting or resize_confirming
        if instance and (instance.get('task_state') == 'resize_reverting'
                         or instance.get('task_state') == 'resize_confirming'):
            LOG.info(_('revert resize now, here only stop vm.'))
            try:
                self.stop_vm(instance)
            except Exception as e:
                LOG.warn(_('stop vm failed, trigger rollback'))
                raise exception.InstanceFaultRollback(inner_exception=e)
            return

        try:
            fc_vm = FC_MGR.get_vm_by_uuid(instance)
        except exception.InstanceNotFound:
            LOG.warn(_('instance no longer exists; ignoring this deletion.'))
            return

        # detach volume created by cinder
        if block_device_info:
            LOG.info(_('now will stop vm before detach cinder volumes.'))
            self.stop_vm(instance)
            for vol in block_device_info['block_device_mapping']:
                self.detach_volume(vol['connection_info'], instance)

        # if vm is in fault-resuming or unknown status, stop it before delete
        if fc_vm.status == constant.VM_STATUS.UNKNOWN \
                or fc_vm.status == constant.VM_STATUS.FAULTRESUMING:
            LOG.debug(
                _("vm %s status is fault-resuming or unknown, "
                  "stop it before delete."), fc_vm.uri)
            self.stop_vm(instance)

        self._delete_vm_with_fc_vm(fc_vm, destroy_disks)

        # update affinity group info if needed
        try:
            self._update_drs_rules(instance)
            self._update_affinity_groups(context, instance)
        #ignore pylint:disable=W0703
        except Exception as excp:
            utils.log_exception(excp)
            LOG.error(_('update affinity group info failed!'))
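The first guard in delete_vm compares task_state against the two resize states with a chained or; a tuple membership test in a small predicate reads more directly. A sketch (the helper name and constant are assumptions):

# task states during a resize revert/confirm, where delete must only stop
RESIZE_CLEANUP_STATES = ('resize_reverting', 'resize_confirming')

def should_only_stop(instance):
    """True when delete_vm should stop the vm instead of destroying it."""
    return bool(instance) and \
        instance.get('task_state') in RESIZE_CLEANUP_STATES

assert should_only_stop({'task_state': 'resize_reverting'})
assert not should_only_stop({'task_state': None})
assert not should_only_stop(None)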
Example #6
0
    def change_instance_info(self, instance):

        LOG.info(_("trying to change instance display_name = %s"),
                 instance['display_name'])

        body = {'name': instance['display_name']}
        try:
            self.modify_vm(instance, vm_config=body)
        except Exception as msg:
            LOG.error(_("change_instance_info has exception, msg = %s") % msg)
Example #7
0
    def check_input(self):
        """
        check function input params
        :return:
        """
        os_option = self.get_os_options()
        LOG.debug(_('os option: %s.'), jsonutils.dumps(os_option))
        if not (os_option['osType'] and os_option['osVersion']):
            LOG.error('Invalid os option for vm %s!', self._instance['name'])
            raise fc_exc.InvalidOsOption()
Example #8
0
    def change_instance_info(self, instance):

        LOG.info(_("trying to change instance display_name = %s"),
                 instance['display_name'])

        body = {'name':instance['display_name']}
        try:
            self.modify_vm(instance,vm_config=body)
        except Exception as msg:
            LOG.error(_("change_instance_info has exception, msg = %s")
                  % msg)
Example #9
0
    def audit_pg(self):
        context = nova_ctxt.get_admin_context()
        networks = self._neutron.get_all(context=context)

        if len(self.dvs_mapping) == 0:
            self._init_all_fc_dvs()

        pg_list = self.query_all_pg()
        for pg in pg_list:
            pg_name_ayn_list = []
            try:
                pg_name_ayn_list = re.split('#', pg['name'])
            except Exception:
                pass
            if len(pg_name_ayn_list) != 3:
                continue

            fc_network_name = pg_name_ayn_list[0]
            fc_network_id = pg_name_ayn_list[1]
            fc_dvs_id = pg_name_ayn_list[2]
            pg_id = self._get_pg_id_pg_date(pg)

            if fc_network_name is None \
                    or fc_network_id is None\
                    or fc_dvs_id is None\
                    or pg_id is None:
                continue

            if fc_dvs_id not in self.dvs_mapping.values():
                continue
            pg_user = pg.get('userName')
            if pg_user is None:
                continue
            if pg_user != constant.CONF.fusioncompute.fc_user:
                continue

            is_need_remove = True
            for network in networks:
                if network['name'] == fc_network_name \
                        and network['id'] == fc_network_id:
                    is_need_remove = False
                    break

            if is_need_remove:
                try:
                    self.del_port_group(fc_dvs_id, pg_id)
                    LOG.warn('removed port group: dvs_id=%s, pg_id=%s',
                             fc_dvs_id, pg_id)
                except Exception:
                    LOG.error(
                        'error happened while deleting port group: '
                        'dvs_id=%s, pg_id=%s', fc_dvs_id, pg_id)
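audit_pg relies on a naming convention: FC port groups created by the driver carry three '#'-separated fields, and anything that does not split into exactly three parts is skipped as foreign. A plain str.split is equivalent to re.split with a literal separator; a self-contained sketch of the convention (function name assumed):

def parse_pg_name(pg_name):
    """Split 'network_name#network_id#dvs_id'; None if not ours."""
    if not pg_name:
        return None
    parts = pg_name.split('#')
    if len(parts) != 3:
        return None
    return tuple(parts)

assert parse_pg_name('net1#uuid-1#dvs-9') == ('net1', 'uuid-1', 'dvs-9')
assert parse_pg_name('unrelated-name') is None
assert parse_pg_name(None) is None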
Example #10
0
    def check_input(self):
        """
        check input params for image-based vm creation
        :return:
        """
        super(VmCreateWithImage, self).check_input()

        disk_quantity_gb = self._instance['root_gb']
        image_size = self._get_image_size()
        if image_size > disk_quantity_gb:
            LOG.error(_("image is larger than sys-vol."))
            raise fc_exc.ImageTooLarge
Example #11
0
    def delete_vm(self, context, instance, block_device_info=None,
                  destroy_disks=True):
        """Delete VM on FC

        :param context:
        :param instance:
        :param block_device_info:
        :param destroy_disks:
        :return:
        """

        # when reverting or confirming a resize, only stop the vm; the
        # task state will be resize_reverting or resize_confirming
        if instance and (instance.get('task_state') == 'resize_reverting'
                         or instance.get('task_state') == 'resize_confirming'):
            LOG.info(_('revert resize now, here only stop vm.'))
            try:
                self.stop_vm(instance)
            except Exception as e:
                LOG.warn(_('stop vm failed, trigger rollback'))
                raise exception.InstanceFaultRollback(inner_exception=e)
            return

        try:
            fc_vm = FC_MGR.get_vm_by_uuid(instance)
        except exception.InstanceNotFound:
            LOG.warn(_('instance no longer exists; ignoring this deletion.'))
            return

        # detach volume created by cinder
        if block_device_info:
            LOG.info(_('now will stop vm before detach cinder volumes.'))
            self.stop_vm(instance)
            for vol in block_device_info['block_device_mapping']:
                self.detach_volume(vol['connection_info'], instance)

        # if vm is in fault-resuming or unknown status, stop it before delete
        if fc_vm.status == constant.VM_STATUS.UNKNOWN \
                or fc_vm.status == constant.VM_STATUS.FAULTRESUMING:
            LOG.debug(_("vm %s status is fault-resuming or unknown, "
                        "stop it before delete."), fc_vm.uri)
            self.stop_vm(instance)

        self._delete_vm_with_fc_vm(fc_vm, destroy_disks)

        # update affinity group info if needed
        try:
            self._update_drs_rules(instance)
            self._update_affinity_groups(context, instance)
        #ignore pylint:disable=W0703
        except Exception as excp:
            utils.log_exception(excp)
            LOG.error(_('update affinity group info failed!'))
Example #12
0
    def check_input(self):
        super(VmCreateWithTemplate, self).check_input()

        properties = self._image_meta.get('properties')
        if properties:
            try:
                self._cloned_source_vm_or_tpl = \
                    self._get_vm_by_template_url(
                        properties.get(constant.HUAWEI_IMAGE_LOCATION))
                self._validate_template(self._cloned_source_vm_or_tpl)
            except Exception:
                LOG.error(_("Invalid FusionCompute template !"))
                raise fc_exc.InstanceCloneFailure
Example #13
0
    def audit_pg(self):
        context = nova_ctxt.get_admin_context()
        networks = self._neutron.get_all(context=context)

        if len(self.dvs_mapping) == 0:
            self._init_all_fc_dvs()

        pg_list = self.query_all_pg()
        for pg in pg_list:
            pg_name_ayn_list = []
            try:
                pg_name_ayn_list = re.split('#', pg['name'])
            except Exception:
                pass
            if len(pg_name_ayn_list) != 3:
                continue

            fc_network_name = pg_name_ayn_list[0]
            fc_network_id = pg_name_ayn_list[1]
            fc_dvs_id = pg_name_ayn_list[2]
            pg_id = self._get_pg_id_pg_date(pg)
            
            if fc_network_name is None \
                    or fc_network_id is None\
                    or fc_dvs_id is None\
                    or pg_id is None:
                continue

            if fc_dvs_id not in self.dvs_mapping.values():
                continue
            pg_user = pg.get('userName')
            if pg_user is None:
                continue
            if pg_user != constant.CONF.fusioncompute.fc_user:
                continue

            is_need_remove = True
            for network in networks:
                if network['name'] == fc_network_name \
                        and network['id'] == fc_network_id:
                    is_need_remove = False
                    break
            
            if is_need_remove:
                try:
                    self.del_port_group(fc_dvs_id, pg_id)
                    LOG.warn('removed port group: dvs_id=%s, pg_id=%s',
                             fc_dvs_id, pg_id)
                except Exception:
                    LOG.error('error happened while deleting port group: '
                              'dvs_id=%s, pg_id=%s', fc_dvs_id, pg_id)
Example #14
0
    def change_instance_metadata(self, instance):
        """

        :param instance:
        :return:
        """
        LOG.info(_("trying to change metadata for vm: %s.") % instance['name'])

        try:
            fc_vm = FC_MGR.get_vm_by_uuid(instance)
            self._modify_boot_option_if_needed(instance, fc_vm)
        #ignore pylint:disable=W0703
        except Exception as msg:
            LOG.error(_("change_instance_metadata has exception, msg = %s")
                      % msg)
Example #15
0
    def change_instance_metadata(self, instance):
        """

        :param instance:
        :return:
        """
        LOG.info(_("trying to change metadata for vm: %s.") % instance['name'])

        try:
            fc_vm = FC_MGR.get_vm_by_uuid(instance)
            self._modify_boot_option_if_needed(instance, fc_vm)
        #ignore pylint:disable=W0703
        except Exception as msg:
            LOG.error(
                _("change_instance_metadata has exception, msg = %s") % msg)
Example #16
0
    def _get_fc_vm(self, vm_info, limit=1, offset=0, detail=2, **kwargs):
        """
        get fc vm info by conditions
        :param vm_info:
        :param limit:
        :param offset:
        :param detail:
        :param kwargs:
        :return:
        """
        instances = self._query_vm(limit=limit, offset=offset, detail=detail,
                                   **kwargs)
        if not instances or not instances['vms']:
            LOG.error(_("can not find instance %s."), vm_info)
            raise exception.InstanceNotFound(instance_id=vm_info)
        return FCInstance(instances['vms'][0])
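Since _query_vm is called with limit=1, the interesting part is the result handling: an empty page means the instance does not exist on FC. The same logic in isolation, with a generic error standing in for exception.InstanceNotFound:

def first_vm_or_raise(query_result, vm_info):
    """First entry of a one-item query page, or an error if empty."""
    if not query_result or not query_result.get('vms'):
        raise LookupError('can not find instance %s' % vm_info)
    return query_result['vms'][0]

vm = first_vm_or_raise({'vms': [{'urn': 'urn:sites:1:vms:9'}]}, 'demo')
assert vm['urn'] == 'urn:sites:1:vms:9'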
Example #17
0
    def suspend_vm(self, instance):
        """suspend vm on FC

        :param instance:nova.objects.instance.Instance
        :return:
        """

        LOG.info(_("trying to suspend vm: %s."), instance['name'])
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        if fc_vm.status == constant.VM_STATUS.RUNNING:
            self.post(fc_vm.get_vm_action_uri('suspend'),
                      excp=exception.InstanceFaultRollback)
            LOG.info(_("suspend vm %s success"), fc_vm.name)
        else:
            LOG.error(_("error vm status: %s.") % fc_vm.status)
            raise exception.InstanceFaultRollback
Example #18
0
    def suspend_vm(self, instance):
        """suspend vm on FC

        :param instance:nova.objects.instance.Instance
        :return:
        """

        LOG.info(_("trying to suspend vm: %s."), instance['name'])
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        if fc_vm.status == constant.VM_STATUS.RUNNING:
            self.post(fc_vm.get_vm_action_uri('suspend'),
                      excp=exception.InstanceFaultRollback)
            LOG.info(_("suspend vm %s success"), fc_vm.name)
        else:
            LOG.error(_("error vm status: %s.") % fc_vm.status)
            raise exception.InstanceFaultRollback
Example #19
0
    def _get_fc_vm(self, vm_info, limit=1, offset=0, detail=2, **kwargs):
        """
        get fc vm info by conditions
        :param vm_info:
        :param limit:
        :param offset:
        :param detail:
        :param kwargs:
        :return:
        """
        instances = self._query_vm(limit=limit,
                                   offset=offset,
                                   detail=detail,
                                   **kwargs)
        if not instances or not instances['vms']:
            LOG.error(_("can not find instance %s."), vm_info)
            raise exception.InstanceNotFound(instance_id=vm_info)
        return FCInstance(instances['vms'][0])
Example #20
0
    def snapshot(self, context, instance, image_href, update_task_state):
        """
        Create sys vol image and upload to glance
        :param context:
        :param instance:
        :param image_href:
        :param update_task_state:
        :return:
        """

        if not constant.CONF.fusioncompute.fc_image_path:
            LOG.error(_("config option fc_image_path is None."))
            raise fc_exc.InvalidImageDir()

        # 0.get image service and image id
        _image_service = glance.get_remote_image_service(context, image_href)
        snapshot_image_service, image_id = _image_service

        # 1.import sys vol to nfs dir
        LOG.info(_("begin uploading sys vol to glance ..."))
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        sys_vol = self._get_sys_vol_from_vm_info(fc_vm)
        if not sys_vol:
            raise exception.DiskNotFound(_("can not find sys volume."))

        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
        self._volume_ops.create_image_from_volume(self.site.volume_uri,
                                                  sys_vol,
                                                  image_id)

        # 2.update image metadata
        LOG.info(_("begin update image metadata ..."))
        update_task_state(task_state=task_states.IMAGE_UPLOADING,
                          expected_state=task_states.IMAGE_PENDING_UPLOAD)

        name = snapshot_image_service.show(context, image_id).get('name')
        location = self._generate_image_location(image_id)
        metadata = self._generate_image_metadata(name,
                                                 location,
                                                 fc_vm,
                                                 instance)
        snapshot_image_service.update(context, image_id, metadata)
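snapshot advances the instance task state twice: to IMAGE_PENDING_UPLOAD before exporting the system volume, then to IMAGE_UPLOADING with expected_state set so a concurrent state change is caught. A stub tracker illustrating that handshake (TaskStateTracker is invented for the sketch; the two-step protocol is from the snippet):

IMAGE_PENDING_UPLOAD = 'image_pending_upload'
IMAGE_UPLOADING = 'image_uploading'

class TaskStateTracker(object):
    """Invented stub mimicking nova's update_task_state callback."""
    def __init__(self):
        self.state = None

    def update(self, task_state, expected_state=None):
        if expected_state is not None and self.state != expected_state:
            raise RuntimeError('unexpected task state: %s' % self.state)
        self.state = task_state

tracker = TaskStateTracker()
tracker.update(task_state=IMAGE_PENDING_UPLOAD)
# ... export the system volume to the image store here ...
tracker.update(task_state=IMAGE_UPLOADING,
               expected_state=IMAGE_PENDING_UPLOAD)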
Example #21
0
    def get_available_resource(self, nodename):
        """Retrieve resource info.

        This method is called when nova-compute launches, and
        as part of a periodic task.

        :returns: dictionary describing resources
        """
        LOG.debug(_("get_available_resource, nodename: %s ." % nodename))
        cluster_name = self.get_cluster_name_by_nodename(nodename)
        cluster_resource = self.get_cluster_resource(cluster_name)
        if not cluster_resource:
            LOG.error(_("Invalid cluster name : %s"), nodename)
            return {}

        cluster_resource['cpu_info'] = \
            jsonutils.dumps(cluster_resource['cpu_info'])
        cluster_resource['supported_instances'] = jsonutils.dumps(
            cluster_resource['supported_instances'])

        LOG.debug("the resource status is %s", cluster_resource)
        return cluster_resource
Example #22
0
    def get_available_resource(self, nodename):
        """Retrieve resource info.

        This method is called when nova-compute launches, and
        as part of a periodic task.

        :returns: dictionary describing resources
        """
        LOG.debug(_("get_available_resource, nodename: %s ." % nodename))
        cluster_name = self.get_cluster_name_by_nodename(nodename)
        cluster_resource = self.get_cluster_resource(cluster_name)
        if not cluster_resource:
            LOG.error(_("Invalid cluster name : %s"), nodename)
            return {}

        cluster_resource['cpu_info'] = \
            jsonutils.dumps(cluster_resource['cpu_info'])
        cluster_resource['supported_instances'] = jsonutils.dumps(
            cluster_resource['supported_instances'])

        LOG.debug("the resource status is %s", cluster_resource)
        return cluster_resource
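The two jsonutils.dumps calls exist because cpu_info and supported_instances are gathered as structured data but are expected downstream as plain strings. The standard json module shows the same transformation (the sample values are made up):

import json

cluster_resource = {
    'vcpus': 64,  # sample values, made up
    'cpu_info': {'arch': 'x86_64', 'vendor': 'Intel'},
    'supported_instances': [['x86_64', 'fusioncompute', 'hvm']],
}

cluster_resource['cpu_info'] = json.dumps(cluster_resource['cpu_info'])
cluster_resource['supported_instances'] = json.dumps(
    cluster_resource['supported_instances'])

assert isinstance(cluster_resource['cpu_info'], str)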
Example #23
0
    def snapshot(self, context, instance, image_href, update_task_state):
        """
        Create sys vol image and upload to glance
        :param context:
        :param instance:
        :param image_href:
        :param update_task_state:
        :return:
        """

        if not constant.CONF.fusioncompute.fc_image_path:
            LOG.error(_("config option fc_image_path is None."))
            raise fc_exc.InvalidImageDir()

        # 0.get image service and image id
        _image_service = glance.get_remote_image_service(context, image_href)
        snapshot_image_service, image_id = _image_service

        # 1.import sys vol to nfs dir
        LOG.info(_("begin uploading sys vol to glance ..."))
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        sys_vol = self._get_sys_vol_from_vm_info(fc_vm)
        if not sys_vol:
            raise exception.DiskNotFound(_("can not find sys volume."))

        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
        self._volume_ops.create_image_from_volume(self.site.volume_uri,
                                                  sys_vol, image_id)

        # 2.update image metadata
        LOG.info(_("begin update image metadata ..."))
        update_task_state(task_state=task_states.IMAGE_UPLOADING,
                          expected_state=task_states.IMAGE_PENDING_UPLOAD)

        name = snapshot_image_service.show(context, image_id).get('name')
        location = self._generate_image_location(image_id)
        metadata = self._generate_image_metadata(name, location, fc_vm,
                                                 instance)
        snapshot_image_service.update(context, image_id, metadata)
Example #24
0
    def _raise_if_not_in_status_ok(self, rsp):
        """
        raise an exception if the response status is not OK
        :param rsp:
        :return:
        """
        if rsp.status_code not in self.STATUS_OK:
            error_info = {}
            try:
                error_info = rsp.json()
            #ignore pylint:disable=W0703
            except Exception as excp:
                LOG.warn('failed to parse error response content: %s', excp)

            LOG.error(
                _('FC request error: <status_code> %s <reason> '
                  '%s <url> %s <errorcode> %s <errorDes> %s'), rsp.status_code,
                rsp.reason, rsp.url, error_info.get('errorCode', 'unknown'),
                error_info.get('errorDes', 'unknown'))

            raise exception.RequestError(
                reason=error_info.get('errorDes'),
                error_code=error_info.get('errorCode'))
Example #25
0
    def _raise_if_not_in_status_ok(self, rsp):
        """
        raise an exception if the response status is not OK
        :param rsp:
        :return:
        """
        if rsp.status_code not in self.STATUS_OK:
            error_info = {}
            try:
                error_info = rsp.json()
            #ignore pylint:disable=W0703
            except Exception as excp:
                LOG.warn('failed to parse error response content: %s', excp)

            LOG.error(_('FC request error: <status_code> %s <reason> '
                        '%s <url> %s <errorcode> %s <errorDes> %s'),
                      rsp.status_code, rsp.reason, rsp.url,
                      error_info.get('errorCode', 'unknown'),
                      error_info.get('errorDes', 'unknown'))

            raise exception.RequestError(
                reason=error_info.get('errorDes'),
                error_code=error_info.get('errorCode'))
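Both variants read the error body defensively: rsp.json() can raise on a non-JSON body, so error_info defaults to {} and .get(..., 'unknown') supplies fallbacks for logging. The same pattern against a minimal response stand-in (FakeResponse is illustrative, not the driver's real response type):

import json

class FakeResponse(object):
    """Minimal stand-in with the attributes the snippet reads."""
    def __init__(self, status_code, body=''):
        self.status_code = status_code
        self.reason = 'Bad Request'
        self.url = 'https://fc.example/service/vms'
        self._body = body

    def json(self):
        return json.loads(self._body)  # raises ValueError on non-JSON

def extract_error(rsp):
    """Return (errorCode, errorDes), tolerating non-JSON error bodies."""
    error_info = {}
    try:
        error_info = rsp.json()
    except Exception:
        pass
    return (error_info.get('errorCode', 'unknown'),
            error_info.get('errorDes', 'unknown'))

assert extract_error(FakeResponse(400, '{"errorCode": "10.1"}')) == \
    ('10.1', 'unknown')
assert extract_error(FakeResponse(500, '<html>')) == ('unknown', 'unknown')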
Example #26
0
    def _update_drs_rules(self, instance):
        """

        :param instance:
        :return:
        """

        node = instance.get('node')
        if node is None:
            LOG.error(_('failed to get node info from instance'))
            return

        cluster = self._cluster_ops.get_cluster_detail_by_nodename(node)
        if cluster is None:
            LOG.error(_('failed to get cluster info by node: %s'), node)
            return

        drs_rules = cluster['drsSetting']['drsRules']
        for drs_rule in drs_rules:
            if len(drs_rule['vms']) < 2:
                rule_name = str(drs_rule['ruleName'])
                rule_type = drs_rule['ruleType']
                self._cluster_ops.delete_drs_rules(
                    cluster, rule_name, rule_type)
Example #27
0
    def _update_drs_rules(self, instance):
        """

        :param instance:
        :return:
        """

        node = instance.get('node')
        if node is None:
            LOG.error(_('failed to get node info from instance'))
            return

        cluster = self._cluster_ops.get_cluster_detail_by_nodename(node)
        if cluster is None:
            LOG.error(_('failed to get cluster info by node: %s'), node)
            return

        drs_rules = cluster['drsSetting']['drsRules']
        for drs_rule in drs_rules:
            if len(drs_rule['vms']) < 2:
                rule_name = str(drs_rule['ruleName'])
                rule_type = drs_rule['ruleType']
                self._cluster_ops.delete_drs_rules(
                    cluster, rule_name, rule_type)
Example #28
0
def get_vm_create(fc_client, task_ops, instance, image_meta=None):
    """get create vm object"""
    if instance.get('image_ref'):
        image_type = None
        if image_meta:
            properties = image_meta.get('properties')
            if properties:
                image_type = properties.get(constant.HUAWEI_IMAGE_TYPE)

        if image_type == 'nfs':
            vm_class = VmCreateWithNfsImage
        elif image_type == 'uds':
            vm_class = VmCreateWithUdsImage
        elif image_type == 'template':
            vm_class = VmCreateWithTemplate
        elif image_type == 'glance' or image_type is None:
            vm_class = VmCreateWithGlanceImage
        else:
            LOG.error(_("invalid image type: %s."), image_type)
            raise fc_exc.InvalidImageDir
    else:
        vm_class = VmCreateWithVolume

    return vm_class(fc_client, task_ops, instance)
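get_vm_create dispatches on the HUAWEI_IMAGE_TYPE property through an if/elif chain. The same mapping can be table-driven, which keeps the set of valid types in one place; a sketch with empty stub classes (class names come from the snippet, the dict layout is the suggestion, and ValueError stands in for the driver's exception):

class VmCreateWithNfsImage(object):
    pass

class VmCreateWithUdsImage(object):
    pass

class VmCreateWithTemplate(object):
    pass

class VmCreateWithGlanceImage(object):
    pass

VM_CREATE_BY_IMAGE_TYPE = {
    'nfs': VmCreateWithNfsImage,
    'uds': VmCreateWithUdsImage,
    'template': VmCreateWithTemplate,
    'glance': VmCreateWithGlanceImage,
    None: VmCreateWithGlanceImage,  # no type property: treat as glance
}

def lookup_vm_class(image_type):
    try:
        return VM_CREATE_BY_IMAGE_TYPE[image_type]
    except KeyError:
        raise ValueError('invalid image type: %s' % image_type)

assert lookup_vm_class('template') is VmCreateWithTemplate
assert lookup_vm_class(None) is VmCreateWithGlanceImage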
Example #29
0
    def reconfigure_affinity_group(self,
                                   instances,
                                   affinity_group,
                                   action,
                                   node=None):
        """

        :param instances:
        :param affinity_group:
        :param action:
        :param node:
        :return:
        """

        LOG.info(_("begin reconfigure affinity group ..."))

        # 1. all vms passed in should be in the same cluster
        if node is None and len(instances) > 0:
            node = instances[0].get('node')

        if node is None:
            msg = _("Cannot get any node info!")
            raise fc_exc.AffinityGroupException(reason=msg)

        for instance in instances:
            if node != instance.get('node'):
                msg = _("All vms must be in the same cluster!")
                raise fc_exc.AffinityGroupException(reason=msg)

        # 2. get fc cluster object
        cluster = self._cluster_ops.get_cluster_detail_by_nodename(node)
        if cluster is None:
            raise fc_exc.ClusterNotFound(cluster_name=node)

        # 3. do reconfigure
        rule_name = str(affinity_group.id)
        rule_type = constant.DRS_RULES_TYPE_MAP.get(affinity_group.type) or \
                    constant.DRS_RULES_TYPE_MAP['affinity']

        if action == 'remove':
            self._cluster_ops.delete_drs_rules(cluster, rule_name, rule_type)
            LOG.info(_("delete affinity group success and return"))
            return

        if action == 'add':
            self._cluster_ops.create_drs_rules(cluster, rule_name, rule_type)
            cluster = self._cluster_ops.get_cluster_detail_by_nodename(node)
            LOG.info(_("create affinity group success"))

        vms = []
        for instance in instances:
            instance['uuid'] = instance['name']
            fc_vm = FC_MGR.get_vm_by_uuid(instance)
            vm_info = {'urn': fc_vm['urn'], 'name': fc_vm['name']}
            vms.append(vm_info)

        try:
            self._cluster_ops.modify_drs_rules(
                cluster, rule_name, rule_type, vms)
        except Exception as exc:
            LOG.error(_("modify drs rules failed !"))
            if action == 'add':
                self._cluster_ops.\
                    delete_drs_rules(cluster, rule_name, rule_type)
            raise exc

        LOG.info(_("reconfigure affinity group success"))
Example #30
0
    def reconfigure_affinity_group(self, instances, affinity_group, action,
                                   node=None):
        """

        :param instances:
        :param affinity_group:
        :param action:
        :param node:
        :return:
        """

        LOG.info(_("begin reconfigure affinity group ..."))

        # 1. all vms passed in should be in the same cluster
        if node is None and len(instances) > 0:
            node = instances[0].get('node')

        if node is None:
            msg = _("Cannot get any node info!")
            raise fc_exc.AffinityGroupException(reason=msg)

        for instance in instances:
            if node != instance.get('node'):
                msg = _("All vms must be in the same cluster!")
                raise fc_exc.AffinityGroupException(reason=msg)

        # 2. get fc cluster object
        cluster = self._cluster_ops.get_cluster_detail_by_nodename(node)
        if cluster is None:
            raise fc_exc.ClusterNotFound(cluster_name=node)

        # 3. do reconfigure
        rule_name = str(affinity_group.id)
        rule_type = constant.DRS_RULES_TYPE_MAP.get(affinity_group.type) or \
                    constant.DRS_RULES_TYPE_MAP['affinity']

        if action == 'remove':
            self._cluster_ops.delete_drs_rules(cluster, rule_name, rule_type)
            LOG.info(_("delete affinity group success and return"))
            return

        if action == 'add':
            self._cluster_ops.create_drs_rules(cluster, rule_name, rule_type)
            cluster = self._cluster_ops.get_cluster_detail_by_nodename(node)
            LOG.info(_("create affinity group success"))

        vms = []
        for instance in instances:
            instance['uuid'] = instance['name']
            fc_vm = FC_MGR.get_vm_by_uuid(instance)
            vm_info = {
                'urn': fc_vm['urn'],
                'name': fc_vm['name']
            }
            vms.append(vm_info)

        try:
            self._cluster_ops.modify_drs_rules(
                cluster, rule_name, rule_type, vms)
        except Exception as exc:
            LOG.error(_("modify drs rules failed !"))
            if action == 'add':
                self._cluster_ops.\
                    delete_drs_rules(cluster, rule_name, rule_type)
            raise exc

        LOG.info(_("reconfigure affinity group success"))