Example #1
    def create_drs_rules(self, cluster, rule_name, rule_type):
        """

        :param cluster:
        :param rule_name:
        :param rule_type:
        :return:
        """

        rule = self._get_drs_rules_from_cluster(cluster, rule_name, rule_type)
        if rule:
            LOG.debug(_("drs rules %s already exists"), rule_name)
            return

        body = {
            'drsSetting': {
                'drsRules': [{
                    'operationType': constant.DRS_RULES_OP_TYPE_MAP['create'],
                    'ruleName': rule_name,
                    'ruleType': rule_type
                }]
            }
        }
        self._modify_cluster(cluster, body)
        LOG.debug(_("create drs rules %s succeed"), rule_name)
Example #2
    def _request(self, method, path, data=None, headers=None, **kwargs):
        """
        send request msg
        :param method:
        :param path:
        :param data:
        :param headers:
        :param kwargs:
        :return:
        """

        url = self._to_url(path)

        if not data:
            data = jsonutils.dumps({})
        elif isinstance(data, (dict, list)):
            data = jsonutils.dumps(data)

        try:
            data_for_log = copy.deepcopy(jsonutils.loads(data))
            utils.drop_password_key(data_for_log)
            LOG.debug(_('request: %s %s %s'), method, url,
                      jsonutils.dumps(data_for_log))
        except Exception:
            LOG.debug(_('request: %s %s'), method, url)

        rsp = requests.request(method, url, data=data, headers=headers,
                               verify=False, **kwargs)
        return rsp
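Note how the request body is deep-copied and scrubbed before logging so credentials never reach the log file. A minimal sketch of what a scrubber like `utils.drop_password_key` might do (the real helper may differ):

    def drop_password_key(data):
        """Recursively mask password-like keys in place (sketch)."""
        if not isinstance(data, dict):
            return
        for key, value in data.items():
            if 'password' in key.lower():
                data[key] = '***'
            elif isinstance(value, (dict, list)):
                items = value if isinstance(value, list) else [value]
                for item in items:
                    drop_password_key(item)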
Example #3
    def _generate_vm_spec_info(self, location=None, flavor=None):
        """
        Generate the vm spec info for cole migration
        :param location:
        :param flavor:
        :return:
        """

        data = {}
        if location:
            data['location'] = location
        if flavor:
            if flavor.get('vcpus'):
                data['cpu'] = {'quantity': flavor.get('vcpus')}

            if flavor.get('memory_mb'):
                data['memory'] = {'quantityMB': flavor.get('memory_mb')}

            cpu_qos = utils.dict_filter_and_convert(flavor,
                                                    constant.CPU_QOS_FC_KEY,
                                                    constant.CPU_QOS_FC_KEY)
            if data.get('cpu', None):
                data['cpu'] = utils.dict_add(data['cpu'], cpu_qos)
            else:
                data['cpu'] = cpu_qos

        LOG.debug(_("vm spec data: %s.") % jsonutils.dumps(data))
        return data
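A hedged input/output example for this helper, assuming `constant.CPU_QOS_FC_KEY` passes through QoS keys such as `weight` (the exact key list ships with the driver):

    # Illustrative only; the QoS key name is an assumption.
    flavor = {'vcpus': 2, 'memory_mb': 4096, 'weight': 1000}
    # _generate_vm_spec_info(flavor=flavor) would then return roughly:
    # {'cpu': {'quantity': 2, 'weight': 1000},
    #  'memory': {'quantityMB': 4096}}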
Example #4
    def attach_interface(self, instance, vif):
        """
        Send message to fusion compute virtual machine

        :param instance:
        :param vif:
        :return: response : {"taskUrn": string, "taskUri": string}
        """
        LOG.debug(_("trying to attach interface, vm name: %s,"
                    "vm uuid: %s, vif info: %s"), instance['name'],
                  instance['uuid'], vif)

        pg_urn = self._network_ops.ensure_network(vif['network'])
        vsp_body = {
            'name': vif['id'],
            'portId': vif['id'],
            'portGroupUrn': pg_urn,
            'mac': vif['address']
        }
        LOG.info("the vsp information is %s", vsp_body)

        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        attach_interface_uri = fc_vm.get_vm_action_uri('nics')

        response = self.post(attach_interface_uri,
                             data=vsp_body,
                             excp=exception.InterfaceAttachFailed)
        LOG.info('send attach interface finished, return is: %s',
                 jsonutils.dumps(response))
        return response
Example #5
    def modify_drs_rules(self, cluster, rule_name, rule_type, vms):
        """

        :param cluster:
        :param rule_name:
        :param rule_type:
        :param vms:
        :return:
        """

        rule = self._get_drs_rules_from_cluster(cluster, rule_name, rule_type)
        if rule is None:
            msg = _("Can not find drs rules: name=%s, type=%d") % \
                  (rule_name, rule_type)
            raise fc_exc.AffinityGroupException(reason=msg)

        body = {
            'drsSetting': {
                'drsRules': [{
                    'operationType': constant.DRS_RULES_OP_TYPE_MAP['modify'],
                    'ruleIndex': rule['ruleIndex'],
                    'ruleName': rule_name,
                    'ruleType': rule_type,
                    'vms': vms
                }]
            }
        }
        self._modify_cluster(cluster, body)
        LOG.debug(_("modify drs rules %s succeed"), rule_name)
Example #6
    def detach_interface(self, instance, vif):
        """
        Send message to fusion compute virtual machine

        :param instance:
        :param vif:
        :return: response : {"taskUrn": string, "taskUri": string}
        if the nic does not exited, return {} else {"taskUrn": string,
        "taskUri": string}
        """
        LOG.debug(_("trying to detach interface for vm name: %s,"
                    "vm uuid: %s, vif information is %s"), instance['name'],
                  instance['uuid'], vif)

        response = {}
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        nics = fc_vm["vmConfig"]["nics"]
        LOG.info("nics in FusionCompute is %s", nics)
        nic_uri = None
        for nic in nics:
            if nic['name'] == vif['id']:
                nic_uri = nic['uri']
                break

        if nic_uri:
            detach_interface_uri = (nic_uri.replace("nics", "virtualNics"))
            LOG.info("detach_interface_uri is %s", detach_interface_uri)
            response = self.delete(detach_interface_uri,
                                   excp=exception.InstanceInvalidState)
        else:
            LOG.warn(_("detach interface for vm name: %s, not exist nic."),
                     instance['name'])
        LOG.info(_('send detach interface finished, return is: %s'),
                 jsonutils.dumps(response))
        return response
Example #7
    def live_migration(self, instance_ref, nodename):
        """Live migration of an instance to another host.

        :param instance_ref:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param nodename: destination node name

        """
        LOG.info(_("trying to migrate vm: %s.") % instance_ref['name'])

        # get destination cluster urn
        cluster_urn = self._cluster_ops.get_cluster_urn_by_nodename(nodename)
        if not cluster_urn:
            raise fc_exc.ClusterNotFound(cluster_name=nodename)
        LOG.debug(_("get cluster urn: %s."), cluster_urn)

        # generate migrate url and post msg to FC
        body = {
            'location': cluster_urn
        }
        fc_vm = FC_MGR.get_vm_by_uuid(instance_ref)
        self.post(fc_vm.get_vm_action_uri('migrate'), data=body,
                  excp=exception.MigrationError)
        LOG.info(_("migrate vm %s success" % fc_vm.name))
Example #8
    def get_cpu_usage(self, monitor_period, cluster_urn):
        """Query cluster CPU usage over the last two monitor periods."""
        end_time = self.get_fc_current_time()
        start_time = end_time - (monitor_period * 2)

        body = [{
            "startTime": str(start_time),
            "endTime": str(end_time),
            "interval": str(monitor_period),
            "metricId": "cpu_usage",
            "urn": cluster_urn
        }]

        LOG.debug("get_cpu_usage body:%s", json.dumps(body))
        response = self.fc_client.post(self.site.metric_curvedata_uri,
                                       data=body)
        LOG.debug("get_cpu_usage body:%s response:%s", json.dumps(body),
                  json.dumps(response))
        if response:
            if len(response["items"]) > 0:
                metric_value = response["items"][0]["metricValue"]
                if len(metric_value) > 0:
                    value = metric_value[0]["value"]
                    if len(metric_value) == 2:
                        if metric_value[1]["value"] is not None:
                            value = metric_value[1]["value"]
                    return value
        return None
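The query spans two monitor periods and the code prefers the newer of the two returned samples when it exists. The same selection logic as a standalone sketch:

    def pick_latest_value(metric_value):
        """Return the newest non-None sample from a 1- or 2-item series."""
        if not metric_value:
            return None
        value = metric_value[0]['value']
        if len(metric_value) == 2 and metric_value[1]['value'] is not None:
            value = metric_value[1]['value']
        return value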
Example #9
    def delete_drs_rules(self, cluster, rule_name, rule_type):
        """

        :param cluster:
        :param rule_name:
        :param rule_type:
        :return:
        """

        rule = self._get_drs_rules_from_cluster(cluster, rule_name, rule_type)
        if rule is None:
            LOG.debug(_("drs rules %s not exists"), rule_name)
            return

        body = {
            'drsSetting': {
                'drsRules': [{
                    'operationType': constant.DRS_RULES_OP_TYPE_MAP['delete'],
                    'ruleIndex': rule['ruleIndex']
                }]
            }
        }
        self._modify_cluster(cluster, body)
        LOG.debug(_("delete drs rules %s succeed"), rule_name)
Example #10
    def ensure_network(self, network_info):
        """
            Ensure network resource on FC

        :param network_info: network_info from nova, dictionary type
        :return:
        """
        # NOTE: physical network only visible to admin user
        context = nova_ctxt.get_admin_context()

        network = self._get_network_from_neutron(context, network_info)
        LOG.debug(_('get network info from neutron: %s'), network)

        if network['provider:network_type'] == constant.TYPE_VXLAN:
            dvs_name = constant.CONF.fusioncompute.vxlan_dvs_name
        else:
            dvs_name = network['provider:physical_network']
        dvs_id = self._get_dvs_id_by_physnet_name(dvs_name)
        if not dvs_id:
            raise fc_exc.DVSwitchNotFound(dvs_id=dvs_name)

        pg_adpt = PortGroupQueryAdapter(network, dvs_id)
        pg_data = self.query_port_group(pg_adpt)
        if not pg_data:
            try:
                pg_adpt = PortGroupCreateAdapter(network, dvs_id)
                pg_data = self.create_port_group(dvs_id, pg_adpt)
            except Exception as e:
                # race condition
                LOG.warn(_('create pg failed (%s), will check it again'), e)
                pg_adpt = PortGroupQueryAdapter(network, dvs_id)
                pg_data = self.query_port_group(pg_adpt)

        return pg_data['urn'] if pg_data else None
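`ensure_network` is a get-or-create with a race fallback: if a concurrent node creates the same port group first, the loser re-queries instead of failing. The generic shape of that pattern, sketched:

    def get_or_create(query, create):
        """Query first; on create failure assume a racing creator won,
        then query again (sketch of the pattern used above)."""
        found = query()
        if found:
            return found
        try:
            return create()
        except Exception:
            return query()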
Example #11
    def _reboot_vm(self, fc_vm, reboot_type):
        """Reboot the vm (inner helper)."""
        body = {'mode': constant.FC_REBOOT_TYPE[reboot_type]}
        self.post(fc_vm.get_vm_action_uri('reboot'),
                  data=body,
                  excp=exception.InstanceRebootFailure)
        LOG.debug(_("_reboot_vm %s succeeded"), fc_vm.uri)
Example #12
    def get_disks_info(self):
        """get image disk detail info"""

        LOG.debug(_('prepare volume'))

        disks_info = []

        # sys vol info
        sys_disk_info = {
            'sequenceNum': 1,
            'quantityGB': self._instance['root_gb'],
            'isThin': constant.FC_DRIVER_JOINT_CFG['volume_is_thin']
        }
        disks_info.append(sys_disk_info)

        # user vol info
        for disk in self._volume_ops.ensure_volume(self._block_device_info):
            user_disk_info = {
                'pciType': disk['pci'],
                'volumeUrn': disk['urn'],
                'sequenceNum':
                    constant.MOUNT_DEVICE_SEQNUM_MAP[disk['mount_device']],
                'isThin': constant.FC_DRIVER_JOINT_CFG['volume_is_thin']
            }
            disks_info.append(user_disk_info)

        return disks_info
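FC addresses disks by slot, so sequence 1 is reserved for the system volume and nova mount points map to the remaining slots via `constant.MOUNT_DEVICE_SEQNUM_MAP`. A hedged illustration of that map's shape (the real values ship with the driver and may differ):

    # Assumed shape only; the actual mapping may differ.
    MOUNT_DEVICE_SEQNUM_MAP = {
        '/dev/sdb': 2,
        '/dev/sdc': 3,
        '/dev/sdd': 4,
    }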
Example #13
    def _set_qos_specs_to_volume(self, connection_info):
        """Set volume QoS IO specs on FC.

        :param connection_info: volume connection info from cinder
        :return: None
        """
        qos_para = {'maxReadBytes': 0,
                    'maxWriteBytes': 0,
                    'maxReadRequest': 0,
                    'maxWriteRequest': 0}
        key_cvt_map = {'read_bytes_sec': 'maxReadBytes',
                       'write_bytes_sec': 'maxWriteBytes',
                       'read_iops_sec': 'maxReadRequest',
                       'write_iops_sec': 'maxWriteRequest'}
        tune_opts = ['read_bytes_sec', 'write_bytes_sec',
                     'read_iops_sec', 'write_iops_sec']
        tune_cvt_opts = ['read_bytes_sec', 'write_bytes_sec']
        # Extract rate_limit control parameters
        if connection_info is None or 'data' not in connection_info:
            return

        specs = connection_info['data']['qos_specs']
        vol_urn = connection_info.get('vol_urn')

        if vol_urn is None:
            return

        # The volume may be detached and attached to another instance,
        # and the qos spec may be disassociated from the volume type
        # between those two operations, so if specs is None the default
        # values above are sent to FC.
        if specs is not None:
            if isinstance(specs, dict):
                for key, value in specs.iteritems():
                    if key in tune_opts:
                        # convert bytes to KB for FC; 0 means unlimited,
                        # otherwise the value is at least 1
                        output_value = value

                        if key in tune_cvt_opts:
                            addition = 0
                            if output_value.isdigit():
                                if long(value) % 1024 != 0:
                                    addition = 1
                                output_value = long(value) / 1024 \
                                    + addition

                        qos_para[key_cvt_map[key]] = output_value
            else:
                LOG.debug(_('Unknown content in connection_info '
                            'qos_specs: %s'), specs)
                return

        qos_specs_uri = (utils.generate_uri_from_urn(vol_urn)
                         + constant.VOL_URI_MAP['modio'])

        # Send QoS IO specs to VRM with the put method
        self.put(qos_specs_uri, data=qos_para,
                 excp=fc_exc.SetQosIoFailure)
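The `addition` trick rounds the byte-to-KB conversion up, so a small nonzero limit never truncates to 0, which FC treats as unlimited. It is equivalent to a ceiling division:

    def bytes_to_kb_ceiling(value):
        """Ceiling division by 1024; 0 stays 0 ('no limit')."""
        return (value + 1023) // 1024

    assert bytes_to_kb_ceiling(0) == 0
    assert bytes_to_kb_ceiling(1) == 1
    assert bytes_to_kb_ceiling(2048) == 2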
Example #14
    def _get_volume_meta_data(self, context, volume_id):
        """Get volume metadata from cinder.

        :param context: nova request context
        :param volume_id: volume id
        :return: volume metadata
        """
        LOG.debug(_('get_volume_meta_data enter, volume_id:%s.'), volume_id)
        return self._volume_api.get(context, volume_id)
Example #15
    def delete_vm(self,
                  context,
                  instance,
                  block_device_info=None,
                  destroy_disks=True):
        """Delete VM on FC

        :param context:
        :param instance:
        :param block_device_info:
        :param destroy_disks:
        :return:
        """

        # if revert resize, only stop vm. when resize operation
        # task state will be resize_reverting or resize_confirming
        if instance and instance.get('task_state') in (
                'resize_reverting', 'resize_confirming'):
            LOG.info(_('reverting or confirming resize, only stop the vm.'))
            try:
                self.stop_vm(instance)
            except Exception as e:
                LOG.warn(_('stop vm failed, trigger rollback'))
                raise exception.InstanceFaultRollback(inner_exception=e)
            return

        try:
            fc_vm = FC_MGR.get_vm_by_uuid(instance)
        except exception.InstanceNotFound:
            LOG.warn(_('instance no longer exists, ignoring this deletion.'))
            return

        # detach volume created by cinder
        if block_device_info:
            LOG.info(_('stopping vm before detaching cinder volumes.'))
            self.stop_vm(instance)
            for vol in block_device_info['block_device_mapping']:
                self.detach_volume(vol['connection_info'], instance)

        # if vm is in fault-resuming or unknown status, stop it before delete
        if fc_vm.status in (constant.VM_STATUS.UNKNOWN,
                            constant.VM_STATUS.FAULTRESUMING):
            LOG.debug(_("vm %s status is fault-resuming or unknown, "
                        "stop it before delete."), fc_vm.uri)
            self.stop_vm(instance)

        self._delete_vm_with_fc_vm(fc_vm, destroy_disks)

        # update affinity group info if needed
        try:
            self._update_drs_rules(instance)
            self._update_affinity_groups(context, instance)
        #ignore pylint:disable=W0703
        except Exception as excp:
            utils.log_exception(excp)
            LOG.error(_('update affinity group info failed!'))
Example #16
    def check_input(self):
        """Check function input params.

        :return: None
        """
        os_option = self.get_os_options()
        LOG.debug(_('os option: %s .'), jsonutils.dumps(os_option))
        if not (os_option['osType'] and os_option['osVersion']):
            LOG.error('Invalid os option for vm %s!', self._instance['name'])
            raise fc_exc.InvalidOsOption()
Example #17
    def get_available_nodes(self, refresh=True):
        """Returns nodenames of all nodes managed by the compute service."""

        # default is refresh to ensure it is latest
        if refresh:
            self.cluster_ops.update_resources()

        node_list = self.cluster_ops.resources
        LOG.debug(_("The available nodes are: %s") % node_list)
        return node_list
Example #18
    def get_block_device_meta_data(self, context, block_device_info):
        """Get volume metadata from block device info.

        :param context: nova request context
        :param block_device_info: block device info from nova
        :return: volume metadata, or None
        """
        LOG.debug('volume info is: %s', block_device_info)
        if len(block_device_info['block_device_mapping']) > 0:
            volume_info = block_device_info['block_device_mapping'][0]
            volume_id = volume_info['connection_info']['serial']
            return self._get_volume_meta_data(context, volume_id)
        return None
Example #19
    def _attach_user_vols(self):
        """

        :return:
        """
        fc_vm = FC_MGR.get_vm_by_uuid(self._instance)
        for disk in self._volume_ops.ensure_volume(self._block_device_info):
            body = {
                'volUrn': disk['urn'],
                'sequenceNum':
                    constant.MOUNT_DEVICE_SEQNUM_MAP[disk['mount_device']]
            }
            LOG.debug(_("begin attach user vol: %s"), disk['urn'])
            self._volume_ops.attach_volume(fc_vm, vol_config=body)
Example #20
    def inject_files(self):
        """

        :return:
        """

        fc_vm = FC_MGR.get_vm_by_uuid(self._instance)
        for (path, contents) in self._injected_files:
            body = {
                'fileName': path,
                'vmData': contents
            }
            self.post(fc_vm.get_vm_action_uri('set_vm_data'), data=body)
            LOG.debug(_('inject file %s succeeded.'), path)
Example #21
    def list_all_fc_instance(self):
        """List all VMs managed by the local clusters.

        :return: list of FC VM info dicts
        """
        fc_all_vms = FC_MGR.get_all_vms(isTemplate='false',
                                        group=constant.VM_GROUP_FLAG)
        cluster_urn_list = self._cluster_ops.get_local_cluster_urn_list()
        result = []
        for fc_vm in fc_all_vms:
            if fc_vm['clusterUrn'] in cluster_urn_list:
                result.append(fc_vm)
        LOG.debug(_("after filtering by clusters, instance number is %d"),
                  len(result))
        return result
Example #22
    def ensure_volume(self, volume_info):
        """
        Ensure volume resource on FC
        :param volume_info:
        :return:
        """
        LOG.debug('volume info is: %s', volume_info)

        return [
            {
                'pci': 'IDE',
                'urn': bdm['connection_info']['vol_urn'],
                'mount_device': bdm['mount_device']
            }
            for bdm in volume_info['block_device_mapping']
        ]
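A hedged example of the translation this performs on nova's block device info (field values are illustrative):

    block_device_info = {
        'block_device_mapping': [{
            'mount_device': '/dev/sdb',
            'connection_info': {'vol_urn': 'urn:sites:1:volumes:99'},
        }]
    }
    # ensure_volume(block_device_info) would yield:
    # [{'pci': 'IDE', 'urn': 'urn:sites:1:volumes:99',
    #   'mount_device': '/dev/sdb'}]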
Example #23
    def init_all_cluster(self):
        """
        get all cluster info
        :return:
        """
        LOG.debug('self.site.cluster_uri: %s .', self.site.cluster_uri)

        cfg_cluster_list = utils.split_strip(
            constant.CONF.fusioncompute.clusters)
        cluster_list = self.get(self.site.cluster_uri)['clusters']
        LOG.debug('clusters: %s, split: %s .',
                  constant.CONF.fusioncompute.clusters,
                  ','.join(cfg_cluster_list))

        self.clusters = {}
        for cluster in cluster_list:
            if cluster['name'] in cfg_cluster_list:
                self.clusters[cluster['name']] = cluster
Example #24
    def _get_dvs_id_by_physnet_name(self, physnet_name):
        """
        get dvswitch id from cache according to physical network name

        :param physnet_name:
        :return:
        """
        LOG.debug(_("physnet_name is %s"), physnet_name)

        dvs_id = self.dvs_mapping.get(physnet_name)
        if not dvs_id or not self._is_dvs_in_hypervisor(dvs_id):
            self._init_all_fc_dvs()

        return self.dvs_mapping.get(physnet_name)
Example #25
    def migrate_disk_and_power_off(self, instance, flavor):
        """
        modify the vm spec info
        :param instance:
            nova.db.sqlalchemy.models.Instance object
            instance object that is migrated.
        :param flavor:
        :return:
        """

        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        if fc_vm.status in (constant.VM_STATUS.UNKNOWN,
                            constant.VM_STATUS.FAULTRESUMING):
            LOG.debug(_("vm %s status is fault-resuming or unknown, "
                        "can not do migrate or resize."), fc_vm.uri)
            raise exception.InstanceFaultRollback

        LOG.info(_("begin power off vm ..."))

        # 1.stop vm
        self.stop_vm(instance)

        # 2.save flavor and vol info in vm
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        old_flavor = self._gen_old_flavor_for_fc(fc_vm)
        new_flavor = self._gen_new_flavor_for_fc(flavor)
        flavor = {
            'old_flavor': old_flavor,
            'new_flavor': new_flavor
        }
        data = {
            'group': '%s:%s' % (constant.VM_GROUP_FLAG,
                                jsonutils.dumps(flavor))
        }
        self.modify_vm(fc_vm, vm_config=data)
        LOG.info(_("save flavor info success."))

        # 3. check cpu mem changes
        flavor = None
        if self._check_if_need_modify_vm_spec(old_flavor, new_flavor):
            flavor = new_flavor

        data = self._generate_vm_spec_info(flavor=flavor)
        self.modify_vm(fc_vm, vm_config=data)
        LOG.info(_("modify cpu and mem success."))
Example #26
    def get_disks_info(self):
        """
        FC itself will clone disks belonging to this tpl/vm(it should and
        must has only one sys volume).
        """
        LOG.debug(_('prepare volume'))
        disks_info = []
        disk_sequence = 1

        # sys vol info
        sys_disk_info = {
            'sequenceNum': disk_sequence,
            'quantityGB': self._instance['root_gb'],
            'isThin': constant.FC_DRIVER_JOINT_CFG['volume_is_thin']
        }
        disks_info.append(sys_disk_info)

        return disks_info
Example #27
    def _init_all_fc_dvs(self):
        """
        Send message to fc and get dvswitch info

        :return:
        """
        LOG.debug("loading dvs mapping ")
        result = {}
        data = self.get(self.site.dvswitchs_uri)
        if not data.get(constant.DVSWITCHS):
            raise fc_exc.DVSwitchNotFound()

        dvs = data.get(constant.DVSWITCHS)
        for dvswitch in dvs:
            dvs_id = utils.get_id_from_urn(dvswitch.get('urn'))
            result[dvswitch["name"]] = dvs_id

        LOG.debug("init all fc dvs %s.", jsonutils.dumps(result))
        self.dvs_mapping = result
Example #28
    def get_disks_info(self):
        """override get vm disk specific info"""

        LOG.debug(_('prepare volume'))

        disks_info = []
        for disk in self._volume_ops.ensure_volume(self._block_device_info):
            disk_info = {
                'pciType': disk['pci'],
                'volumeUrn': disk['urn'],
                'isThin': constant.FC_DRIVER_JOINT_CFG['volume_is_thin']
            }

            if disk['mount_device'] == self._root_device_name:
                disk_info['sequenceNum'] = 1
            else:
                disk_info['sequenceNum'] = \
                    constant.MOUNT_DEVICE_SEQNUM_MAP[disk['mount_device']]

            disks_info.append(disk_info)
        return disks_info
Example #29
    def get_vnc_console(self, instance, get_opt):
        """
        Get the vnc console information

        :param instance: the instance info
        :return: HuaweiConsoleVNC or ConsoleVNC
        """
        LOG.debug(_("start to get %s vnc console"), instance['name'])
        fc_vm = FC_MGR.get_vm_by_uuid(instance)
        host_ip = fc_vm.vncAcessInfo.get('hostIp', None)
        host_port = fc_vm.vncAcessInfo.get('vncPort', None)

        # raise exception if no information is provided
        if not host_port or not host_ip:
            raise exception.ConsoleNotFoundForInstance(
                instance_uuid=instance['uuid'])

        if get_opt is False:
            LOG.debug(_("return vnc info is host: %s, port:%s,"
                        " internal_access_path: %s"),
                      host_ip, host_port, 'None')
            return ctype.ConsoleVNC(host=host_ip, port=host_port)

        password = fc_vm.vncAcessInfo.get('vncPassword', None)
        LOG.debug(_("return get vnc info is host: %s, port:%s,"
                    " internal_access_path: %s"),
                  host_ip, host_port, 'None')

        return hwtype.HuaweiConsoleVNC(host_ip, host_port, password, None)
Example #30
    def get_available_resource(self, nodename):
        """Retrieve resource info.

        This method is called when nova-compute launches, and
        as part of a periodic task.

        :returns: dictionary describing resources
        """
        LOG.debug(_("get_available_resource, nodename: %s ." % nodename))
        cluster_name = self.get_cluster_name_by_nodename(nodename)
        cluster_resource = self.get_cluster_resource(cluster_name)
        if not cluster_resource:
            LOG.error(_("Invalid cluster name : %s"), nodename)
            return {}

        cluster_resource['cpu_info'] = jsonutils.dumps(
            cluster_resource['cpu_info'])
        cluster_resource['supported_instances'] = jsonutils.dumps(
            cluster_resource['supported_instances'])

        LOG.debug("the resource status is %s", cluster_resource)
        return cluster_resource
Example #31
    def _get_vm_customization_nics(self):
        """get vm customization nics"""
        cus_nics = []
        for vif in self._vifs:
            if vif['enable_dhcp']:
                cus_nic = {
                    'sequenceNum': vif['sequence_num'] + 1
                }
                cus_nics.append(cus_nic)
                continue

            network = vif['network_info']['network']
            subnet_ipv4_list = [s for s in network['subnets']
                                if s['version'] == constant.IPV4_VERSION]
            if len(subnet_ipv4_list) > 0:
                ip_ipv4 = None

                dns = [None, None]
                if len(subnet_ipv4_list[0]['ips']) > 0:
                    ip_ipv4 = subnet_ipv4_list[0]['ips'][0]

                dns_len = len(subnet_ipv4_list[0]['dns'])
                for index in range(0, min(2, dns_len)):
                    dns[index] = subnet_ipv4_list[0]['dns'][index]['address']

                netmask_ipv4 = str(subnet_ipv4_list[0].as_netaddr().netmask)
                gateway_ipv4 = subnet_ipv4_list[0]['gateway']['address']

                cus_nic = {'sequenceNum': vif['sequence_num'] + 1,
                           'ip': ip_ipv4['address'] if ip_ipv4 else '',
                           'gateway': gateway_ipv4,
                           'netmask': netmask_ipv4,
                           'ipVersion': constant.IPV4_VERSION,
                           'setdns': dns[0],
                           'adddns': dns[1]}
                cus_nics.append(cus_nic)

        LOG.debug(_('cus_nic: %s.'), jsonutils.dumps(cus_nics))
        return cus_nics
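For a single DHCP-disabled IPv4 NIC, the resulting customization entry carries static addressing; an illustrative result (addresses hypothetical):

    # Hypothetical cus_nics output for one static IPv4 NIC:
    cus_nics = [{'sequenceNum': 1,
                 'ip': '192.0.2.10',
                 'gateway': '192.0.2.1',
                 'netmask': '255.255.255.0',
                 'ipVersion': 4,
                 'setdns': '192.0.2.53',
                 'adddns': None}]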