Example #1
    def host_maintenance_mode(self, host, mode):
        """Starts/Stops host maintenance. On start, it triggers
        guest VMs evacuation.
        """
        ctxt = context.get_admin_context()

        if not mode:
            self._set_service_state(host=host,
                                    binary='nova-compute',
                                    is_disabled=False)
            LOG.info(_LI('Host is no longer under maintenance.'))
            return 'off_maintenance'

        self._set_service_state(host=host,
                                binary='nova-compute',
                                is_disabled=True)
        vms_uuids = self._vmops.list_instance_uuids()
        for vm_uuid in vms_uuids:
            self._wait_for_instance_pending_task(ctxt, vm_uuid)

        vm_names = self._vmutils.list_instances()
        for vm_name in vm_names:
            self._migrate_vm(ctxt, vm_name, host)

        vms_uuid_after_migration = self._vmops.list_instance_uuids()
        remaining_vms = len(vms_uuid_after_migration)
        if remaining_vms == 0:
            LOG.info(
                _LI('All vms have been migrated successfully. '
                    'Host is down for maintenance.'))
            return 'on_maintenance'
        raise exception.MigrationError(reason=_(
            'Not all vms have been migrated: %s remaining instances.') %
                                       remaining_vms)
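
The pattern in this example — disable the nova-compute service, drain pending tasks, migrate every VM, then verify the host is empty before reporting 'on_maintenance' — can be distilled into a standalone sketch. The helper callables below are illustrative stand-ins for the driver's utilities, not part of its API:

    def evacuate_host(list_vm_names, migrate_vm):
        # Migrate every VM currently running on the host.
        for vm_name in list_vm_names():
            migrate_vm(vm_name)
        # Report success only once no instances remain behind.
        remaining = len(list_vm_names())
        if remaining:
            raise RuntimeError('%d instances were not migrated' % remaining)
        return 'on_maintenance'
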
Example #2
    def host_maintenance_mode(self, host, mode):
        """Starts/Stops host maintenance. On start, it triggers
        guest VMs evacuation.
        """
        ctxt = context.get_admin_context()

        if not mode:
            self._set_service_state(host=host, binary='nova-compute',
                                    is_disabled=False)
            LOG.info(_LI('Host is no longer under maintenance.'))
            return 'off_maintenance'

        self._set_service_state(host=host, binary='nova-compute',
                                is_disabled=True)
        vms_uuids = self._vmops.list_instance_uuids()
        for vm_uuid in vms_uuids:
            self._wait_for_instance_pending_task(ctxt, vm_uuid)

        vm_names = self._vmutils.list_instances()
        for vm_name in vm_names:
            self._migrate_vm(ctxt, vm_name, host)

        vms_uuid_after_migration = self._vmops.list_instance_uuids()
        remaining_vms = len(vms_uuid_after_migration)
        if remaining_vms == 0:
            LOG.info(_LI('All vms have been migrated successfully. '
                         'Host is down for maintenance.'))
            return 'on_maintenance'
        raise exception.MigrationError(
            reason=_('Not all vms have been migrated: %s remaining instances.')
            % remaining_vms)
Example #3
    def _create_config_drive(self,
                             instance,
                             injected_files,
                             admin_password,
                             network_info,
                             rescue=False):
        if CONF.config_drive_format != 'iso9660':
            raise vmutils.UnsupportedConfigDriveFormatException(
                _('Invalid config_drive_format "%s"') %
                CONF.config_drive_format)

        LOG.info(_LI('Using config drive for instance'), instance=instance)

        extra_md = {}
        if admin_password and CONF.hyperv.config_drive_inject_password:
            extra_md['admin_pass'] = admin_password

        inst_md = instance_metadata.InstanceMetadata(instance,
                                                     content=injected_files,
                                                     extra_md=extra_md,
                                                     network_info=network_info)

        configdrive_path_iso = self._pathutils.get_configdrive_path(
            instance.name, constants.DVD_FORMAT, rescue=rescue)
        LOG.info(_LI('Creating config drive at %(path)s'),
                 {'path': configdrive_path_iso},
                 instance=instance)

        with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
            try:
                cdb.make_drive(configdrive_path_iso)
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Creating config drive failed with '
                                  'error: %s'),
                              e,
                              instance=instance)

        if not CONF.hyperv.config_drive_cdrom:
            configdrive_path = self._pathutils.get_configdrive_path(
                instance.name, constants.DISK_FORMAT_VHD, rescue=rescue)
            utils.execute(CONF.hyperv.qemu_img_cmd,
                          'convert',
                          '-f',
                          'raw',
                          '-O',
                          'vpc',
                          configdrive_path_iso,
                          configdrive_path,
                          attempts=1)
            self._pathutils.remove(configdrive_path_iso)
        else:
            configdrive_path = configdrive_path_iso

        return configdrive_path
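
When config_drive_cdrom is disabled, the ISO is converted to a VHD using qemu-img's 'vpc' output format, which is the container Hyper-V expects. Below is a minimal standalone sketch of just that conversion step, using subprocess in place of nova's utils.execute (the command name and paths are placeholders):

    import subprocess

    def convert_iso_to_vhd(qemu_img_cmd, iso_path, vhd_path):
        # 'vpc' is qemu-img's name for the Hyper-V VHD format; the ISO
        # contents are read as a raw byte stream, as in the code above.
        subprocess.check_call([qemu_img_cmd, 'convert', '-f', 'raw',
                               '-O', 'vpc', iso_path, vhd_path])
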
Example #4
    def _failover_migrate(self, instance_name, old_host, new_host):
        """This method will check if the generated event is a legitimate
        failover to this node. If it is, it will proceed to prepare the
        failovered VM if necessary and update the owner of the compute vm in
        nova and ports in neutron.
        """
        LOG.info(
            _LI('Checking instance failover %(instance)s to %(new_host)s '
                'from host %(old_host)s.'), {
                    'instance': instance_name,
                    'new_host': new_host,
                    'old_host': old_host
                })

        instance = self._get_instance_by_name(instance_name)
        if not instance:
            # Some instances on the hypervisor may not be tracked by nova
            LOG.debug('Instance %s does not exist in nova. Skipping.',
                      instance_name)
            return

        nw_info = self._network_api.get_instance_nw_info(
            self._context, instance)

        if instance.task_state == task_states.MIGRATING:
            # In case of live migration triggered by the user, we get the
            # event that the instance changed host but we do not want
            # to treat it as a failover.
            LOG.debug('Instance %s is live migrating.', instance_name)
            return
        if instance.host.upper() == self._this_node.upper():
            return

        if old_host and old_host.upper() == self._this_node.upper():
            LOG.debug('Actions at source node.')
            self._vmops.unplug_vifs(instance, nw_info)
            return
        elif new_host.upper() != self._this_node.upper():
            LOG.debug('Instance %s did not failover to this node.',
                      instance_name)
            return

        LOG.info(_LI('Instance %(instance)s failed over to %(host)s.'), {
            'instance': instance_name,
            'host': new_host
        })

        self._nova_failover_server(instance, new_host)
        self._failover_migrate_networks(instance, old_host)
        self._vmops.post_start_vifs(instance, nw_info)
        self._serial_console_ops.start_console_handler(instance_name)
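
The event filtering in this example reduces to a small decision function: ignore user-triggered live migrations and events for a host nova already tracks, unplug VIFs when this node is the source, and claim the instance only when this node is the destination. A hedged sketch with plain strings standing in for the driver's actions:

    def classify_failover(task_state, instance_host, old_host, new_host,
                          this_node):
        node = this_node.upper()
        if task_state == 'migrating':
            return 'ignore'  # user-triggered live migration, not a failover
        if instance_host.upper() == node:
            return 'ignore'  # nova already records this node as the owner
        if old_host and old_host.upper() == node:
            return 'unplug'  # actions at the source node
        if new_host.upper() != node:
            return 'ignore'  # the instance failed over to some other node
        return 'claim'       # this node is the failover destination
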
Example #5
    def _create_config_drive(self, instance, injected_files, admin_password,
                             network_info, rescue=False):
        if CONF.config_drive_format != 'iso9660':
            raise vmutils.UnsupportedConfigDriveFormatException(
                _('Invalid config_drive_format "%s"') %
                CONF.config_drive_format)

        LOG.info(_LI('Using config drive for instance'), instance=instance)

        extra_md = {}
        if admin_password and CONF.hyperv.config_drive_inject_password:
            extra_md['admin_pass'] = admin_password

        inst_md = instance_metadata.InstanceMetadata(instance,
                                                     content=injected_files,
                                                     extra_md=extra_md,
                                                     network_info=network_info)

        configdrive_path_iso = self._pathutils.get_configdrive_path(
            instance.name, constants.DVD_FORMAT, rescue=rescue)
        LOG.info(_LI('Creating config drive at %(path)s'),
                 {'path': configdrive_path_iso}, instance=instance)

        with configdrive.ConfigDriveBuilder(instance_md=inst_md) as cdb:
            try:
                cdb.make_drive(configdrive_path_iso)
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception():
                    LOG.error(_LE('Creating config drive failed with '
                                  'error: %s'),
                              e, instance=instance)

        if not CONF.hyperv.config_drive_cdrom:
            configdrive_path = self._pathutils.get_configdrive_path(
                instance.name, constants.DISK_FORMAT_VHD, rescue=rescue)
            utils.execute(CONF.hyperv.qemu_img_cmd,
                          'convert',
                          '-f',
                          'raw',
                          '-O',
                          'vpc',
                          configdrive_path_iso,
                          configdrive_path,
                          attempts=1)
            self._pathutils.remove(configdrive_path_iso)
        else:
            configdrive_path = configdrive_path_iso

        return configdrive_path
Example #6
    def _failover_migrate(self, instance_name, old_host, new_host):
        """This method will check if the generated event is a legitimate
        failover to this node. If it is, it will proceed to prepare the
        failovered VM if necessary and update the owner of the compute vm in
        nova and ports in neutron.
        """
        LOG.info(_LI('Checking instance failover %(instance)s to %(new_host)s '
                     'from host %(old_host)s.'),
                 {'instance': instance_name,
                  'new_host': new_host,
                  'old_host': old_host})

        instance = self._get_instance_by_name(instance_name)
        if not instance:
            # Some instances on the hypervisor may not be tracked by nova
            LOG.debug('Instance %s does not exist in nova. Skipping.',
                      instance_name)
            return

        nw_info = self._network_api.get_instance_nw_info(self._context,
                                                         instance)

        if instance.task_state == task_states.MIGRATING:
            # In case of live migration triggered by the user, we get the
            # event that the instance changed host but we do not want
            # to treat it as a failover.
            LOG.debug('Instance %s is live migrating.', instance_name)
            return
        if instance.host.upper() == self._this_node.upper():
            return

        if old_host and old_host.upper() == self._this_node.upper():
            LOG.debug('Actions at source node.')
            self._vmops.unplug_vifs(instance, nw_info)
            return
        elif new_host.upper() != self._this_node.upper():
            LOG.debug('Instance %s did not failover to this node.',
                      instance_name)
            return

        LOG.info(_LI('Instance %(instance)s failed over to %(host)s.'),
                 {'instance': instance_name,
                  'host': new_host})

        self._nova_failover_server(instance, new_host)
        self._failover_migrate_networks(instance, old_host)
        self._vmops.post_start_vifs(instance, nw_info)
        self._serial_console_ops.start_console_handler(instance_name)
Example #7
    def destroy(self,
                instance,
                network_info=None,
                block_device_info=None,
                destroy_disks=True):
        instance_name = instance.name
        LOG.info(_LI("Got request to destroy instance"), instance=instance)
        try:
            if self._vmutils.vm_exists(instance_name):
                self.power_off(instance)

                self._vmutils.destroy_vm(instance_name)
                self._volumeops.disconnect_volumes(block_device_info)
            else:
                LOG.debug("Instance not found", instance=instance)

            if destroy_disks:
                self._delete_disk_files(instance_name)
            if network_info:
                for vif in network_info:
                    vif_driver = self._get_vif_driver(vif.get('type'))
                    vif_driver.unplug(instance, vif)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Failed to destroy instance: %s'),
                              instance_name)
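
excutils.save_and_reraise_exception logs the failure while re-raising the original exception with its traceback intact. A minimal sketch of the same contract in plain Python, without oslo.utils:

    import logging

    LOG = logging.getLogger(__name__)

    def destroy_with_logging(destroy_fn, instance_name):
        try:
            destroy_fn()
        except Exception:
            # A bare raise preserves the original traceback, mirroring
            # the default behavior of save_and_reraise_exception.
            LOG.exception('Failed to destroy instance: %s', instance_name)
            raise
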
Example #8
    def get_instance_dir(self, instance_name, remote_server=None,
                         create_dir=True, remove_dir=False):
        instance_dir = self._get_instances_sub_dir(
            instance_name, remote_server,
            create_dir=False, remove_dir=False)

        # In some situations, the instance files may reside at a different
        # location than the configured one.
        if not os.path.exists(instance_dir):
            vmutils = (self._vmutils if not remote_server
                       else utilsfactory.get_vmutils(remote_server))
            try:
                instance_dir = vmutils.get_vm_config_root_dir(
                    instance_name)
                if remote_server:
                    instance_dir = self._get_remote_unc_path(remote_server,
                                                             instance_dir)
                LOG.info(_LI("Found instance dir at non-default location: %s"),
                         instance_dir)
            except os_win_exc.HyperVVMNotFoundException:
                pass

        self._check_dir(instance_dir,
                        create_dir=create_dir,
                        remove_dir=remove_dir)
        return instance_dir
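
The lookup prefers the configured instances directory and falls back to the VM's configuration root only when the default path is missing. A standalone sketch, with lookup_config_root standing in for the os-win call and LookupError for HyperVVMNotFoundException:

    import os

    def find_instance_dir(default_dir, lookup_config_root):
        if os.path.exists(default_dir):
            return default_dir
        try:
            # The VM may keep its files outside the configured location.
            return lookup_config_root()
        except LookupError:
            return default_dir
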
Example #9
    def destroy(self, instance, network_info=None, block_device_info=None,
                destroy_disks=True):
        instance_name = instance.name
        LOG.info(_LI("Got request to destroy instance"), instance=instance)
        try:
            if self._vmutils.vm_exists(instance_name):

                # Stop the VM first.
                self._vmutils.stop_vm_jobs(instance_name)
                self.power_off(instance)

                self._vmutils.destroy_vm(instance_name)
                self._volumeops.disconnect_volumes(block_device_info)
            else:
                LOG.debug("Instance not found", instance=instance)

            if destroy_disks:
                self._delete_disk_files(instance_name)
            if network_info:
                for vif in network_info:
                    vif_driver = self._get_vif_driver(vif.get('type'))
                    vif_driver.unplug(instance, vif)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Failed to destroy instance: %s'),
                              instance_name)
Example #10
    def validate_initiators(self):
        # The MS iSCSI initiator service can manage the software iSCSI
        # initiator as well as hardware initiators.
        initiator_list = CONF.hyperv.iscsi_initiator_list
        valid_initiators = True

        if not initiator_list:
            LOG.info(
                _LI("No iSCSI initiator was explicitly requested. "
                    "The Microsoft iSCSI initiator will choose the "
                    "initiator when estabilishing sessions."))
        else:
            available_initiators = self._iscsi_utils.get_iscsi_initiators()
            for initiator in initiator_list:
                if initiator not in available_initiators:
                    valid_initiators = False
                    msg = _LW("The requested initiator %(req_initiator)s "
                              "is not in the list of available initiators: "
                              "%(avail_initiators)s.")
                    LOG.warning(
                        msg,
                        dict(req_initiator=initiator,
                             avail_initiators=available_initiators))

        return valid_initiators
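
The core of the check is membership: every explicitly requested initiator must be known to the Microsoft iSCSI initiator service. A hedged sketch over plain lists, returning the missing entries so a caller can log them:

    def check_initiators(requested, available):
        missing = [i for i in requested if i not in available]
        return not missing, missing

For instance, check_initiators(['iqn.a'], ['iqn.a', 'iqn.b']) yields (True, []).
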
Example #11
    def _migrate_vm(self, ctxt, vm_name, host):
        try:
            instance_uuid = self._vmutils.get_instance_uuid(vm_name)
            if not instance_uuid:
                LOG.info(
                    _LI('VM "%s" running on this host was not created by '
                        'nova. Skip migrating this vm to a new host.'),
                    vm_name)
                return
            instance = objects.Instance.get_by_uuid(ctxt, instance_uuid)
            if instance.vm_state == vm_states.ACTIVE:
                self._api.live_migrate(ctxt,
                                       instance,
                                       block_migration=False,
                                       disk_over_commit=False,
                                       host_name=None)
            else:
                self._api.resize(ctxt,
                                 instance,
                                 flavor_id=None,
                                 clean_shutdown=True)
            self._wait_for_instance_pending_task(ctxt, instance_uuid)
        except Exception as e:
            LOG.error(_LE('Migrating vm failed with error: %s'), e)
            raise exception.MigrationError(reason='Unable to migrate %s.' %
                                           vm_name)
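
The strategy choice above is: skip VMs that nova does not track, live-migrate ACTIVE instances, and cold-migrate (a resize with no flavor change) everything else. A sketch with callables standing in for the compute API:

    def migrate_instance(instance_uuid, vm_state, live_migrate, cold_migrate):
        if not instance_uuid:
            return False      # not created by nova; leave the VM alone
        if vm_state == 'active':
            live_migrate()    # running instances are moved live
        else:
            cold_migrate()    # anything else is cold-migrated
        return True
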
Example #12
    def destroy(self,
                instance,
                network_info=None,
                block_device_info=None,
                destroy_disks=True):
        instance_name = instance.name
        LOG.info(_LI("Got request to destroy instance"), instance=instance)
        try:
            if self._vmutils.vm_exists(instance_name):

                # Stop the VM first.
                self._vmutils.stop_vm_jobs(instance_name)
                self.power_off(instance)

                self._vmutils.destroy_vm(instance_name)
                self._volumeops.disconnect_volumes(block_device_info)
            else:
                LOG.debug("Instance not found", instance=instance)

            if destroy_disks:
                self._delete_disk_files(instance_name)
            self.unplug_vifs(instance, network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE('Failed to destroy instance: %s'),
                              instance_name)
Example #13
    def get_instance_dir(self,
                         instance_name,
                         remote_server=None,
                         create_dir=True,
                         remove_dir=False):
        instance_dir = self._get_instances_sub_dir(instance_name,
                                                   remote_server,
                                                   create_dir=False,
                                                   remove_dir=False)

        # In some situations, the instance files may reside at a different
        # location than the configured one.
        if not os.path.exists(instance_dir):
            vmutils = (self._vmutils if not remote_server else
                       utilsfactory.get_vmutils(remote_server))
            try:
                instance_dir = vmutils.get_vm_config_root_dir(instance_name)
                if remote_server:
                    instance_dir = self._get_remote_unc_path(
                        remote_server, instance_dir)
                LOG.info(_LI("Found instance dir at non-default location: %s"),
                         instance_dir)
            except os_win_exc.HyperVVMNotFoundException:
                pass

        self._check_dir(instance_dir,
                        create_dir=create_dir,
                        remove_dir=remove_dir)
        return instance_dir
Example #14
    def _soft_shutdown(self, instance,
                       timeout=CONF.hyperv.wait_soft_reboot_seconds,
                       retry_interval=SHUTDOWN_TIME_INCREMENT):
        """Perform a soft shutdown on the VM.

           :return: True if the instance was shut down within the time
                    limit, False otherwise.
        """
        LOG.debug("Performing Soft shutdown on instance", instance=instance)

        while timeout > 0:
            # Perform a soft shutdown on the instance.
            # Wait up to wait_time for the instance to power off.
            # If it has not shut down, retry until it succeeds or the total
            # time waited reaches the timeout.
            wait_time = min(retry_interval, timeout)
            try:
                LOG.debug("Soft shutdown instance, timeout remaining: %d",
                          timeout, instance=instance)
                self._vmutils.soft_shutdown_vm(instance.name)
                if self._wait_for_power_off(instance.name, wait_time):
                    LOG.info(_LI("Soft shutdown succeeded."),
                             instance=instance)
                    return True
            except os_win_exc.HyperVException as e:
                # Exception is raised when trying to shutdown the instance
                # while it is still booting.
                LOG.debug("Soft shutdown failed: %s", e, instance=instance)
                time.sleep(wait_time)

            timeout -= retry_interval

        LOG.warning(_LW("Timed out while waiting for soft shutdown."),
                    instance=instance)
        return False
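
The retry loop can be exercised on its own: each pass asks the guest to shut down, waits up to retry_interval seconds for power-off, and sleeps before retrying when the request fails because the guest is still booting. A self-contained sketch with callables in place of the Hyper-V utilities:

    import time

    def soft_shutdown_with_retries(request_shutdown, wait_for_power_off,
                                   timeout, retry_interval):
        while timeout > 0:
            wait_time = min(retry_interval, timeout)
            try:
                request_shutdown()
                if wait_for_power_off(wait_time):
                    return True
            except RuntimeError:
                # Stand-in for the HyperVException raised while booting.
                time.sleep(wait_time)
            timeout -= retry_interval
        return False
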
Example #15
    def _soft_shutdown(self, instance,
                       timeout=CONF.hyperv.wait_soft_reboot_seconds,
                       retry_interval=SHUTDOWN_TIME_INCREMENT):
        """Perform a soft shutdown on the VM.

           :return: True if the instance was shut down within the time
                    limit, False otherwise.
        """
        LOG.debug("Performing Soft shutdown on instance", instance=instance)

        while timeout > 0:
            # Perform a soft shutdown on the instance.
            # Wait up to wait_time for the instance to power off.
            # If it has not shut down, retry until it succeeds or the total
            # time waited reaches the timeout.
            wait_time = min(retry_interval, timeout)
            try:
                LOG.debug("Soft shutdown instance, timeout remaining: %d",
                          timeout, instance=instance)
                self._vmutils.soft_shutdown_vm(instance.name)
                if self._wait_for_power_off(instance.name, wait_time):
                    LOG.info(_LI("Soft shutdown succeeded."),
                             instance=instance)
                    return True
            except vmutils.HyperVException as e:
                # Exception is raised when trying to shutdown the instance
                # while it is still booting.
                LOG.debug("Soft shutdown failed: %s", e, instance=instance)
                time.sleep(wait_time)

            timeout -= retry_interval

        LOG.warning(_LW("Timed out while waiting for soft shutdown."),
                    instance=instance)
        return False
Example #16
    def _remove_if_old_image(self, image):
        backing_files = self._get_image_backing_files(image)
        max_age_seconds = CONF.remove_unused_original_minimum_age_seconds

        for img in backing_files:
            age_seconds = self._pathutils.get_age_of_file(img)
            if age_seconds > max_age_seconds:
                LOG.info(_LI("Removing old, unused image: %s"), img)
                self._remove_old_image(img)
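
The age test can be reproduced with file mtimes directly; the driver delegates to pathutils.get_age_of_file, but the logic is equivalent. A hedged sketch:

    import os
    import time

    def is_older_than(path, max_age_seconds):
        # Age measured from the file's last modification time.
        age_seconds = time.time() - os.path.getmtime(path)
        return age_seconds > max_age_seconds
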
Example #17
    def ensure_share_mounted(self, connection_info):
        export_path = self._get_export_path(connection_info)

        if self._smbutils.is_local_share(export_path):
            LOG.info(
                _LI("Skipping mounting share %s, "
                    "using local path instead."), export_path)
        elif not self._smbutils.check_smb_mapping(export_path):
            opts_str = connection_info['data'].get('options') or ''
            username, password = self._parse_credentials(opts_str)
            self._smbutils.mount_smb_share(export_path,
                                           username=username,
                                           password=password)
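
The decision has three branches: local shares are used in place, shares that are already mapped are left alone, and only unmapped remote shares trigger a mount. A sketch with predicates standing in for the smbutils calls:

    def ensure_mounted(export_path, is_local, is_mapped, mount):
        if is_local(export_path):
            return 'local'           # use the local path directly
        if not is_mapped(export_path):
            mount(export_path)
            return 'mounted'
        return 'already-mapped'      # an SMB mapping already exists
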
Example #18
    def ensure_share_mounted(self, connection_info):
        export_path = self._get_export_path(connection_info)

        if self._smbutils.is_local_share(export_path):
            LOG.info(_LI("Skipping mounting share %s, "
                         "using local path instead."),
                     export_path)
        elif not self._smbutils.check_smb_mapping(export_path):
            opts_str = connection_info['data'].get('options') or ''
            username, password = self._parse_credentials(opts_str)
            self._smbutils.mount_smb_share(export_path,
                                           username=username,
                                           password=password)
Example #19
    def spawn(self,
              context,
              instance,
              image_meta,
              injected_files,
              admin_password,
              network_info,
              block_device_info=None):
        """Create a new VM and start it."""
        LOG.info(_LI("Spawning new instance"), instance=instance)

        instance_name = instance.name
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        # Make sure we're starting with a clean slate.
        self._delete_disk_files(instance_name)
        if ('properties' in image_meta and
                'mtwilson_trustpolicy_location' in image_meta['properties']):
            instance['metadata']['mtwilson_trustpolicy_location'] = (
                image_meta['properties']['mtwilson_trustpolicy_location'])
            instance.save()

        if self._volumeops.ebs_root_in_block_devices(block_device_info):
            root_vhd_path = None
        else:
            root_vhd_path = self._create_root_vhd(context, instance)

        eph_vhd_path = self.create_ephemeral_vhd(instance)
        # TODO(lpetrut): move this to the create_instance method.
        vm_gen = self.get_image_vm_generation(root_vhd_path, image_meta)

        try:
            self.create_instance(instance, network_info, block_device_info,
                                 root_vhd_path, eph_vhd_path, vm_gen,
                                 image_meta)

            if configdrive.required_by(instance):
                configdrive_path = self._create_config_drive(
                    instance, injected_files, admin_password, network_info)

                self.attach_config_drive(instance, configdrive_path, vm_gen)

            self.power_on(instance, network_info=network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.destroy(instance)
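
The try/except around instance creation gives spawn a simple rollback contract: any failure after the disks are prepared tears the half-built instance down before the exception propagates. A minimal sketch of that contract:

    def spawn_with_rollback(create_instance, power_on, destroy):
        try:
            create_instance()
            power_on()
        except Exception:
            # Clean up the partially created instance, then re-raise.
            destroy()
            raise
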
Example #20
    def connect_volume(self, connection_info):
        connection_properties = connection_info['data']
        auth_method = connection_properties.get('auth_method')

        if auth_method and auth_method.upper() != 'CHAP':
            LOG.error(
                _LE("Unsupported iSCSI authentication "
                    "method: %(auth_method)s."), dict(auth_method=auth_method))
            raise exception.UnsupportedBDMVolumeAuthMethod(
                auth_method=auth_method)

        volume_connected = False
        for (initiator_name, target_portal, target_iqn,
             target_lun) in self._get_all_paths(connection_properties):
            try:
                msg = _LI("Attempting to estabilish an iSCSI session to "
                          "target %(target_iqn)s on portal %(target_portal)s "
                          "acessing LUN %(target_lun)s using initiator "
                          "%(initiator_name)s.")
                LOG.info(
                    msg,
                    dict(target_portal=target_portal,
                         target_iqn=target_iqn,
                         target_lun=target_lun,
                         initiator_name=initiator_name))
                self._iscsi_utils.login_storage_target(
                    target_lun=target_lun,
                    target_iqn=target_iqn,
                    target_portal=target_portal,
                    auth_username=connection_properties.get('auth_username'),
                    auth_password=connection_properties.get('auth_password'),
                    mpio_enabled=CONF.hyperv.use_multipath_io,
                    initiator_name=initiator_name)

                volume_connected = True
                if not CONF.hyperv.use_multipath_io:
                    break
            except os_win_exc.OSWinException:
                LOG.exception(_LE("Could not connect iSCSI target %s."),
                              target_iqn)

        if not volume_connected:
            raise exception.VolumeAttachFailed(
                _("Could not connect volume %s.") %
                connection_properties['volume_id'])
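
The login loop behaves differently depending on MPIO: with multipath enabled every discovered path is logged in, while without it the loop stops at the first success; the attach fails only when no path connected at all. A standalone sketch, using ConnectionError as a stand-in for os_win_exc.OSWinException:

    def connect_paths(paths, login, use_multipath):
        connected = False
        for path in paths:
            try:
                login(path)
                connected = True
                if not use_multipath:
                    break    # one working path suffices without MPIO
            except ConnectionError:
                continue     # keep trying the remaining paths
        if not connected:
            raise RuntimeError('no iSCSI path could be connected')
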
Example #21
    def connect_volume(self, connection_info):
        connection_properties = connection_info['data']
        auth_method = connection_properties.get('auth_method')

        if auth_method and auth_method.upper() != 'CHAP':
            LOG.error(_LE("Unsupported iSCSI authentication "
                          "method: %(auth_method)s."),
                      dict(auth_method=auth_method))
            raise exception.UnsupportedBDMVolumeAuthMethod(
                auth_method=auth_method)

        volume_connected = False
        for (initiator_name,
             target_portal,
             target_iqn,
             target_lun) in self._get_all_paths(connection_properties):
            try:
                msg = _LI("Attempting to estabilish an iSCSI session to "
                          "target %(target_iqn)s on portal %(target_portal)s "
                          "acessing LUN %(target_lun)s using initiator "
                          "%(initiator_name)s.")
                LOG.info(msg, dict(target_portal=target_portal,
                                   target_iqn=target_iqn,
                                   target_lun=target_lun,
                                   initiator_name=initiator_name))
                self._iscsi_utils.login_storage_target(
                    target_lun=target_lun,
                    target_iqn=target_iqn,
                    target_portal=target_portal,
                    auth_username=connection_properties.get('auth_username'),
                    auth_password=connection_properties.get('auth_password'),
                    mpio_enabled=CONF.hyperv.use_multipath_io,
                    initiator_name=initiator_name)

                volume_connected = True
                if not CONF.hyperv.use_multipath_io:
                    break
            except os_win_exc.OSWinException:
                LOG.exception(_LE("Could not connect iSCSI target %s."),
                              target_iqn)

        if not volume_connected:
            raise exception.VolumeAttachFailed(
                _("Could not connect volume %s.") %
                connection_properties['volume_id'])
Example #22
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info, block_device_info=None):
        """Create a new VM and start it."""
        LOG.info(_LI("Spawning new instance"), instance=instance)

        instance_name = instance.name
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        # Make sure we're starting with a clean slate.
        self._delete_disk_files(instance_name)
        if ('properties' in image_meta and
                'mtwilson_trustpolicy_location' in image_meta['properties']):
            instance['metadata']['mtwilson_trustpolicy_location'] = (
                image_meta['properties']['mtwilson_trustpolicy_location'])
            instance.save()

        if self._volumeops.ebs_root_in_block_devices(block_device_info):
            root_vhd_path = None
        else:
            root_vhd_path = self._create_root_vhd(context, instance)

        eph_vhd_path = self.create_ephemeral_vhd(instance)
        # TODO(lpetrut): move this to the create_instance method.
        vm_gen = self.get_image_vm_generation(root_vhd_path, image_meta)

        try:
            self.create_instance(instance, network_info, block_device_info,
                                 root_vhd_path, eph_vhd_path,
                                 vm_gen, image_meta)

            if configdrive.required_by(instance):
                configdrive_path = self._create_config_drive(instance,
                                                             injected_files,
                                                             admin_password,
                                                             network_info)

                self.attach_config_drive(instance, configdrive_path, vm_gen)

            self.power_on(instance, network_info=network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.destroy(instance)
Example #23
    def spawn(self, context, instance, image_meta, injected_files,
              admin_password, network_info, block_device_info=None):
        """Create a new VM and start it."""
        LOG.debug(_LI("Spawning new instance"), instance=instance)

        instance_name = instance.name
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        # Make sure we're starting with a clean slate.
        self._delete_disk_files(instance_name)
        if ('properties' in image_meta and
                'mtwilson_trustpolicy_location' in image_meta['properties']):
            instance['metadata']['mtwilson_trustpolicy_location'] = (
                image_meta['properties']['mtwilson_trustpolicy_location'])
            instance.save()

        vm_gen = self.get_image_vm_generation(instance.uuid, image_meta)

        self._block_device_manager.validate_and_update_bdi(
            instance, image_meta, vm_gen, block_device_info)
        root_device = block_device_info['root_disk']
        self._create_root_device(context, instance, root_device, vm_gen)
        self._create_ephemerals(instance, block_device_info['ephemerals'])

        try:
            with self.wait_vif_plug_events(instance, network_info):
                self.create_instance(instance, network_info,
                                     root_device, block_device_info,
                                     vm_gen, image_meta)

            if configdrive.required_by(instance):
                configdrive_path = self._create_config_drive(instance,
                                                             injected_files,
                                                             admin_password,
                                                             network_info)

                self.attach_config_drive(instance, configdrive_path, vm_gen)
            self.set_boot_order(vm_gen, block_device_info, instance.name)
            self.power_on(instance, network_info=network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.destroy(instance)
Example #24
    def _migrate_vm(self, ctxt, vm_name, host):
        try:
            instance_uuid = self._vmutils.get_instance_uuid(vm_name)
            if not instance_uuid:
                LOG.info(_LI('VM "%s" running on this host was not created by '
                             'nova. Skip migrating this vm to a new host.'),
                         vm_name)
                return
            instance = objects.Instance.get_by_uuid(ctxt, instance_uuid)
            if instance.vm_state == vm_states.ACTIVE:
                self._api.live_migrate(ctxt, instance, block_migration=False,
                                       disk_over_commit=False, host_name=None)
            else:
                self._api.resize(ctxt, instance, flavor_id=None,
                                 clean_shutdown=True)
            self._wait_for_instance_pending_task(ctxt, instance_uuid)
        except Exception as e:
            LOG.error(_LE('Migrating vm failed with error: %s'), e)
            raise exception.MigrationError(reason='Unable to migrate %s.'
                                           % vm_name)
Example #25
    def get_iscsi_initiator(self):
        """Get iscsi initiator name for this machine."""

        computer_system = self._conn_cimv2.Win32_ComputerSystem()[0]
        hostname = computer_system.name
        keypath = ("SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\"
                   "iSCSI\\Discovery")
        try:
            key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, keypath, 0,
                                  _winreg.KEY_ALL_ACCESS)
            temp = _winreg.QueryValueEx(key, 'DefaultInitiatorName')
            initiator_name = str(temp[0])
            _winreg.CloseKey(key)
        except Exception:
            LOG.info(_LI("The ISCSI initiator name can't be found. "
                         "Choosing the default one"))
            initiator_name = "iqn.1991-05.com.microsoft:" + hostname.lower()
            if computer_system.PartofDomain:
                initiator_name += '.' + computer_system.Domain.lower()
        return initiator_name
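
When the registry lookup fails, the default Microsoft initiator IQN is rebuilt from the hostname, with the domain appended for domain-joined machines. That fallback is portable enough to sketch on its own (the sample values are illustrative):

    def default_initiator_name(hostname, domain=None):
        initiator_name = 'iqn.1991-05.com.microsoft:' + hostname.lower()
        if domain:
            initiator_name += '.' + domain.lower()
        return initiator_name

For example, default_initiator_name('HV-01', 'corp.example.com') returns 'iqn.1991-05.com.microsoft:hv-01.corp.example.com'.
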
Example #26
    def spawn(self,
              context,
              instance,
              image_meta,
              injected_files,
              admin_password,
              network_info,
              block_device_info=None):
        """Create a new VM and start it."""
        LOG.info(_LI("Spawning new instance"), instance=instance)

        instance_name = instance.name
        if self._vmutils.vm_exists(instance_name):
            raise exception.InstanceExists(name=instance_name)

        # Make sure we're starting with a clean slate.
        self._delete_disk_files(instance_name)

        vm_gen = self.get_image_vm_generation(instance.uuid, image_meta)

        self._block_device_manager.validate_and_update_bdi(
            instance, image_meta, vm_gen, block_device_info)
        root_device = block_device_info['root_disk']
        self._create_root_device(context, instance, root_device, vm_gen)
        self._create_ephemerals(instance, block_device_info['ephemerals'])

        try:
            with self.wait_vif_plug_events(instance, network_info):
                self.create_instance(instance, network_info, root_device,
                                     block_device_info, vm_gen, image_meta)

            if configdrive.required_by(instance):
                configdrive_path = self._create_config_drive(
                    instance, injected_files, admin_password, network_info)

                self.attach_config_drive(instance, configdrive_path, vm_gen)
            self.set_boot_order(vm_gen, block_device_info, instance.name)
            self.power_on(instance, network_info=network_info)
        except Exception:
            with excutils.save_and_reraise_exception():
                self.destroy(instance)
Example #27
    def get_iscsi_initiator(self):
        """Get iscsi initiator name for this machine."""

        computer_system = self._conn_cimv2.Win32_ComputerSystem()[0]
        hostname = computer_system.name
        keypath = ("SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\"
                   "iSCSI\\Discovery")
        try:
            key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, keypath, 0,
                                  _winreg.KEY_ALL_ACCESS)
            temp = _winreg.QueryValueEx(key, 'DefaultInitiatorName')
            initiator_name = str(temp[0])
            _winreg.CloseKey(key)
        except Exception:
            LOG.info(
                _LI("The iSCSI initiator name can't be found. "
                    "Choosing the default one."))
            initiator_name = "iqn.1991-05.com.microsoft:" + hostname.lower()
            if computer_system.PartofDomain:
                initiator_name += '.' + computer_system.Domain.lower()
        return initiator_name
Example #28
    def validate_initiators(self):
        # The MS iSCSI initiator service can manage the software iSCSI
        # initiator as well as hardware initiators.
        initiator_list = CONF.hyperv.iscsi_initiator_list
        valid_initiators = True

        if not initiator_list:
            LOG.info(_LI("No iSCSI initiator was explicitly requested. "
                         "The Microsoft iSCSI initiator will choose the "
                         "initiator when estabilishing sessions."))
        else:
            available_initiators = self._iscsi_utils.get_iscsi_initiators()
            for initiator in initiator_list:
                if initiator not in available_initiators:
                    valid_initiators = False
                    msg = _LW("The requested initiator %(req_initiator)s "
                              "is not in the list of available initiators: "
                              "%(avail_initiators)s.")
                    LOG.warning(msg,
                                dict(req_initiator=initiator,
                                     avail_initiators=available_initiators))

        return valid_initiators