Example #1
    def Exec(self, feedback_fn):
        """Start the instance.

    """
        if not self.op.no_remember:
            self.instance = self.cfg.MarkInstanceUp(self.instance.uuid)

        if self.primary_offline:
            assert self.op.ignore_offline_nodes
            self.LogInfo("Primary node offline, marked instance as started")
        else:
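            # requires_cleanup presumably signals a shutdown (e.g. initiated
            # from within the instance) that has not been cleaned up yet;
            # stop the instance properly before restarting it.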
            if self.requires_cleanup:
                result = self.rpc.call_instance_shutdown(
                    self.instance.primary_node, self.instance,
                    self.op.shutdown_timeout, self.op.reason)
                result.Raise("Could not shutdown instance '%s'" %
                             self.instance.name)

                ShutdownInstanceDisks(self, self.instance)

            StartInstanceDisks(self, self.instance, self.op.force)
            self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

            result = \
              self.rpc.call_instance_start(self.instance.primary_node,
                                           (self.instance, self.op.hvparams,
                                            self.op.beparams),
                                           self.op.startup_paused, self.op.reason)
            if result.fail_msg:
                ShutdownInstanceDisks(self, self.instance)
                result.Raise("Could not start instance '%s'" %
                             self.instance.name)
Example #2
    def ZeroFreeSpace(self, feedback_fn):
        """Zeroes the free space on a shutdown instance.

        @type feedback_fn: function
        @param feedback_fn: Function used to log progress

        """
        assert self.op.zeroing_timeout_fixed is not None
        assert self.op.zeroing_timeout_per_mib is not None

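        # The zeroing image is a helper OS that overwrites the instance's
        # free blocks with zeroes and then shuts itself down; that shutdown
        # is what signals completion further below.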
        zeroing_image = self.cfg.GetZeroingImage()
        src_node_uuid = self.instance.primary_node
        disk_size = self._DetermineImageSize(zeroing_image, src_node_uuid)

        # Calculate the sum prior to adding the temporary disk
        instance_disks_size_sum = self._InstanceDiskSizeSum()

        with TemporaryDisk(self, self.instance, disk_size, feedback_fn):
            feedback_fn("Activating instance disks")
            StartInstanceDisks(self, self.instance, False)

            feedback_fn("Imaging disk with zeroing image")
            ImageDisks(self, self.instance, zeroing_image)

            feedback_fn("Starting instance with zeroing image")
            result = self.rpc.call_instance_start(src_node_uuid,
                                                  (self.instance, [], []),
                                                  False, self.op.reason)
            result.Raise(
                "Could not start instance %s when using the zeroing image "
                "%s" % (self.instance.name, zeroing_image))

            # First wait for the instance to start up
            running_check = lambda: IsInstanceRunning(
                self, self.instance, check_user_shutdown=True)
            instance_up = retry.SimpleRetry(True, running_check, 5.0,
                                            self.op.shutdown_timeout)
            if not instance_up:
                raise errors.OpExecError(
                    "Could not boot instance when using the "
                    "zeroing image %s" % zeroing_image)

            feedback_fn("Instance is up, now awaiting shutdown")

            # Then for it to be finished, detected by its shutdown
            timeout = self.op.zeroing_timeout_fixed + \
                      self.op.zeroing_timeout_per_mib * instance_disks_size_sum
            instance_up = retry.SimpleRetry(False, running_check, 20.0,
                                            timeout)
            if instance_up:
                self.LogWarning(
                    "Zeroing not completed prior to timeout; instance will"
                    "be shut down forcibly")

        feedback_fn("Zeroing completed!")
Example #3
    def Exec(self, feedback_fn):
        """Reboot the instance.

    """
        cluster = self.cfg.GetClusterInfo()
        remote_info = self.rpc.call_instance_info(
            self.instance.primary_node, self.instance.name,
            self.instance.hypervisor,
            cluster.hvparams[self.instance.hypervisor])
        remote_info.Raise("Error checking node %s" %
                          self.cfg.GetNodeName(self.instance.primary_node))
        instance_running = bool(remote_info.payload)

        current_node_uuid = self.instance.primary_node

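        # Soft and hard reboots are delegated to the hypervisor; any other
        # case is handled as a full stop-and-start cycle below.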
        if instance_running and \
            self.op.reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                                    constants.INSTANCE_REBOOT_HARD]:
            result = self.rpc.call_instance_reboot(current_node_uuid,
                                                   self.instance,
                                                   self.op.reboot_type,
                                                   self.op.shutdown_timeout,
                                                   self.op.reason)
            result.Raise("Could not reboot instance")
        else:
            if instance_running:
                result = self.rpc.call_instance_shutdown(
                    current_node_uuid, self.instance, self.op.shutdown_timeout,
                    self.op.reason)
                result.Raise("Could not shutdown instance for full reboot")
                ShutdownInstanceDisks(self, self.instance)
                self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
            else:
                self.LogInfo("Instance %s was already stopped, starting now",
                             self.instance.name)
            StartInstanceDisks(self, self.instance, self.op.ignore_secondaries)
            self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
            result = self.rpc.call_instance_start(current_node_uuid,
                                                  (self.instance, None, None),
                                                  False, self.op.reason)
            msg = result.fail_msg
            if msg:
                ShutdownInstanceDisks(self, self.instance)
                self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
                raise errors.OpExecError("Could not start instance for"
                                         " full reboot: %s" % msg)

        self.cfg.MarkInstanceUp(self.instance.uuid)
Example #4
    def Exec(self, feedback_fn):
        """Reinstall the instance.

    """
        os_image = objects.GetOSImage(self.op.osparams)

        if os_image is not None:
            feedback_fn("Using OS image '%s'" % os_image)
        else:
            os_image = objects.GetOSImage(self.instance.osparams)

        os_type = self.op.os_type

        if os_type is not None:
            feedback_fn("Changing OS scripts to '%s'..." % os_type)
            self.instance.os = os_type
            self.cfg.Update(self.instance, feedback_fn)
        else:
            os_type = self.instance.os

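        # At this point os_image and os_type combine the operation's
        # parameters with the instance's existing configuration; if neither
        # is set, there is nothing to reinstall.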
        if not os_image and not os_type:
            self.LogInfo("No OS scripts or OS image specified or found in the"
                         " instance's configuration, nothing to install")
        else:
            if self.op.osparams is not None:
                self.instance.osparams = self.op.osparams
            if self.op.osparams_private is not None:
                self.instance.osparams_private = self.op.osparams_private
            self.cfg.Update(self.instance, feedback_fn)
            StartInstanceDisks(self, self.instance, None)
            self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
            try:
                if os_image:
                    ImageDisks(self, self.instance, os_image)

                if os_type:
                    self._ReinstallOSScripts(self.instance, self.osparams,
                                             self.op.debug_level)

                UpdateMetadata(feedback_fn,
                               self.rpc,
                               self.instance,
                               osparams_public=self.osparams,
                               osparams_private=self.osparams_private,
                               osparams_secret=self.osparams_secret)
            finally:
                ShutdownInstanceDisks(self, self.instance)
Example #5
    def Exec(self, feedback_fn):
        """Start the instance.

    """
        if not self.op.no_remember:
            self.instance = self.cfg.MarkInstanceUp(self.instance.uuid)

        if self.primary_offline:
            assert self.op.ignore_offline_nodes
            self.LogInfo("Primary node offline, marked instance as started")
        else:
            StartInstanceDisks(self, self.instance, self.op.force)
            self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

            result = \
              self.rpc.call_instance_start(self.instance.primary_node,
                                           (self.instance, self.op.hvparams,
                                            self.op.beparams),
                                           self.op.startup_paused, self.op.reason)
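            # Check fail_msg by hand instead of calling result.Raise() so the
            # freshly activated disks can be shut down again before the error
            # is propagated.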
            msg = result.fail_msg
            if msg:
                ShutdownInstanceDisks(self, self.instance)
                raise errors.OpExecError("Could not start instance: %s" % msg)
Example #6
    def Exec(self, feedback_fn):
        """Export an instance to an image in the cluster.

    """
        assert self.op.mode in constants.EXPORT_MODES

        src_node_uuid = self.instance.primary_node

        if self.op.shutdown:
            # shutdown the instance, but not the disks
            feedback_fn("Shutting down instance %s" % self.instance.name)
            result = self.rpc.call_instance_shutdown(src_node_uuid,
                                                     self.instance,
                                                     self.op.shutdown_timeout,
                                                     self.op.reason)
            # TODO: Maybe ignore failures if ignore_remove_failures is set
            result.Raise(
                "Could not shutdown instance %s on"
                " node %s" %
                (self.instance.name, self.cfg.GetNodeName(src_node_uuid)))

        if self.op.zero_free_space:
            self.ZeroFreeSpace(feedback_fn)

        activate_disks = not self.instance.disks_active

        if activate_disks:
            # Activate the instance disks if we're exporting a stopped instance
            feedback_fn("Activating disks for %s" % self.instance.name)
            StartInstanceDisks(self, self.instance, None)
            self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

        try:
            helper = masterd.instance.ExportInstanceHelper(
                self, feedback_fn, self.instance)

            helper.CreateSnapshots()
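            # Once the snapshots exist, the export reads from them, so the
            # instance itself can be started again right away.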
            try:
                if (self.op.shutdown
                        and self.instance.admin_state == constants.ADMINST_UP
                        and not self.op.remove_instance):
                    assert self.instance.disks_active
                    feedback_fn("Starting instance %s" % self.instance.name)
                    result = self.rpc.call_instance_start(
                        src_node_uuid, (self.instance, None, None), False,
                        self.op.reason)
                    msg = result.fail_msg
                    if msg:
                        feedback_fn("Failed to start instance: %s" % msg)
                        ShutdownInstanceDisks(self, self.instance)
                        raise errors.OpExecError(
                            "Could not start instance: %s" % msg)

                if self.op.mode == constants.EXPORT_MODE_LOCAL:
                    (fin_resu,
                     dresults) = helper.LocalExport(self.dst_node,
                                                    self.op.compress)
                elif self.op.mode == constants.EXPORT_MODE_REMOTE:
                    connect_timeout = constants.RIE_CONNECT_TIMEOUT
                    timeouts = masterd.instance.ImportExportTimeouts(
                        connect_timeout)

                    (key_name, _, _) = self.x509_key_name

                    dest_ca_pem = \
                      OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                                      self.dest_x509_ca)

                    (fin_resu, dresults) = helper.RemoteExport(
                        self.dest_disk_info, key_name, dest_ca_pem,
                        self.op.compress, timeouts)
            finally:
                helper.Cleanup()

            # Check for backwards compatibility
            assert len(dresults) == len(self.instance.disks)
            assert compat.all(isinstance(i, bool) for i in dresults), \
                   "Not all results are boolean: %r" % dresults

        finally:
            if activate_disks:
                feedback_fn("Deactivating disks for %s" % self.instance.name)
                ShutdownInstanceDisks(self, self.instance)

        if not (compat.all(dresults) and fin_resu):
            failures = []
            if not fin_resu:
                failures.append("export finalization")
            if not compat.all(dresults):
                fdsk = utils.CommaJoin(idx
                                       for (idx, dsk) in enumerate(dresults)
                                       if not dsk)
                failures.append("disk export: disk(s) %s" % fdsk)

            raise errors.OpExecError("Export failed, errors in %s" %
                                     utils.CommaJoin(failures))

        # At this point, the export was successful, we can cleanup/finish

        # Remove instance if requested
        if self.op.remove_instance:
            feedback_fn("Removing instance %s" % self.instance.name)
            RemoveInstance(self, feedback_fn, self.instance,
                           self.op.ignore_remove_failures)

        if self.op.mode == constants.EXPORT_MODE_LOCAL:
            self._CleanupExports(feedback_fn)

        return fin_resu, dresults
Example #7
        try:
            disk_size = DetermineImageSize(self, zeroing_image, src_node_uuid)
        except errors.OpExecError as err:
            raise errors.OpExecError(
                "Could not create temporary disk for zeroing: %s" % err)

        # Calculate the sum prior to adding the temporary disk
        instance_disks_size_sum = self._InstanceDiskSizeSum()

        with TemporaryDisk(self, self.instance,
                           [(constants.DT_PLAIN, constants.DISK_RDWR,
                             disk_size)],
                           feedback_fn):
            feedback_fn("Activating instance disks")
            StartInstanceDisks(self, self.instance, False)

            feedback_fn("Imaging disk with zeroing image")
            ImageDisks(self, self.instance, zeroing_image)

            feedback_fn("Starting instance with zeroing image")
            result = self.rpc.call_instance_start(src_node_uuid,
                                                  (self.instance, [], []),
                                                  False, self.op.reason)
            result.Raise(
                "Could not start instance %s when using the zeroing image "
                "%s" % (self.instance.name, zeroing_image))

            # First wait for the instance to start up
            running_check = lambda: IsInstanceRunning(
                self, self.instance, prereq=False)
Example #8
File: instance.py Project: dimara/ganeti
    def Exec(self, feedback_fn):
        """Rename the instance.

    """
        old_name = self.instance.name

        rename_file_storage = False
        disks = self.cfg.GetInstanceDisks(self.instance.uuid)
        renamed_storage = [
            d for d in disks if (d.dev_type in constants.DTS_FILEBASED
                                 and d.dev_type != constants.DT_GLUSTER)
        ]
        if renamed_storage and self.op.new_name != self.instance.name:
            disks = self.cfg.GetInstanceDisks(self.instance.uuid)
            old_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
            rename_file_storage = True

        self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)

        # Assert that we have both the locks needed
        assert old_name in self.owned_locks(locking.LEVEL_INSTANCE)
        assert self.op.new_name in self.owned_locks(locking.LEVEL_INSTANCE)

        # re-read the instance from the configuration after rename
        renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
        disks = self.cfg.GetInstanceDisks(renamed_inst.uuid)

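        # A forthcoming instance exists only in the configuration so far, so
        # there is nothing to rename on the nodes themselves.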
        if self.instance.forthcoming:
            return renamed_inst.name

        if rename_file_storage:
            new_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
            result = self.rpc.call_file_storage_dir_rename(
                renamed_inst.primary_node, old_file_storage_dir,
                new_file_storage_dir)
            result.Raise("Could not rename on node %s directory '%s' to '%s'"
                         " (but the instance has been renamed in Ganeti)" %
                         (self.cfg.GetNodeName(renamed_inst.primary_node),
                          old_file_storage_dir, new_file_storage_dir))

        StartInstanceDisks(self, renamed_inst, None)
        renamed_inst = self.cfg.GetInstanceInfo(renamed_inst.uuid)

        # update info on disks
        info = GetInstanceInfoText(renamed_inst)
        for (idx, disk) in enumerate(disks):
            for node_uuid in self.cfg.GetInstanceNodes(renamed_inst.uuid):
                result = self.rpc.call_blockdev_setinfo(
                    node_uuid, (disk, renamed_inst), info)
                result.Warn(
                    "Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
        try:
            result = self.rpc.call_instance_run_rename(
                renamed_inst.primary_node, renamed_inst, old_name,
                self.op.debug_level)
            result.Warn(
                "Could not run OS rename script for instance %s on node %s"
                " (but the instance has been renamed in Ganeti)" %
                (renamed_inst.name,
                 self.cfg.GetNodeName(renamed_inst.primary_node)),
                self.LogWarning)
        finally:
            ShutdownInstanceDisks(self, renamed_inst)

        return renamed_inst.name
Example #9
    if feedback_fn is not None:
        log_feedback = lambda msg: feedback_fn(add_prefix(msg))
    else:
        log_feedback = lambda _: None

    try:
        disk_size = DetermineImageSize(lu, vm_image, instance.primary_node)
    except errors.OpExecError as err:
        raise errors.OpExecError("Could not create temporary disk: %s" % err)

    with TemporaryDisk(lu, instance,
                       [(constants.DT_PLAIN, constants.DISK_RDWR, disk_size)],
                       log_feedback):
        log_feedback("Activating helper VM's temporary disks")
        StartInstanceDisks(lu, instance, False)

        log_feedback("Imaging temporary disks with image %s" % (vm_image, ))
        ImageDisks(lu, instance, vm_image)

        log_feedback("Starting helper VM")
        result = lu.rpc.call_instance_start(instance.primary_node,
                                            (instance, [], []), False,
                                            lu.op.reason)
        result.Raise(
            add_prefix("Could not start helper VM with image %s" %
                       (vm_image, )))

        # First wait for the instance to start up
        running_check = lambda: IsInstanceRunning(lu, instance, prereq=False)
        instance_up = retry.SimpleRetry(True, running_check, 5.0,
                                        startup_timeout)
Example #10
def HelperVM(lu,
             instance,
             vm_image,
             startup_timeout,
             vm_timeout,
             log_prefix=None,
             feedback_fn=None):
    """Runs a given helper VM for a given instance.

    @type lu: L{LogicalUnit}
    @param lu: the lu on whose behalf we execute
    @type instance: L{objects.Instance}
    @param instance: the instance definition
    @type vm_image: string
    @param vm_image: the name of the helper VM image to dump on a temporary disk
    @type startup_timeout: int
    @param startup_timeout: how long to wait for the helper VM to start up
    @type vm_timeout: int
    @param vm_timeout: how long to wait for the helper VM to finish its work
    @type log_prefix: string
    @param log_prefix: a prefix for all log messages
    @type feedback_fn: function
    @param feedback_fn: Function used to log progress

    """
    if log_prefix:
        add_prefix = lambda msg: "%s: %s" % (log_prefix, msg)
    else:
        add_prefix = lambda msg: msg

    if feedback_fn is not None:
        log_feedback = lambda msg: feedback_fn(add_prefix(msg))
    else:
        log_feedback = lambda _: None

    try:
        disk_size = DetermineImageSize(lu, vm_image, instance.primary_node)
    except errors.OpExecError as err:
        raise errors.OpExecError("Could not create temporary disk: %s", err)

    with TemporaryDisk(lu, instance,
                       [(constants.DT_PLAIN, constants.DISK_RDWR, disk_size)],
                       log_feedback):
        log_feedback("Activating helper VM's temporary disks")
        StartInstanceDisks(lu, instance, False)

        log_feedback("Imaging temporary disks with image %s" % (vm_image, ))
        ImageDisks(lu, instance, vm_image)

        log_feedback("Starting helper VM")
        result = lu.rpc.call_instance_start(instance.primary_node,
                                            (instance, [], []), False,
                                            lu.op.reason)
        result.Raise(
            add_prefix("Could not start helper VM with image %s" %
                       (vm_image, )))

        # First wait for the instance to start up
        running_check = lambda: IsInstanceRunning(lu, instance, prereq=False)
        instance_up = retry.SimpleRetry(True, running_check, 5.0,
                                        startup_timeout)
        if not instance_up:
            raise errors.OpExecError(
                add_prefix("Could not boot instance using"
                           " image %s" % (vm_image, )))

        log_feedback("Helper VM is up")

        def cleanup():
            log_feedback("Waiting for helper VM to finish")

            # Then for it to be finished, detected by its shutdown
            instance_up = retry.SimpleRetry(False, running_check, 20.0,
                                            vm_timeout)
            if instance_up:
                lu.LogWarning(
                    add_prefix("Helper VM has not finished within the"
                               " timeout; shutting it down forcibly"))
                return \
                  lu.rpc.call_instance_shutdown(instance.primary_node,
                                                instance,
                                                constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                                lu.op.reason)
            else:
                return None

        # Run the inner block and handle possible errors
        try:
            yield
        except Exception:
            # if the cleanup failed for some reason, log it and just re-raise
            result = cleanup()
            if result:
                result.Warn(
                    add_prefix("Could not shut down helper VM with image"
                               " %s within timeout" % (vm_image, )))
                log_feedback("Error running helper VM with image %s" %
                             (vm_image, ))
            raise
        else:
            result = cleanup()
            # if the cleanup failed for some reason, throw an exception
            if result:
                result.Raise(
                    add_prefix("Could not shut down helper VM with image %s"
                               " within timeout" % (vm_image, )))
                raise errors.OpExecError(
                    "Error running helper VM with image %s" % (vm_image, ))

    log_feedback("Helper VM execution completed")