Example 1
    def Exec(self, feedback_fn):
        """Start the instance.

    """
        if not self.op.no_remember:
            self.instance = self.cfg.MarkInstanceUp(self.instance.uuid)

        if self.primary_offline:
            assert self.op.ignore_offline_nodes
            self.LogInfo("Primary node offline, marked instance as started")
        else:
            if self.requires_cleanup:
                result = self.rpc.call_instance_shutdown(
                    self.instance.primary_node, self.instance,
                    self.op.shutdown_timeout, self.op.reason)
                result.Raise("Could not shutdown instance '%s'" %
                             self.instance.name)

                ShutdownInstanceDisks(self, self.instance)

            StartInstanceDisks(self, self.instance, self.op.force)
            self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

            result = \
              self.rpc.call_instance_start(self.instance.primary_node,
                                           (self.instance, self.op.hvparams,
                                            self.op.beparams),
                                           self.op.startup_paused, self.op.reason)
            if result.fail_msg:
                ShutdownInstanceDisks(self, self.instance)
                result.Raise("Could not start instance '%s'" %
                             self.instance.name)
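
Below is a minimal, self-contained sketch (not Ganeti code) of the rollback pattern this example relies on: the RPC result exposes fail_msg and a Raise() helper, and a failed start shuts the freshly activated disks down again before the error propagates. OpExecError, RpcResult, start_disks, stop_disks and start_instance are hypothetical stand-ins for illustration only.

class OpExecError(Exception):
    """Raised when a simulated operation cannot be completed."""

class RpcResult:
    """Hypothetical stand-in for an RPC result with fail_msg/Raise."""

    def __init__(self, fail_msg=""):
        self.fail_msg = fail_msg

    def Raise(self, message):
        if self.fail_msg:
            raise OpExecError("%s: %s" % (message, self.fail_msg))

def start_disks(name):
    print("activating disks for %s" % name)

def stop_disks(name):
    print("deactivating disks for %s" % name)

def start_instance(name):
    # Simulate a hypervisor that refuses to start the instance.
    return RpcResult(fail_msg="hypervisor error")

def startup(name):
    start_disks(name)
    result = start_instance(name)
    if result.fail_msg:
        # Roll back so no half-activated disks are left behind.
        stop_disks(name)
        result.Raise("Could not start instance '%s'" % name)

if __name__ == "__main__":
    try:
        startup("instance1.example.com")
    except OpExecError as err:
        print("failed: %s" % err)
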
Example 2
    def Exec(self, feedback_fn):
        """Reboot the instance.

    """
        cluster = self.cfg.GetClusterInfo()
        remote_info = self.rpc.call_instance_info(
            self.instance.primary_node, self.instance.name,
            self.instance.hypervisor,
            cluster.hvparams[self.instance.hypervisor])
        remote_info.Raise("Error checking node %s" %
                          self.cfg.GetNodeName(self.instance.primary_node))
        instance_running = bool(remote_info.payload)

        current_node_uuid = self.instance.primary_node

        if instance_running and \
            self.op.reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                                    constants.INSTANCE_REBOOT_HARD]:
            result = self.rpc.call_instance_reboot(current_node_uuid,
                                                   self.instance,
                                                   self.op.reboot_type,
                                                   self.op.shutdown_timeout,
                                                   self.op.reason)
            result.Raise("Could not reboot instance")
        else:
            if instance_running:
                result = self.rpc.call_instance_shutdown(
                    current_node_uuid, self.instance, self.op.shutdown_timeout,
                    self.op.reason)
                result.Raise("Could not shutdown instance for full reboot")
                ShutdownInstanceDisks(self, self.instance)
                self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
            else:
                self.LogInfo("Instance %s was already stopped, starting now",
                             self.instance.name)
            StartInstanceDisks(self, self.instance, self.op.ignore_secondaries)
            self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
            result = self.rpc.call_instance_start(current_node_uuid,
                                                  (self.instance, None, None),
                                                  False, self.op.reason)
            msg = result.fail_msg
            if msg:
                ShutdownInstanceDisks(self, self.instance)
                self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
                raise errors.OpExecError("Could not start instance for"
                                         " full reboot: %s" % msg)

        self.cfg.MarkInstanceUp(self.instance.uuid)
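
The branching above reduces to the following sketch, with hypothetical constants and callables standing in for the real opcode parameters and RPC calls: a running instance gets a soft or hard reboot in a single node call, while everything else becomes a full stop-and-start cycle.

INSTANCE_REBOOT_SOFT = "soft"
INSTANCE_REBOOT_HARD = "hard"
INSTANCE_REBOOT_FULL = "full"

def reboot_instance(running, reboot_type, node_reboot, stop, start):
    # Soft and hard reboots of a running instance are delegated to the node
    # in one call; anything else falls back to a full stop/start cycle.
    if running and reboot_type in (INSTANCE_REBOOT_SOFT, INSTANCE_REBOOT_HARD):
        node_reboot(reboot_type)
    else:
        if running:
            stop()
        start()

if __name__ == "__main__":
    trace = []
    reboot_instance(True, INSTANCE_REBOOT_FULL,
                    node_reboot=lambda t: trace.append("reboot:%s" % t),
                    stop=lambda: trace.append("stop"),
                    start=lambda: trace.append("start"))
    print(trace)  # ['stop', 'start']
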
Example 3
    def Exec(self, feedback_fn):
        """Shutdown the instance.

    """
        # If the instance is offline we shouldn't mark it as down, as that
        # resets the offline flag.
        if not self.op.no_remember and self.instance.admin_state in INSTANCE_ONLINE:
            if self.op.admin_state_source == constants.ADMIN_SOURCE:
                self.cfg.MarkInstanceDown(self.instance.uuid)
            elif self.op.admin_state_source == constants.USER_SOURCE:
                self.cfg.MarkInstanceUserDown(self.instance.uuid)

        if self.primary_offline:
            assert self.op.ignore_offline_nodes
            self.LogInfo("Primary node offline, marked instance as stopped")
        else:
            result = self.rpc.call_instance_shutdown(
                self.instance.primary_node, self.instance, self.op.timeout,
                self.op.reason)
            result.Raise("Could not shutdown instance '%s'" %
                         self.instance.name)

            ShutdownInstanceDisks(self, self.instance)
Example 4
    def StartInstance(self, feedback_fn, src_node_uuid):
        """Send the node instructions to start the instance.

        @raise errors.OpExecError: If the instance didn't start up.

        """
        assert self.instance.disks_active
        feedback_fn("Starting instance %s" % self.instance.name)
        result = self.rpc.call_instance_start(src_node_uuid,
                                              (self.instance, None, None),
                                              False, self.op.reason)
        msg = result.fail_msg
        if msg:
            feedback_fn("Failed to start instance: %s" % msg)
            ShutdownInstanceDisks(self, self.instance)
            raise errors.OpExecError("Could not start instance: %s" % msg)
Example 5
    def Exec(self, feedback_fn):
        """Reinstall the instance.

    """
        os_image = objects.GetOSImage(self.op.osparams)

        if os_image is not None:
            feedback_fn("Using OS image '%s'" % os_image)
        else:
            os_image = objects.GetOSImage(self.instance.osparams)

        os_type = self.op.os_type

        if os_type is not None:
            feedback_fn("Changing OS scripts to '%s'..." % os_type)
            self.instance.os = os_type
            self.cfg.Update(self.instance, feedback_fn)
        else:
            os_type = self.instance.os

        if not os_image and not os_type:
            self.LogInfo("No OS scripts or OS image specified or found in the"
                         " instance's configuration, nothing to install")
        else:
            if self.op.osparams is not None:
                self.instance.osparams = self.op.osparams
            if self.op.osparams_private is not None:
                self.instance.osparams_private = self.op.osparams_private
            self.cfg.Update(self.instance, feedback_fn)
            StartInstanceDisks(self, self.instance, None)
            self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
            try:
                if os_image:
                    ImageDisks(self, self.instance, os_image)

                if os_type:
                    self._ReinstallOSScripts(self.instance, self.osparams,
                                             self.op.debug_level)

                UpdateMetadata(feedback_fn,
                               self.rpc,
                               self.instance,
                               osparams_public=self.osparams,
                               osparams_private=self.osparams_private,
                               osparams_secret=self.osparams_secret)
            finally:
                ShutdownInstanceDisks(self, self.instance)
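
A minimal sketch of the try/finally pattern used here, with hypothetical helpers rather than the Ganeti functions: the disks are brought up only for the duration of the reinstall work and are always shut down afterwards, even when imaging or the OS scripts fail.

def activate_disks(name):
    print("disks up for %s" % name)

def deactivate_disks(name):
    print("disks down for %s" % name)

def run_reinstall(name):
    # Simulate the OS scripts failing halfway through.
    raise RuntimeError("OS scripts failed")

def reinstall(name):
    activate_disks(name)
    try:
        run_reinstall(name)
    finally:
        # Runs on success and on failure alike, so the disks never stay up.
        deactivate_disks(name)

if __name__ == "__main__":
    try:
        reinstall("instance1.example.com")
    except RuntimeError as err:
        print("reinstall failed: %s" % err)
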
Example 6
    def Exec(self, feedback_fn):
        """Shutdown the instance.

    """
        # If the instance is offline we shouldn't mark it as down, as that
        # resets the offline flag.
        if not self.op.no_remember and self.instance.admin_state in INSTANCE_ONLINE:
            self.instance = self.cfg.MarkInstanceDown(self.instance.uuid)

        if self.primary_offline:
            assert self.op.ignore_offline_nodes
            self.LogInfo("Primary node offline, marked instance as stopped")
        else:
            result = self.rpc.call_instance_shutdown(
                self.instance.primary_node, self.instance, self.op.timeout,
                self.op.reason)
            msg = result.fail_msg
            if msg:
                self.LogWarning("Could not shutdown instance: %s", msg)

            ShutdownInstanceDisks(self, self.instance)
Example 7
    def Exec(self, feedback_fn):
        """Start the instance.

    """
        if not self.op.no_remember:
            self.instance = self.cfg.MarkInstanceUp(self.instance.uuid)

        if self.primary_offline:
            assert self.op.ignore_offline_nodes
            self.LogInfo("Primary node offline, marked instance as started")
        else:
            StartInstanceDisks(self, self.instance, self.op.force)
            self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

            result = \
              self.rpc.call_instance_start(self.instance.primary_node,
                                           (self.instance, self.op.hvparams,
                                            self.op.beparams),
                                           self.op.startup_paused, self.op.reason)
            msg = result.fail_msg
            if msg:
                ShutdownInstanceDisks(self, self.instance)
                raise errors.OpExecError("Could not start instance: %s" % msg)
Example 8
    def Exec(self, feedback_fn):
        """Export an instance to an image in the cluster.

    """
        assert self.op.mode in constants.EXPORT_MODES

        src_node_uuid = self.instance.primary_node

        if self.op.shutdown:
            # shutdown the instance, but not the disks
            feedback_fn("Shutting down instance %s" % self.instance.name)
            result = self.rpc.call_instance_shutdown(src_node_uuid,
                                                     self.instance,
                                                     self.op.shutdown_timeout,
                                                     self.op.reason)
            # TODO: Maybe ignore failures if ignore_remove_failures is set
            result.Raise(
                "Could not shutdown instance %s on"
                " node %s" %
                (self.instance.name, self.cfg.GetNodeName(src_node_uuid)))

        if self.op.zero_free_space:
            self.ZeroFreeSpace(feedback_fn)

        activate_disks = not self.instance.disks_active

        if activate_disks:
            # Activate the instance disks if we're exporting a stopped instance
            feedback_fn("Activating disks for %s" % self.instance.name)
            StartInstanceDisks(self, self.instance, None)
            self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

        try:
            helper = masterd.instance.ExportInstanceHelper(
                self, feedback_fn, self.instance)

            helper.CreateSnapshots()
            try:
                if (self.op.shutdown
                        and self.instance.admin_state == constants.ADMINST_UP
                        and not self.op.remove_instance):
                    assert self.instance.disks_active
                    feedback_fn("Starting instance %s" % self.instance.name)
                    result = self.rpc.call_instance_start(
                        src_node_uuid, (self.instance, None, None), False,
                        self.op.reason)
                    msg = result.fail_msg
                    if msg:
                        feedback_fn("Failed to start instance: %s" % msg)
                        ShutdownInstanceDisks(self, self.instance)
                        raise errors.OpExecError(
                            "Could not start instance: %s" % msg)

                if self.op.mode == constants.EXPORT_MODE_LOCAL:
                    (fin_resu,
                     dresults) = helper.LocalExport(self.dst_node,
                                                    self.op.compress)
                elif self.op.mode == constants.EXPORT_MODE_REMOTE:
                    connect_timeout = constants.RIE_CONNECT_TIMEOUT
                    timeouts = masterd.instance.ImportExportTimeouts(
                        connect_timeout)

                    (key_name, _, _) = self.x509_key_name

                    dest_ca_pem = \
                      OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                                      self.dest_x509_ca)

                    (fin_resu, dresults) = helper.RemoteExport(
                        self.dest_disk_info, key_name, dest_ca_pem,
                        self.op.compress, timeouts)
            finally:
                helper.Cleanup()

            # Check for backwards compatibility
            assert len(dresults) == len(self.instance.disks)
            assert compat.all(isinstance(i, bool) for i in dresults), \
                   "Not all results are boolean: %r" % dresults

        finally:
            if activate_disks:
                feedback_fn("Deactivating disks for %s" % self.instance.name)
                ShutdownInstanceDisks(self, self.instance)

        if not (compat.all(dresults) and fin_resu):
            failures = []
            if not fin_resu:
                failures.append("export finalization")
            if not compat.all(dresults):
                fdsk = utils.CommaJoin(idx
                                       for (idx, dsk) in enumerate(dresults)
                                       if not dsk)
                failures.append("disk export: disk(s) %s" % fdsk)

            raise errors.OpExecError("Export failed, errors in %s" %
                                     utils.CommaJoin(failures))

        # At this point, the export was successful, we can cleanup/finish

        # Remove instance if requested
        if self.op.remove_instance:
            feedback_fn("Removing instance %s" % self.instance.name)
            RemoveInstance(self, feedback_fn, self.instance,
                           self.op.ignore_remove_failures)

        if self.op.mode == constants.EXPORT_MODE_LOCAL:
            self._CleanupExports(feedback_fn)

        return fin_resu, dresults
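
A condensed outline of the export sequence, using hypothetical callables in place of the RPC and masterd helpers: optionally shut the instance down, activate its disks if they were down, snapshot, restart the instance only when it is meant to stay up, copy the data out, and always deactivate disks that were activated purely for the export.

def export_instance(instance, shutdown, remove_after, stop, start,
                    activate, deactivate, snapshot, copy_out):
    if shutdown:
        stop(instance)

    activated_here = activate(instance)   # True if the disks had been down
    try:
        snapshot(instance)
        if shutdown and not remove_after:
            # The instance only comes back up if it is meant to keep running.
            start(instance)
        results = copy_out(instance)
    finally:
        if activated_here:
            deactivate(instance)

    if not all(results):
        raise RuntimeError("some disks failed to export")
    return results

if __name__ == "__main__":
    done = export_instance(
        "instance1.example.com", shutdown=True, remove_after=False,
        stop=lambda i: None, start=lambda i: None,
        activate=lambda i: True, deactivate=lambda i: None,
        snapshot=lambda i: None, copy_out=lambda i: [True, True])
    print(done)  # [True, True]
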
Example 9
  def _ExecFailover(self):
    """Failover an instance.

    The failover is done by shutting it down on its present node and
    starting it on the secondary.

    """
    if self.instance.forthcoming:
      self.feedback_fn("Instance is forthcoming, just updating the"
                       "  configuration")
      self.cfg.SetInstancePrimaryNode(self.instance.uuid,
                                      self.target_node_uuid)
      return

    primary_node = self.cfg.GetNodeInfo(self.instance.primary_node)

    source_node_uuid = self.instance.primary_node

    if self.instance.disks_active:
      self.feedback_fn("* checking disk consistency between source and target")
      inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
      for (idx, dev) in enumerate(inst_disks):
        # for drbd, these are drbd over lvm
        if not CheckDiskConsistency(self.lu, self.instance, dev,
                                    self.target_node_uuid, False):
          if primary_node.offline:
            self.feedback_fn("Node %s is offline, ignoring degraded disk %s on"
                             " target node %s" %
                             (primary_node.name, idx,
                              self.cfg.GetNodeName(self.target_node_uuid)))
          elif not self.ignore_consistency:
            raise errors.OpExecError("Disk %s is degraded on target node,"
                                     " aborting failover" % idx)
    else:
      self.feedback_fn("* not checking disk consistency as instance is not"
                       " running")

    self.feedback_fn("* shutting down instance on source node")
    logging.info("Shutting down instance %s on node %s",
                 self.instance.name, self.cfg.GetNodeName(source_node_uuid))

    result = self.rpc.call_instance_shutdown(source_node_uuid, self.instance,
                                             self.shutdown_timeout,
                                             self.lu.op.reason)
    msg = result.fail_msg
    if msg:
      if self.ignore_consistency or primary_node.offline:
        self.lu.LogWarning("Could not shutdown instance %s on node %s,"
                           " proceeding anyway; please make sure node"
                           " %s is down; error details: %s",
                           self.instance.name,
                           self.cfg.GetNodeName(source_node_uuid),
                           self.cfg.GetNodeName(source_node_uuid), msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (self.instance.name,
                                  self.cfg.GetNodeName(source_node_uuid), msg))

    disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
    if disk_template in constants.DTS_EXT_MIRROR:
      self._CloseInstanceDisks(source_node_uuid)

    self.feedback_fn("* deactivating the instance's disks on source node")
    if not ShutdownInstanceDisks(self.lu, self.instance, ignore_primary=True):
      raise errors.OpExecError("Can't shut down the instance's disks")

    self.cfg.SetInstancePrimaryNode(self.instance.uuid, self.target_node_uuid)
    self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)

    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.feedback_fn("* activating the instance's disks on target node %s" %
                       self.cfg.GetNodeName(self.target_node_uuid))
      logging.info("Starting instance %s on node %s", self.instance.name,
                   self.cfg.GetNodeName(self.target_node_uuid))

      disks_ok, _, _ = AssembleInstanceDisks(self.lu, self.instance,
                                             ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self.lu, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      self.feedback_fn("* starting the instance on the target node %s" %
                       self.cfg.GetNodeName(self.target_node_uuid))
      result = self.rpc.call_instance_start(self.target_node_uuid,
                                            (self.instance, None, None), False,
                                            self.lu.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self.lu, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name,
                                  self.cfg.GetNodeName(self.target_node_uuid),
                                  msg))
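
The order of operations in the failover can be summarised with the sketch below, which uses hypothetical helpers rather than the Ganeti APIs: check disk consistency, shut the instance down on the source node, deactivate its disks there, repoint the primary node in the configuration, and only then, if the instance is administratively up, activate the disks and start it on the target node.

def failover(instance, source, target, state, ops):
    if state["disks_active"]:
        ops["check_consistency"](instance, target)

    ops["shutdown"](instance, source)
    ops["deactivate_disks"](instance, source)
    state["primary_node"] = target

    # Only bring the instance back up if it is administratively "up".
    if state["admin_up"]:
        ops["activate_disks"](instance, target)
        ops["start"](instance, target)

if __name__ == "__main__":
    trace = []
    ops = {name: (lambda *args, _n=name: trace.append(_n))
           for name in ("check_consistency", "shutdown", "deactivate_disks",
                        "activate_disks", "start")}
    state = {"disks_active": True, "admin_up": True, "primary_node": "node1"}
    failover("inst1", "node1", "node2", state, ops)
    print(trace)
    # ['check_consistency', 'shutdown', 'deactivate_disks',
    #  'activate_disks', 'start']
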
Example 10
    def Exec(self, feedback_fn):
        """Move an instance.

        The move is done by shutting it down on its present node, copying
        the data over (slow) and starting it on the new node.

        """
        source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
        target_node = self.cfg.GetNodeInfo(self.target_node_uuid)

        self.LogInfo("Shutting down instance %s on source node %s",
                     self.instance.name, source_node.name)

        assert (self.owned_locks(locking.LEVEL_NODE) == self.owned_locks(
            locking.LEVEL_NODE_RES))

        result = self.rpc.call_instance_shutdown(source_node.uuid,
                                                 self.instance,
                                                 self.op.shutdown_timeout,
                                                 self.op.reason)
        if self.op.ignore_consistency:
            result.Warn(
                "Could not shutdown instance %s on node %s. Proceeding"
                " anyway. Please make sure node %s is down. Error details" %
                (self.instance.name, source_node.name, source_node.name),
                self.LogWarning)
        else:
            result.Raise("Could not shutdown instance %s on node %s" %
                         (self.instance.name, source_node.name))

        # create the target disks
        try:
            CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
        except errors.OpExecError:
            self.LogWarning("Device creation failed")
            for disk_uuid in self.instance.disks:
                self.cfg.ReleaseDRBDMinors(disk_uuid)
            raise

        errs = []
        transfers = []
        # activate, get path, create transfer jobs
        disks = self.cfg.GetInstanceDisks(self.instance.uuid)
        for idx, disk in enumerate(disks):
            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_RAW_DISK,
                                               (disk, self.instance),
                                               constants.IEIO_RAW_DISK,
                                               (disk, self.instance), None)
            transfers.append(dt)
            self.cfg.Update(disk, feedback_fn)

        import_result = \
          masterd.instance.TransferInstanceData(self, feedback_fn,
                                                source_node.uuid,
                                                target_node.uuid,
                                                target_node.secondary_ip,
                                                self.op.compress,
                                                self.instance, transfers)
        if not compat.all(import_result):
            errs.append("Failed to transfer instance data")

        if errs:
            self.LogWarning("Some disks failed to copy, aborting")
            try:
                RemoveDisks(self,
                            self.instance,
                            target_node_uuid=target_node.uuid)
            finally:
                for disk_uuid in self.instance.disks:
                    self.cfg.ReleaseDRBDMinors(disk_uuid)
                raise errors.OpExecError("Errors during disk copy: %s" %
                                         (",".join(errs), ))

        self.instance.primary_node = target_node.uuid
        self.cfg.Update(self.instance, feedback_fn)
        for disk in disks:
            self.cfg.SetDiskNodes(disk.uuid, [target_node.uuid])

        self.LogInfo("Removing the disks on the original node")
        RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

        # Only start the instance if it's marked as up
        if self.instance.admin_state == constants.ADMINST_UP:
            self.LogInfo("Starting instance %s on node %s", self.instance.name,
                         target_node.name)

            disks_ok, _, _ = AssembleInstanceDisks(self,
                                                   self.instance,
                                                   ignore_secondaries=True)
            if not disks_ok:
                ShutdownInstanceDisks(self, self.instance)
                raise errors.OpExecError("Can't activate the instance's disks")

            result = self.rpc.call_instance_start(target_node.uuid,
                                                  (self.instance, None, None),
                                                  False, self.op.reason)
            msg = result.fail_msg
            if msg:
                ShutdownInstanceDisks(self, self.instance)
                raise errors.OpExecError(
                    "Could not start instance %s on node %s: %s" %
                    (self.instance.name, target_node.name, msg))
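
A reduced sketch of the failure handling in the move, again with hypothetical helpers: if the data transfer fails, the disks created on the target node are removed before the error is raised, so no orphaned volumes are left behind.

def move_instance(instance, create_disks, copy_data, remove_disks):
    create_disks(instance, "target")

    errs = []
    if not copy_data(instance):
        errs.append("Failed to transfer instance data")

    if errs:
        # Remove the half-created target disks, but report the copy errors
        # even if that cleanup has problems of its own (hence the finally).
        try:
            remove_disks(instance, "target")
        finally:
            raise RuntimeError("Errors during disk copy: %s" % ", ".join(errs))

if __name__ == "__main__":
    try:
        move_instance("inst1",
                      create_disks=lambda inst, node: None,
                      copy_data=lambda inst: False,       # simulate a failure
                      remove_disks=lambda inst, node: None)
    except RuntimeError as err:
        print("move aborted: %s" % err)
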
Example 11
    def Exec(self, feedback_fn):
        """Rename the instance.

    """
        old_name = self.instance.name

        rename_file_storage = False
        disks = self.cfg.GetInstanceDisks(self.instance.uuid)
        renamed_storage = [
            d for d in disks if (d.dev_type in constants.DTS_FILEBASED
                                 and d.dev_type != constants.DT_GLUSTER)
        ]
        if (renamed_storage and self.op.new_name != self.instance.name):
            disks = self.cfg.GetInstanceDisks(self.instance.uuid)
            old_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
            rename_file_storage = True

        self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)

        # Assert that we have both the locks needed
        assert old_name in self.owned_locks(locking.LEVEL_INSTANCE)
        assert self.op.new_name in self.owned_locks(locking.LEVEL_INSTANCE)

        # re-read the instance from the configuration after rename
        renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
        disks = self.cfg.GetInstanceDisks(renamed_inst.uuid)

        if self.instance.forthcoming:
            return renamed_inst.name

        if rename_file_storage:
            new_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
            result = self.rpc.call_file_storage_dir_rename(
                renamed_inst.primary_node, old_file_storage_dir,
                new_file_storage_dir)
            result.Raise("Could not rename on node %s directory '%s' to '%s'"
                         " (but the instance has been renamed in Ganeti)" %
                         (self.cfg.GetNodeName(renamed_inst.primary_node),
                          old_file_storage_dir, new_file_storage_dir))

        StartInstanceDisks(self, renamed_inst, None)
        renamed_inst = self.cfg.GetInstanceInfo(renamed_inst.uuid)

        # update info on disks
        info = GetInstanceInfoText(renamed_inst)
        for (idx, disk) in enumerate(disks):
            for node_uuid in self.cfg.GetInstanceNodes(renamed_inst.uuid):
                result = self.rpc.call_blockdev_setinfo(
                    node_uuid, (disk, renamed_inst), info)
                result.Warn(
                    "Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
        try:
            result = self.rpc.call_instance_run_rename(
                renamed_inst.primary_node, renamed_inst, old_name,
                self.op.debug_level)
            result.Warn(
                "Could not run OS rename script for instance %s on node %s"
                " (but the instance has been renamed in Ganeti)" %
                (renamed_inst.name,
                 self.cfg.GetNodeName(renamed_inst.primary_node)),
                self.LogWarning)
        finally:
            ShutdownInstanceDisks(self, renamed_inst)

        return renamed_inst.name
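
The rename follows a fixed order that the sketch below reproduces with hypothetical helpers: the configuration is renamed first, then any file-based storage is moved, and the OS rename script runs with the disks temporarily activated and shut down again in a finally block.

def rename(old_name, new_name, rename_config, rename_storage,
           run_os_rename, start_disks, stop_disks):
    # The configuration is renamed first; the later steps are best-effort
    # and only produce warnings in the code above.
    rename_config(old_name, new_name)
    rename_storage(old_name, new_name)

    start_disks(new_name)
    try:
        run_os_rename(old_name, new_name)
    finally:
        stop_disks(new_name)
    return new_name

if __name__ == "__main__":
    steps = []
    print(rename("old.example.com", "new.example.com",
                 rename_config=lambda o, n: steps.append("config"),
                 rename_storage=lambda o, n: steps.append("storage"),
                 run_os_rename=lambda o, n: steps.append("os-script"),
                 start_disks=lambda n: steps.append("disks-up"),
                 stop_disks=lambda n: steps.append("disks-down")))
    print(steps)
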