Example #1
    def testVerifyCertificate(self):
        cert_pem = testutils.ReadTestData("cert1.pem")
        cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                               cert_pem)

        # Not checking return value as this certificate is expired
        utils.VerifyX509Certificate(cert, 30, 7)
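
Example #1 deliberately ignores the return value because the test fixture is expired. For reference, utils.VerifyX509Certificate(cert, warn_days, error_days) returns an (errcode, msg) pair, where errcode is None for a valid certificate or utils.CERT_WARNING/utils.CERT_ERROR when expiry is near or has passed (the constants match their use in Example #2). A minimal sketch of how a caller might act on that pair, assuming a standard logging setup:

    # Hedged sketch: react to the (status, message) pair returned by
    # utils.VerifyX509Certificate; the CERT_* constants mirror Example #2.
    (status, msg) = utils.VerifyX509Certificate(cert, 30, 7)
    if status == utils.CERT_ERROR:
        raise errors.GenericError("Certificate check failed: %s" % msg)
    elif status == utils.CERT_WARNING:
        logging.warning("Certificate check: %s", msg)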
Example #2
    def testClockSkew(self):
        SKEW = constants.NODE_MAX_CLOCK_SKEW
        # Create private and public key
        key = OpenSSL.crypto.PKey()
        key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)

        validity = 7 * 86400
        # skew small enough, accepting cert; note that this is a timed
        # test, and could fail if the machine is so loaded that the next
        # few lines take more than NODE_MAX_CLOCK_SKEW / 2
        for before in [-1, 0, SKEW // 4, SKEW // 2]:
            cert = self._GenCert(key, before, validity)
            result = utils.VerifyX509Certificate(cert, 1, 2)
            self.assertEqual(result, (None, None))

        # skew too great, not accepting certs
        for before in [SKEW * 2, SKEW * 10]:
            cert = self._GenCert(key, before, validity)
            (status, msg) = utils.VerifyX509Certificate(cert, 1, 2)
            self.assertEqual(status, utils.CERT_WARNING)
            self.assertTrue(msg.startswith("Certificate not yet valid"))
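
Example #2 depends on a _GenCert helper defined elsewhere in the test class. A minimal sketch of what such a helper could look like with pyOpenSSL, where "before" shifts notBefore that many seconds into the future to simulate clock skew (the exact fields the real helper sets are an assumption):

    def _GenCert(self, key, before, validity):
        # Hypothetical helper: a self-signed certificate whose validity
        # window starts "before" seconds from now and lasts "validity"
        # seconds from that point.
        cert = OpenSSL.crypto.X509()
        cert.set_serial_number(1)
        cert.gmtime_adj_notBefore(int(before))
        cert.gmtime_adj_notAfter(int(before) + int(validity))
        cert.set_issuer(cert.get_subject())
        cert.set_pubkey(key)
        cert.sign(key, "sha256")
        return cert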
Example #3
class LUBackupExport(LogicalUnit):
    """Export an instance to an image in the cluster.

  """
    HPATH = "instance-export"
    HTYPE = constants.HTYPE_INSTANCE
    REQ_BGL = False

    def CheckArguments(self):
        """Check the arguments.

    """
        self.x509_key_name = self.op.x509_key_name
        self.dest_x509_ca_pem = self.op.destination_x509_ca

        if self.op.mode == constants.EXPORT_MODE_REMOTE:
            if not self.x509_key_name:
                raise errors.OpPrereqError(
                    "Missing X509 key name for encryption", errors.ECODE_INVAL)

            if not self.dest_x509_ca_pem:
                raise errors.OpPrereqError("Missing destination X509 CA",
                                           errors.ECODE_INVAL)

        if self.op.zero_free_space and not self.op.compress:
            raise errors.OpPrereqError(
                "Zeroing free space does not make sense "
                "unless compression is used", errors.ECODE_INVAL)

        if self.op.zero_free_space and not self.op.shutdown:
            raise errors.OpPrereqError(
                "Zeroing free space cannot be used unless the instance is "
                "shut down", errors.ECODE_INVAL)

    def ExpandNames(self):
        self._ExpandAndLockInstance()

        # In case we are zeroing, a node lock is required as we will be creating and
        # destroying a disk - allocations should be stopped, but not on the entire
        # cluster
        if self.op.zero_free_space:
            self.recalculate_locks = {
                locking.LEVEL_NODE: constants.LOCKS_REPLACE
            }
            self._LockInstancesNodes(primary_only=True)

        # Lock all nodes for local exports
        if self.op.mode == constants.EXPORT_MODE_LOCAL:
            (self.op.target_node_uuid, self.op.target_node) = \
              ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                                    self.op.target_node)
            # FIXME: lock only instance primary and destination node
            #
            # Sad but true, for now we have to lock all nodes, as we don't know where
            # the previous export might be, and in this LU we search for it and
            # remove it from its current node. In the future we could fix this by:
            #  - making a tasklet to search (share-lock all), then create the
            #    new one, then one to remove, after
            #  - removing the removal operation altogether
            self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

            # Allocations should be stopped while this LU runs with node locks, but
            # it doesn't have to be exclusive
            self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
            self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

    def DeclareLocks(self, level):
        """Last minute lock declaration."""
        # All nodes are locked anyway, so nothing to do here.

    def BuildHooksEnv(self):
        """Build hooks env.

    This will run on the master, primary node and target node.

    """
        env = {
            "EXPORT_MODE": self.op.mode,
            "EXPORT_NODE": self.op.target_node,
            "EXPORT_DO_SHUTDOWN": self.op.shutdown,
            "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
            # TODO: Generic function for boolean env variables
            "REMOVE_INSTANCE": str(bool(self.op.remove_instance)),
        }

        env.update(
            BuildInstanceHookEnvByObject(self,
                                         self.instance,
                                         secondary_nodes=self.secondary_nodes,
                                         disks=self.inst_disks))

        return env

    def BuildHooksNodes(self):
        """Build hooks nodes.

    """
        nl = [self.cfg.GetMasterNode(), self.instance.primary_node]

        if self.op.mode == constants.EXPORT_MODE_LOCAL:
            nl.append(self.op.target_node_uuid)

        return (nl, nl)

    def CheckPrereq(self):
        """Check prerequisites.

    This checks that the instance and node names are valid.

    """
        self.instance = self.cfg.GetInstanceInfoByName(self.op.instance_name)
        assert self.instance is not None, \
              "Cannot retrieve locked instance %s" % self.op.instance_name
        CheckNodeOnline(self, self.instance.primary_node)

        if (self.op.remove_instance
                and self.instance.admin_state == constants.ADMINST_UP
                and not self.op.shutdown):
            raise errors.OpPrereqError(
                "Cannot remove the instance without shutting it down first",
                errors.ECODE_STATE)

        if self.op.mode == constants.EXPORT_MODE_LOCAL:
            self.dst_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
            assert self.dst_node is not None

            CheckNodeOnline(self, self.dst_node.uuid)
            CheckNodeNotDrained(self, self.dst_node.uuid)

            self._cds = None
            self.dest_disk_info = None
            self.dest_x509_ca = None

        elif self.op.mode == constants.EXPORT_MODE_REMOTE:
            self.dst_node = None

            if len(self.op.target_node) != len(self.instance.disks):
                raise errors.OpPrereqError(
                    ("Received destination information for %s"
                     " disks, but instance %s has %s disks") %
                    (len(self.op.target_node), self.op.instance_name,
                     len(self.instance.disks)), errors.ECODE_INVAL)

            cds = GetClusterDomainSecret()

            # Check X509 key name
            try:
                (key_name, hmac_digest, hmac_salt) = self.x509_key_name
            except (TypeError, ValueError) as err:
                raise errors.OpPrereqError(
                    "Invalid data for X509 key name: %s" % err,
                    errors.ECODE_INVAL)

            if not utils.VerifySha1Hmac(
                    cds, key_name, hmac_digest, salt=hmac_salt):
                raise errors.OpPrereqError("HMAC for X509 key name is wrong",
                                           errors.ECODE_INVAL)

            # Load and verify CA
            try:
                (cert,
                 _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem,
                                                      cds)
            except OpenSSL.crypto.Error as err:
                raise errors.OpPrereqError(
                    "Unable to load destination X509 CA (%s)" % (err, ),
                    errors.ECODE_INVAL)

            (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
            if errcode is not None:
                raise errors.OpPrereqError(
                    "Invalid destination X509 CA (%s)" % (msg, ),
                    errors.ECODE_INVAL)

            self.dest_x509_ca = cert

            # Verify target information
            disk_info = []
            for idx, disk_data in enumerate(self.op.target_node):
                try:
                    (host, port, magic) = \
                      masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
                except errors.GenericError as err:
                    raise errors.OpPrereqError(
                        "Target info for disk %s: %s" % (idx, err),
                        errors.ECODE_INVAL)

                disk_info.append((host, port, magic))
Example #4
    def CheckPrereq(self):
        """Check prerequisites.

    This checks that the instance and node names are valid.

    """
        self.instance = self.cfg.GetInstanceInfoByName(self.op.instance_name)
        assert self.instance is not None, \
              "Cannot retrieve locked instance %s" % self.op.instance_name
        CheckNodeOnline(self, self.instance.primary_node)

        if (self.op.remove_instance
                and self.instance.admin_state == constants.ADMINST_UP
                and not self.op.shutdown):
            raise errors.OpPrereqError(
                "Cannot remove the instance without shutting it down first",
                errors.ECODE_STATE)

        if self.op.mode == constants.EXPORT_MODE_LOCAL:
            self.dst_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
            assert self.dst_node is not None

            CheckNodeOnline(self, self.dst_node.uuid)
            CheckNodeNotDrained(self, self.dst_node.uuid)

            self._cds = None
            self.dest_disk_info = None
            self.dest_x509_ca = None

        elif self.op.mode == constants.EXPORT_MODE_REMOTE:
            self.dst_node = None

            if len(self.op.target_node) != len(self.instance.disks):
                raise errors.OpPrereqError(
                    ("Received destination information for %s"
                     " disks, but instance %s has %s disks") %
                    (len(self.op.target_node), self.op.instance_name,
                     len(self.instance.disks)), errors.ECODE_INVAL)

            cds = GetClusterDomainSecret()

            # Check X509 key name
            try:
                (key_name, hmac_digest, hmac_salt) = self.x509_key_name
            except (TypeError, ValueError) as err:
                raise errors.OpPrereqError(
                    "Invalid data for X509 key name: %s" % err,
                    errors.ECODE_INVAL)

            if not utils.VerifySha1Hmac(
                    cds, key_name, hmac_digest, salt=hmac_salt):
                raise errors.OpPrereqError("HMAC for X509 key name is wrong",
                                           errors.ECODE_INVAL)

            # Load and verify CA
            try:
                (cert,
                 _) = utils.LoadSignedX509Certificate(self.dest_x509_ca_pem,
                                                      cds)
            except OpenSSL.crypto.Error as err:
                raise errors.OpPrereqError(
                    "Unable to load destination X509 CA (%s)" % (err, ),
                    errors.ECODE_INVAL)

            (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
            if errcode is not None:
                raise errors.OpPrereqError(
                    "Invalid destination X509 CA (%s)" % (msg, ),
                    errors.ECODE_INVAL)

            self.dest_x509_ca = cert

            # Verify target information
            disk_info = []
            for idx, disk_data in enumerate(self.op.target_node):
                try:
                    (host, port, magic) = \
                      masterd.instance.CheckRemoteExportDiskInfo(cds, idx, disk_data)
                except errors.GenericError as err:
                    raise errors.OpPrereqError(
                        "Target info for disk %s: %s" % (idx, err),
                        errors.ECODE_INVAL)

                disk_info.append((host, port, magic))

            assert len(disk_info) == len(self.op.target_node)
            self.dest_disk_info = disk_info

        else:
            raise errors.ProgrammerError("Unhandled export mode %r" %
                                         self.op.mode)

        # Check prerequisites for zeroing
        if self.op.zero_free_space:
            # Check that user shutdown detection has been enabled
            hvparams = self.cfg.GetClusterInfo().FillHV(self.instance)
            if self.instance.hypervisor == constants.HT_KVM and \
               not hvparams.get(constants.HV_KVM_USER_SHUTDOWN, False):
                raise errors.OpPrereqError(
                    "Instance shutdown detection must be "
                    "enabled for zeroing to work", errors.ECODE_INVAL)

            # Check that the instance is set to boot from the disk
            if constants.HV_BOOT_ORDER in hvparams and \
               hvparams[constants.HV_BOOT_ORDER] != constants.HT_BO_DISK:
                raise errors.OpPrereqError(
                    "Booting from disk must be set for zeroing "
                    "to work", errors.ECODE_INVAL)

            # Check that the zeroing image is set
            if not self.cfg.GetZeroingImage():
                raise errors.OpPrereqError(
                    "A zeroing image must be set for zeroing to"
                    " work", errors.ECODE_INVAL)

            if self.op.zeroing_timeout_fixed is None:
                self.op.zeroing_timeout_fixed = constants.HELPER_VM_STARTUP

            if self.op.zeroing_timeout_per_mib is None:
                self.op.zeroing_timeout_per_mib = constants.ZEROING_TIMEOUT_PER_MIB

        else:
            if (self.op.zeroing_timeout_fixed is not None
                    or self.op.zeroing_timeout_per_mib is not None):
                raise errors.OpPrereqError(
                    "Zeroing timeout options can only be used with the"
                    " --zero-free-space option", errors.ECODE_INVAL)

        if self.op.long_sleep and not self.op.shutdown:
            raise errors.OpPrereqError(
                "The long sleep option only makes sense when"
                " the instance can be shut down.", errors.ECODE_INVAL)

        self.secondary_nodes = \
          self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
        self.inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)

        # Check if the compression tool is whitelisted
        CheckCompressionTool(self, self.op.compress)
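
The HMAC check in CheckPrereq ties the X509 key name to the cluster domain secret before any certificate is trusted. A rough sketch of salted HMAC-SHA1 semantics along the lines of utils.Sha1Hmac/utils.VerifySha1Hmac (how the salt is mixed in is an assumption here, not Ganeti's exact implementation):

    import hashlib
    import hmac

    def sha1_hmac(key, text, salt=None):
        # Assumed semantics: HMAC-SHA1 keyed with the shared secret, with
        # the salt prepended to the authenticated message.
        if salt:
            text = salt + text
        return hmac.new(key.encode(), text.encode(), hashlib.sha1).hexdigest()

    def verify_sha1_hmac(key, text, digest, salt=None):
        # Constant-time comparison guards against timing side channels.
        return hmac.compare_digest(sha1_hmac(key, text, salt), digest.lower())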