def ExpandNames(self):
  self.group_uuid = None
  self.needed_locks = {}

  if self.op.kind == constants.TAG_NODE:
    (self.node_uuid, _) = \
      ExpandNodeUuidAndName(self.cfg, None, self.op.name)
    lock_level = locking.LEVEL_NODE
    lock_name = self.node_uuid
  elif self.op.kind == constants.TAG_INSTANCE:
    (self.inst_uuid, inst_name) = \
      ExpandInstanceUuidAndName(self.cfg, None, self.op.name)
    lock_level = locking.LEVEL_INSTANCE
    lock_name = inst_name
  elif self.op.kind == constants.TAG_NODEGROUP:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.name)
    lock_level = locking.LEVEL_NODEGROUP
    lock_name = self.group_uuid
  elif self.op.kind == constants.TAG_NETWORK:
    self.network_uuid = self.cfg.LookupNetwork(self.op.name)
    lock_level = locking.LEVEL_NETWORK
    lock_name = self.network_uuid
  else:
    lock_level = None
    lock_name = None

  if lock_level and getattr(self.op, "use_locking", True):
    self.needed_locks[lock_level] = lock_name
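
# Illustrative sketch (not part of the LU above): the kind dispatch in
# ExpandNames amounts to a table lookup from tag kind to lock level.  The
# mapping below only uses constants that appear in the code above; the
# module-level table itself is a hypothetical restatement, not Ganeti API.
_TAG_KIND_TO_LOCK_LEVEL = {
    constants.TAG_NODE: locking.LEVEL_NODE,
    constants.TAG_INSTANCE: locking.LEVEL_INSTANCE,
    constants.TAG_NODEGROUP: locking.LEVEL_NODEGROUP,
    constants.TAG_NETWORK: locking.LEVEL_NETWORK,
}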
def _ExpandAndLockInstance(self, allow_forthcoming=False):
  """Helper function to expand and lock an instance.

  Many LUs that work on an instance take its name in
  self.op.instance_name and need to expand it and then declare the
  expanded name for locking. This function does it, and then updates
  self.op.instance_name to the expanded name. It also initializes
  needed_locks as a dict, if this hasn't been done before.

  @param allow_forthcoming: if True, do not insist that the instance be
    real; the default behaviour is to raise a prerequisite error if the
    specified instance is forthcoming.

  """
  if self.needed_locks is None:
    self.needed_locks = {}
  else:
    assert locking.LEVEL_INSTANCE not in self.needed_locks, \
      "_ExpandAndLockInstance called with instance-level locks set"
  (self.op.instance_uuid, self.op.instance_name) = \
    ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                              self.op.instance_name)
  self.needed_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
  if not allow_forthcoming:
    if self.cfg.GetInstanceInfo(self.op.instance_uuid).forthcoming:
      raise errors.OpPrereqError(
          "forthcoming instances not supported for this operation")
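
# Usage sketch: a typical instance LU delegates lock declaration to the
# helper above.  LUInstanceExample is a hypothetical class name; only the
# calling pattern is taken from how instance LUs use _ExpandAndLockInstance.
class LUInstanceExample(LogicalUnit):
  def ExpandNames(self):
    # Resolves self.op.instance_name/instance_uuid and requests the
    # instance-level lock; allow_forthcoming=True also accepts instances
    # that are not yet real.
    self._ExpandAndLockInstance(allow_forthcoming=True)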
def CheckPrereq(self):
  """Check prerequisites.

  This checks that the instance is in the cluster and is not running.

  """
  (self.op.instance_uuid, self.op.instance_name) = \
    ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                              self.op.instance_name)
  instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
  assert instance is not None

  # It should actually not happen that an instance is running with a disabled
  # disk template, but in case it does, the renaming of file-based instances
  # will fail horribly. Thus, we test it before.
  for disk in self.cfg.GetInstanceDisks(instance.uuid):
    if (disk.dev_type in constants.DTS_FILEBASED and
        self.op.new_name != instance.name):
      # TODO: when disks are separate objects, this should check for disk
      # types, not disk templates.
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(), disk.dev_type)

  CheckNodeOnline(self, instance.primary_node)
  CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                     msg="cannot rename")
  self.instance = instance

  self._PerformChecksAndResolveNewName()

  if self.op.new_name != instance.name:
    CheckInstanceExistence(self, self.op.new_name)
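
# Minimal illustration (hypothetical helper, not Ganeti API) of when the
# file-based guard in the loop above fires: only file-based disk types are
# affected, and only when the rename actually changes the name.
def _needs_disk_template_check(dev_type, old_name, new_name):
  # Mirrors the condition guarding CheckDiskTemplateEnabled above.
  return dev_type in constants.DTS_FILEBASED and new_name != old_name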
def CheckPrereq(self):
  """Check prerequisites.

  This checks the opcode parameters depending on the direction and mode of
  the test.

  """
  if self.op.mode in (constants.IALLOCATOR_MODE_ALLOC,
                      constants.IALLOCATOR_MODE_MULTI_ALLOC):
    (self.inst_uuid, iname) = self.cfg.ExpandInstanceName(self.op.name)
    if iname is not None:
      raise errors.OpPrereqError("Instance '%s' already in the cluster" %
                                 iname, errors.ECODE_EXISTS)
    for row in self.op.disks:
      if (not isinstance(row, dict) or
          constants.IDISK_SIZE not in row or
          not isinstance(row[constants.IDISK_SIZE], int) or
          constants.IDISK_MODE not in row or
          row[constants.IDISK_MODE] not in constants.DISK_ACCESS_SET):
        raise errors.OpPrereqError("Invalid contents of the 'disks'"
                                   " parameter", errors.ECODE_INVAL)
    if self.op.hypervisor is None:
      self.op.hypervisor = self.cfg.GetHypervisorType()
  elif self.op.mode == constants.IALLOCATOR_MODE_RELOC:
    (self.inst_uuid, self.op.name) = ExpandInstanceUuidAndName(self.cfg, None,
                                                               self.op.name)
    self.relocate_from_node_uuids = \
      list(self.cfg.GetInstanceSecondaryNodes(self.inst_uuid))
  elif self.op.mode in (constants.IALLOCATOR_MODE_CHG_GROUP,
                        constants.IALLOCATOR_MODE_NODE_EVAC):
    if not self.op.instances:
      raise errors.OpPrereqError("Missing instances", errors.ECODE_INVAL)
    (_, self.op.instances) = GetWantedInstances(self, self.op.instances)
  else:
    raise errors.OpPrereqError("Invalid test allocator mode '%s'" %
                               self.op.mode, errors.ECODE_INVAL)

  if self.op.direction == constants.IALLOCATOR_DIR_OUT:
    if self.op.iallocator is None:
      raise errors.OpPrereqError("Missing allocator name",
                                 errors.ECODE_INVAL)
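
# Hedged example of a 'disks' row that satisfies the validation above; the
# size value is illustrative only (an int, in MiB), and the mode must be a
# member of constants.DISK_ACCESS_SET.
_EXAMPLE_DISK_ROW = {
    constants.IDISK_SIZE: 1024,
    constants.IDISK_MODE: constants.DISK_RDWR,
}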
def CheckPrereq(self):
  """Check prerequisites.

  This checks that the instance is in the cluster.

  """
  (self.instance_uuid, self.instance_name) = \
    ExpandInstanceUuidAndName(self.lu.cfg, self.instance_uuid,
                              self.instance_name)
  self.instance = self.cfg.GetInstanceInfo(self.instance_uuid)
  assert self.instance is not None
  cluster = self.cfg.GetClusterInfo()

  if (not self.cleanup and
      not self.instance.admin_state == constants.ADMINST_UP and
      not self.failover and self.fallback):
    self.lu.LogInfo("Instance is marked down or offline, fallback allowed,"
                    " switching to failover")
    self.failover = True

  disks = self.cfg.GetInstanceDisks(self.instance.uuid)

  if not utils.AllDiskOfType(disks, constants.DTS_MIRRORED):
    if self.failover:
      text = "failovers"
    else:
      text = "migrations"
    invalid_disks = set(d.dev_type for d in disks
                        if d.dev_type not in constants.DTS_MIRRORED)
    raise errors.OpPrereqError("Instance's disk layout '%s' does not allow"
                               " %s" % (utils.CommaJoin(invalid_disks), text),
                               errors.ECODE_STATE)

  # TODO allow heterogeneous disk types if all are mirrored in some way.
  if utils.AllDiskOfType(disks, constants.DTS_EXT_MIRROR):
    CheckIAllocatorOrNode(self.lu, "iallocator", "target_node")

    if self.lu.op.iallocator:
      self._RunAllocator()
    else:
      # We set self.target_node_uuid as it is required by
      # BuildHooksEnv
      self.target_node_uuid = self.lu.op.target_node_uuid

    # Check that the target node is correct in terms of instance policy
    nodeinfo = self.cfg.GetNodeInfo(self.target_node_uuid)
    group_info = self.cfg.GetNodeGroup(nodeinfo.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                            group_info)
    CheckTargetNodeIPolicy(self.lu, ipolicy, self.instance, nodeinfo,
                           self.cfg, ignore=self.ignore_ipolicy)

    # self.target_node is already populated, either directly or by the
    # iallocator run
    target_node_uuid = self.target_node_uuid
    if self.target_node_uuid == self.instance.primary_node:
      raise errors.OpPrereqError(
          "Cannot migrate instance %s to its primary (%s)" %
          (self.instance.name,
           self.cfg.GetNodeName(self.instance.primary_node)),
          errors.ECODE_STATE)

    if len(self.lu.tasklets) == 1:
      # It is safe to release locks only when we're the only tasklet
      # in the LU
      ReleaseLocks(self.lu, locking.LEVEL_NODE,
                   keep=[self.instance.primary_node, self.target_node_uuid])

  elif utils.AllDiskOfType(disks, constants.DTS_INT_MIRROR):
    templates = [d.dev_type for d in disks]
    secondary_node_uuids = \
      self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
    if not secondary_node_uuids:
      raise errors.ConfigurationError("No secondary node but using"
                                      " %s disk types" %
                                      utils.CommaJoin(set(templates)))
    self.target_node_uuid = target_node_uuid = secondary_node_uuids[0]
    if self.lu.op.iallocator or \
        (self.lu.op.target_node_uuid and
         self.lu.op.target_node_uuid != target_node_uuid):
      if self.failover:
        text = "failed over"
      else:
        text = "migrated"
      raise errors.OpPrereqError("Instances with disk types %s cannot"
                                 " be %s to arbitrary nodes"
                                 " (neither an iallocator nor a target"
                                 " node can be passed)" %
                                 (utils.CommaJoin(set(templates)), text),
                                 errors.ECODE_INVAL)
    nodeinfo = self.cfg.GetNodeInfo(target_node_uuid)
    group_info = self.cfg.GetNodeGroup(nodeinfo.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                            group_info)
    CheckTargetNodeIPolicy(self.lu, ipolicy, self.instance, nodeinfo,
                           self.cfg, ignore=self.ignore_ipolicy)

  else:
    raise errors.OpPrereqError("Instance mixes internal and external "
                               "mirroring. This is not currently supported.")
This is not currently supported.") i_be = cluster.FillBE(self.instance) # check memory requirements on the secondary node if (not self.cleanup and (not self.failover or self.instance.admin_state == constants.ADMINST_UP)): self.tgt_free_mem = CheckNodeFreeMemory( self.lu, target_node_uuid, "migrating instance %s" % self.instance.name, i_be[constants.BE_MINMEM], self.instance.hypervisor, self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor]) else: self.lu.LogInfo("Not checking memory on the secondary node as" " instance will not be started") # check if failover must be forced instead of migration if (not self.cleanup and not self.failover and i_be[constants.BE_ALWAYS_FAILOVER]): self.lu.LogInfo("Instance configured to always failover; fallback" " to failover") self.failover = True # check bridge existance CheckInstanceBridgesExist(self.lu, self.instance, node_uuid=target_node_uuid) if not self.cleanup: CheckNodeNotDrained(self.lu, target_node_uuid) if not self.failover: result = self.rpc.call_instance_migratable(self.instance.primary_node, self.instance) if result.fail_msg and self.fallback: self.lu.LogInfo("Can't migrate, instance offline, fallback to" " failover") self.failover = True else: result.Raise("Can't migrate, please use failover", prereq=True, ecode=errors.ECODE_STATE) assert not (self.failover and self.cleanup) if not self.failover: if self.lu.op.live is not None and self.lu.op.mode is not None: raise errors.OpPrereqError("Only one of the 'live' and 'mode'" " parameters are accepted", errors.ECODE_INVAL) if self.lu.op.live is not None: if self.lu.op.live: self.lu.op.mode = constants.HT_MIGRATION_LIVE else: self.lu.op.mode = constants.HT_MIGRATION_NONLIVE # reset the 'live' parameter to None so that repeated # invocations of CheckPrereq do not raise an exception self.lu.op.live = None elif self.lu.op.mode is None: # read the default value from the hypervisor i_hv = cluster.FillHV(self.instance, skip_globals=False) self.lu.op.mode = i_hv[constants.HV_MIGRATION_MODE] self.live = self.lu.op.mode == constants.HT_MIGRATION_LIVE else: # Failover is never live self.live = False if not (self.failover or self.cleanup): remote_info = self.rpc.call_instance_info( self.instance.primary_node, self.instance.name, self.instance.hypervisor, cluster.hvparams[self.instance.hypervisor]) remote_info.Raise("Error checking instance on node %s" % self.cfg.GetNodeName(self.instance.primary_node), prereq=True) instance_running = bool(remote_info.payload) if instance_running: self.current_mem = int(remote_info.payload["memory"])
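
# Hedged sketch of the live/mode precedence implemented above, restated as a
# standalone function for clarity: an explicit 'mode' wins, the legacy 'live'
# flag maps onto a mode, and otherwise the hypervisor default applies.  The
# function itself is hypothetical; the constants and error text mirror the
# code above.
def _resolve_migration_mode(live, mode, hv_default):
  if live is not None and mode is not None:
    raise errors.OpPrereqError("Only one of the 'live' and 'mode'"
                               " parameters are accepted", errors.ECODE_INVAL)
  if live is not None:
    return constants.HT_MIGRATION_LIVE if live \
        else constants.HT_MIGRATION_NONLIVE
  if mode is None:
    return hv_default
  return mode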