Beispiel #1
0
  def CheckPrereq(self):
    """Verify held locks and NIC parameters before connecting the network."""
    group_locks = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    assert self.group_uuid in group_locks

    # The set of locked instances must still match the group's contents.
    instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    if self.op.conflicts_check:
      CheckNodeGroupInstances(self.cfg, self.group_uuid, instance_names)

    # Build and syntax-check the NIC parameters the mapping will use.
    self.netparams = {
      constants.NIC_MODE: self.network_mode,
      constants.NIC_LINK: self.network_link,
      }
    objects.NIC.CheckParameterSyntax(self.netparams)

    self.group = self.cfg.GetNodeGroup(self.group_uuid)
    #if self.network_mode == constants.NIC_MODE_BRIDGED:
    #  _CheckNodeGroupBridgesExist(self, self.network_link, self.group_uuid)
    already_mapped = self.network_uuid in self.group.networks
    self.connected = already_mapped
    if already_mapped:
      self.LogWarning("Network '%s' is already mapped to group '%s'" %
                      (self.network_name, self.group.name))
    elif self.op.conflicts_check:
      # Scan for IP conflicts only when the mapping does not exist yet.
      addr_pool = network.AddressPool(self.cfg.GetNetwork(self.network_uuid))
      instance_infos = [info for (_, info) in
                        self.cfg.GetMultiInstanceInfoByName(instance_names)]
      _NetworkConflictCheck(self, lambda nic: addr_pool.Contains(nic.ip),
                            "connect to", instance_infos)
Beispiel #2
0
    def CheckPrereq(self):
        """Verify held locks before disconnecting the network from the group."""
        group_locks = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
        assert self.group_uuid in group_locks

        # The set of locked instances must still match the group's contents.
        instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
        CheckNodeGroupInstances(self.cfg, self.group_uuid, instance_names)

        self.group = self.cfg.GetNodeGroup(self.group_uuid)
        self.connected = self.network_uuid in self.group.networks
        if not self.connected:
            self.LogWarning("Network '%s' is not mapped to group '%s'",
                            self.network_name, self.group.name)
        else:
            # Conflict checking only applies while the mapping still exists.
            instance_infos = [
                info for (_, info) in
                self.cfg.GetMultiInstanceInfoByName(instance_names)
            ]
            _NetworkConflictCheck(
                self, lambda nic: nic.network == self.network_uuid,
                "disconnect from", instance_infos)
            self.netparams = self.group.networks.get(self.network_uuid)
Beispiel #3
0
    def CheckPrereq(self):
        """Re-check held locks and compute candidate evacuation targets."""
        instance_names = frozenset(
            self.owned_locks(locking.LEVEL_INSTANCE))
        group_locks = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
        node_locks = frozenset(self.owned_locks(locking.LEVEL_NODE))

        assert group_locks.issuperset(self.req_target_uuids)
        assert self.group_uuid in group_locks

        # The locked instances must still belong to the evacuated group.
        CheckNodeGroupInstances(self.cfg, self.group_uuid, instance_names)

        # Cache the instance objects for later phases.
        self.instances = dict(
            self.cfg.GetMultiInstanceInfoByName(instance_names))

        # Node-group membership of the locked instances must be unchanged.
        CheckInstancesNodeGroups(self.cfg, self.instances, group_locks,
                                 node_locks, self.group_uuid)

        if self.req_target_uuids:
            # The caller supplied explicit destination groups.
            self.target_uuids = self.req_target_uuids
        else:
            # Otherwise every owned group except the evacuated one qualifies.
            self.target_uuids = [uuid for uuid in group_locks
                                 if uuid != self.group_uuid]

            if not self.target_uuids:
                raise errors.OpPrereqError(
                    "There are no possible target groups", errors.ECODE_INVAL)
Beispiel #4
0
    def CheckPrereq(self):
        """Check prerequisites.

        Merges and validates the requested node-group parameter changes
        (ndparams, diskparams, hv_state, disk_state) before they are applied.
        """
        instance_names = frozenset(
            self.owned_locks(locking.LEVEL_INSTANCE))

        # The locked instances must still belong to this group.
        CheckNodeGroupInstances(self.cfg, self.group_uuid, instance_names)

        self.group = self.cfg.GetNodeGroup(self.group_uuid)
        cluster = self.cfg.GetClusterInfo()

        if self.group is None:
            raise errors.OpExecError(
                "Could not retrieve group '%s' (UUID: %s)" %
                (self.op.group_name, self.group_uuid))

        if self.op.ndparams:
            # Merge the overrides onto the current values and type-check them.
            merged_ndparams = GetUpdatedParams(self.group.ndparams,
                                               self.op.ndparams)
            utils.ForceDictType(merged_ndparams, constants.NDS_PARAMETER_TYPES)
            self.new_ndparams = merged_ndparams

        if self.op.diskparams:
            current_diskparams = self.group.diskparams
            update_and_verify = self._UpdateAndVerifyDiskParams
            # Update and verify each disk-template sub-dict on its own.
            updated_subdicts = {
                dt: update_and_verify(current_diskparams.get(dt, {}),
                                      self.op.diskparams[dt])
                for dt in constants.DISK_TEMPLATES
                if dt in self.op.diskparams
            }
            # With every sub-dict refreshed, merge them back over the
            # existing disk parameters.
            self.new_diskparams = objects.FillDict(current_diskparams,
                                                   updated_subdicts)

            try:
                utils.VerifyDictOptions(self.new_diskparams,
                                        constants.DISK_DT_DEFAULTS)
                CheckDiskAccessModeConsistency(self.new_diskparams,
                                               self.cfg,
                                               group=self.group)
            except errors.OpPrereqError as err:
                raise errors.OpPrereqError(
                    "While verify diskparams options: %s" % err,
                    errors.ECODE_INVAL)

        if self.op.hv_state:
            self.new_hv_state = MergeAndVerifyHvState(
                self.op.hv_state, self.group.hv_state_static)

        if self.op.disk_state:
            self.new_disk_state = MergeAndVerifyDiskState(
                self.op.disk_state, self.group.disk_state_static)

        self._CheckIpolicy(cluster, instance_names)
Beispiel #5
0
    def CheckPrereq(self):
        """Re-validate the instance and node locks held for this group."""
        instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
        group_locks = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
        node_locks = frozenset(self.owned_locks(locking.LEVEL_NODE))

        assert self.group_uuid in group_locks

        # The locked instances must still belong to this group.
        CheckNodeGroupInstances(self.cfg, self.group_uuid, instance_names)

        # Keep the instance objects around for later phases.
        self.instances = dict(
            self.cfg.GetMultiInstanceInfoByName(instance_names))

        # Node-group membership of the locked instances must be unchanged.
        CheckInstancesNodeGroups(self.cfg, self.instances, group_locks,
                                 node_locks, self.group_uuid)