Example #1
File: instance.py  Project: dimara/ganeti
    def Exec(self, feedback_fn):
        """Executes the opcode.

    """
        jobs = []
        if self.op.iallocator:
            op2inst = dict((op.instance_name, op) for op in self.op.instances)
            (allocatable, failed) = self.ia_result

            for (name, node_names) in allocatable:
                op = op2inst.pop(name)

                (op.pnode_uuid, op.pnode) = \
                  ExpandNodeUuidAndName(self.cfg, None, node_names[0])
                if len(node_names) > 1:
                    (op.snode_uuid, op.snode) = \
                      ExpandNodeUuidAndName(self.cfg, None, node_names[1])

                jobs.append([op])

            # Every instance not returned as allocatable must be in the failed
            # list; anything else means the iallocator result is incomplete.
            missing = set(op2inst.keys()) - set(failed)
            assert not missing, \
              "Iallocator did return incomplete result: %s" % \
              utils.CommaJoin(missing)
        else:
            jobs.extend([op] for op in self.op.instances)

        return ResultWithJobs(jobs, **self._ConstructPartialResult())
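The assert at the end of Example #1 is a completeness check on the iallocator result: every requested instance must come back either as allocatable or in the failed list. A minimal plain-Python sketch of that check, using hypothetical instance names and no Ganeti APIs:

    requested = {"inst1", "inst2", "inst3"}   # hypothetical instance names
    allocatable = {"inst1", "inst3"}          # placed by the allocator
    failed = {"inst2"}                        # explicitly rejected

    # Anything that was neither placed nor rejected makes the result incomplete.
    missing = (requested - allocatable) - failed
    assert not missing, "incomplete iallocator result: %s" % ", ".join(sorted(missing))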
Example #2
File: instance.py  Project: dimara/ganeti
    def ExpandNames(self):
        """Calculate the locks.

    """
        self.share_locks = ShareAll()
        self.needed_locks = {}

        if self.op.iallocator:
            self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
            self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

            if self.op.opportunistic_locking:
                self.opportunistic_locks[locking.LEVEL_NODE] = True
                self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
        else:
            nodeslist = []
            for inst in self.op.instances:
                (inst.pnode_uuid, inst.pnode) = \
                  ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
                nodeslist.append(inst.pnode_uuid)
                if inst.snode is not None:
                    (inst.snode_uuid, inst.snode) = \
                      ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
                    nodeslist.append(inst.snode_uuid)

            self.needed_locks[locking.LEVEL_NODE] = nodeslist
            # Lock resources of the instances' primary and secondary nodes
            # (copy to prevent accidental modification)
            self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
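The copy in the last line of Example #2 is what the comment refers to: if LEVEL_NODE and LEVEL_NODE_RES pointed at the same list object, a later change to one lock list would silently show up in the other. A small standalone sketch of that aliasing pitfall (plain Python, hypothetical names, not Ganeti code):

    nodeslist = ["node1-uuid", "node2-uuid"]

    # Aliased: both entries point at the same list object
    locks = {"node": nodeslist, "node-res": nodeslist}
    locks["node"].append("node3-uuid")
    assert locks["node-res"] == ["node1-uuid", "node2-uuid", "node3-uuid"]  # changed too

    # Copied, as in the example above: the two lists are independent
    locks = {"node": nodeslist, "node-res": list(nodeslist)}
    locks["node"].append("node4-uuid")
    assert locks["node-res"] == ["node1-uuid", "node2-uuid", "node3-uuid"]  # unaffected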
Example #3
    def ExpandNames(self):
        self._ExpandAndLockInstance()

        # In case we are zeroing, a node lock is required as we will be creating and
        # destroying a disk - allocations should be stopped, but not on the entire
        # cluster
        if self.op.zero_free_space:
            self.recalculate_locks = {
                locking.LEVEL_NODE: constants.LOCKS_REPLACE
            }
            self._LockInstancesNodes(primary_only=True)

        # Lock all nodes for local exports
        if self.op.mode == constants.EXPORT_MODE_LOCAL:
            (self.op.target_node_uuid, self.op.target_node) = \
              ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                                    self.op.target_node)
            # FIXME: lock only instance primary and destination node
            #
            # Sad but true, for now we have to lock all nodes, as we don't know where
            # the previous export might be, and in this LU we search for it and
            # remove it from its current node. In the future we could fix this by:
            #  - making a tasklet to search (share-lock all), then create the
            #    new one, then one to remove, after
            #  - removing the removal operation altogether
            self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

            # Allocations should be stopped while this LU runs with node locks, but
            # it doesn't have to be exclusive
            self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
            self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
Example #4
    def ExpandNames(self):
        self.group_uuid = None
        self.needed_locks = {}

        if self.op.kind == constants.TAG_NODE:
            (self.node_uuid, _) = \
              ExpandNodeUuidAndName(self.cfg, None, self.op.name)
            lock_level = locking.LEVEL_NODE
            lock_name = self.node_uuid
        elif self.op.kind == constants.TAG_INSTANCE:
            (self.inst_uuid, inst_name) = \
              ExpandInstanceUuidAndName(self.cfg, None, self.op.name)
            lock_level = locking.LEVEL_INSTANCE
            lock_name = inst_name
        elif self.op.kind == constants.TAG_NODEGROUP:
            self.group_uuid = self.cfg.LookupNodeGroup(self.op.name)
            lock_level = locking.LEVEL_NODEGROUP
            lock_name = self.group_uuid
        elif self.op.kind == constants.TAG_NETWORK:
            self.network_uuid = self.cfg.LookupNetwork(self.op.name)
            lock_level = locking.LEVEL_NETWORK
            lock_name = self.network_uuid
        else:
            lock_level = None
            lock_name = None

        if lock_level and getattr(self.op, "use_locking", True):
            self.needed_locks[lock_level] = lock_name
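Example #4 first resolves the tag kind to a lock level and lock name, then registers the lock only if a level was found and the opcode does not opt out via use_locking (getattr falls back to True when the attribute is absent). A minimal standalone sketch of that guarded registration, with hypothetical stand-in names rather than Ganeti's locking constants:

    LEVEL_NODE = "node"        # stand-in for locking.LEVEL_NODE

    class FakeOp:
        pass                   # no use_locking attribute -> treated as True

    op = FakeOp()
    needed_locks = {}
    lock_level, lock_name = LEVEL_NODE, "node1-uuid"

    # Register the lock only when a level was resolved and locking is enabled
    if lock_level and getattr(op, "use_locking", True):
        needed_locks[lock_level] = lock_name

    assert needed_locks == {"node": "node1-uuid"}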
Example #5
File: instance.py  Project: dimara/ganeti
    def ExpandNames(self):
        self._ExpandAndLockInstance()
        (self.op.target_node_uuid, self.op.target_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                                self.op.target_node)
        self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
        self.needed_locks[locking.LEVEL_NODE_RES] = []
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
Example #6
    def ExpandNames(self):
        self.node_uuid, _ = ExpandNodeUuidAndName(self.cfg, None,
                                                  self.op.node_name)

        self.needed_locks = {
            locking.LEVEL_NODE: self.node_uuid,
        }
        self.share_locks = {
            locking.LEVEL_NODE: False,
        }
Example #7
def _ExpandNamesForMigration(lu):
  """Expands names for use with L{TLMigrateInstance}.

  @type lu: L{LogicalUnit}

  """
  if lu.op.target_node is not None:
    (lu.op.target_node_uuid, lu.op.target_node) = \
      ExpandNodeUuidAndName(lu.cfg, lu.op.target_node_uuid, lu.op.target_node)

  lu.needed_locks[locking.LEVEL_NODE] = []
  lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
  lu.dont_collate_locks[locking.LEVEL_NODE] = True

  lu.needed_locks[locking.LEVEL_NODE_RES] = []
  lu.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
  lu.dont_collate_locks[locking.LEVEL_NODE_RES] = True
Example #8
def _ExpandNamesForMigration(lu):
  """Expands names for use with L{TLMigrateInstance}.

  @type lu: L{LogicalUnit}

  """
  if lu.op.target_node is not None:
    (lu.op.target_node_uuid, lu.op.target_node) = \
      ExpandNodeUuidAndName(lu.cfg, lu.op.target_node_uuid, lu.op.target_node)

  lu.needed_locks[locking.LEVEL_NODE] = []
  lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  lu.needed_locks[locking.LEVEL_NODE_RES] = []
  lu.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE

  # The node allocation lock is actually only needed for externally replicated
  # instances (e.g. sharedfile or RBD) and if an iallocator is used.
  lu.needed_locks[locking.LEVEL_NODE_ALLOC] = []
Example #9
    def ExpandNames(self):
        self._ExpandAndLockInstance()

        # Lock all nodes for local exports
        if self.op.mode == constants.EXPORT_MODE_LOCAL:
            (self.op.target_node_uuid, self.op.target_node) = \
              ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                                    self.op.target_node)
            # FIXME: lock only instance primary and destination node
            #
            # Sad but true, for now we have to lock all nodes, as we don't know where
            # the previous export might be, and in this LU we search for it and
            # remove it from its current node. In the future we could fix this by:
            #  - making a tasklet to search (share-lock all), then create the
            #    new one, then one to remove, after
            #  - removing the removal operation altogether
            self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

            # Allocations should be stopped while this LU runs with node locks, but
            # it doesn't have to be exclusive
            self.share_locks[locking.LEVEL_NODE_ALLOC] = 1
            self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET