Example #1
from ganeti import errors
from ganeti import runtime
from ganeti import ssconf
from ganeti import utils
from ganeti import cli
from ganeti import pathutils
from ganeti import compat


(DIR,
 FILE,
 QUEUE_DIR) = range(1, 4)

ALL_TYPES = compat.UniqueFrozenset([
  DIR,
  FILE,
  QUEUE_DIR,
  ])


def RecursiveEnsure(path, uid, gid, dir_perm, file_perm):
  """Ensures permissions recursively down a directory.

  This function walks the path and sets permissions accordingly.

  @param path: The absolute path to walk
  @param uid: The uid used as owner
  @param gid: The gid used as group
  @param dir_perm: The permission bits set for directories
  @param file_perm: The permission bits set for files
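  """
  # NOTE: the excerpt cuts off inside the docstring above. What follows is a
  # hedged completion sketch (assuming "import os" at the top of the file and
  # the real ganeti.utils.EnforcePermission helper), not the verbatim body.
  for (root, dirs, files) in os.walk(path):
    for subdir in dirs:
      utils.EnforcePermission(os.path.join(root, subdir), dir_perm,
                              uid=uid, gid=gid)
    for filename in files:
      utils.EnforcePermission(os.path.join(root, filename), file_perm,
                              uid=uid, gid=gid)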
Example #2
# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"

# Feature string for node migration version 1
_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"

# Feature string for node evacuation with LU-generated jobs
_NODE_EVAC_RES1 = "node-evac-res1"

ALL_FEATURES = compat.UniqueFrozenset([
    _INST_CREATE_REQV1,
    _INST_REINSTALL_REQV1,
    _NODE_MIGRATE_REQV1,
    _NODE_EVAC_RES1,
])

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10
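
# Hedged illustration: clients fetch these feature strings from the
# /2/features resource and branch on them. The helper below is hypothetical,
# not part of this module.
def _UseInstanceCreateV1(features):
    """Hypothetical helper: does the server accept the v1 creation body?

    @param features: feature strings as returned by the /2/features resource

    """
    return _INST_CREATE_REQV1 in features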


# FIXME: For compatibility we update the beparams/memory field. Needs to be
#        removed in Ganeti 2.8
def _UpdateBeparams(inst):
    """Updates the beparams dict of inst to support the memory field.

    @param inst: Inst dict
    @return: Updated inst dict
Example #3
            for peer in self._peers[1:]:
                if netutils.IPAddress.GetAddressFamily(peer) != self._family:
                    raise errors.ConfdClientError(
                        "Peers must be of same address family")
        except errors.IPAddressError:
            raise errors.ConfdClientError("Peer address %s invalid" % peer)


# UPCALL_REPLY: server reply upcall
# has all ConfdUpcallPayload fields populated
UPCALL_REPLY = 1
# UPCALL_EXPIRE: internal library request expire
# has only salt, type, orig_request and extra_args
UPCALL_EXPIRE = 2
CONFD_UPCALL_TYPES = compat.UniqueFrozenset([
    UPCALL_REPLY,
    UPCALL_EXPIRE,
])
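
# Hedged illustration of a client-side callback dispatching on the upcall
# type; how the callback gets registered with the confd client is not shown
# in this excerpt.
def _ExampleCallback(up):
    if up.type == UPCALL_REPLY:
        return up.server_reply  # all payload fields are populated here
    elif up.type == UPCALL_EXPIRE:
        return None  # only salt, type, orig_request and extra_args are valid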


class ConfdUpcallPayload(objects.ConfigObject):
    """Callback argument for confd replies

    @type salt: string
    @ivar salt: salt associated with the query
    @type type: one of confd.client.CONFD_UPCALL_TYPES
    @ivar type: upcall type (server reply, expired request, ...)
    @type orig_request: L{objects.ConfdRequest}
    @ivar orig_request: original request
    @type server_reply: L{objects.ConfdReply}
    @ivar server_reply: server reply
    @type server_ip: string
Example #4
LOG_OS_DIR = LOG_DIR + "/os"
LOG_ES_DIR = LOG_DIR + "/extstorage"
#: Directory for storing Xen config files after failed instance starts
LOG_XEN_DIR = LOG_DIR + "/xen"

# Job queue paths
JOB_QUEUE_LOCK_FILE = QUEUE_DIR + "/lock"
JOB_QUEUE_VERSION_FILE = QUEUE_DIR + "/version"
JOB_QUEUE_SERIAL_FILE = QUEUE_DIR + "/serial"
JOB_QUEUE_ARCHIVE_DIR = QUEUE_DIR + "/archive"
JOB_QUEUE_DRAIN_FILE = QUEUE_DIR + "/drain"

ALL_CERT_FILES = compat.UniqueFrozenset([
    NODED_CERT_FILE,
    RAPI_CERT_FILE,
    SPICE_CERT_FILE,
    SPICE_CACERT_FILE,
])


def GetLogFilename(daemon_name):
    """Returns the full path for a daemon's log file.

  """
    return "%s/%s.log" % (LOG_DIR, daemon_name)


LOG_WATCHER = GetLogFilename("watcher")
LOG_COMMANDS = GetLogFilename("commands")
LOG_BURNIN = GetLogFilename("burnin")
Example #5
RAPI_OPCODE_EXCLUDE = compat.UniqueFrozenset([
  # Not yet implemented
  opcodes.OpBackupRemove,
  opcodes.OpClusterConfigQuery,
  opcodes.OpClusterRepairDiskSizes,
  opcodes.OpClusterVerify,
  opcodes.OpClusterVerifyDisks,
  opcodes.OpInstanceChangeGroup,
  opcodes.OpInstanceMove,
  opcodes.OpNodeQueryvols,
  opcodes.OpOobCommand,
  opcodes.OpTagsSearch,
  opcodes.OpClusterActivateMasterIp,
  opcodes.OpClusterDeactivateMasterIp,
  opcodes.OpExtStorageDiagnose,

  # Difficult if not impossible
  opcodes.OpClusterDestroy,
  opcodes.OpClusterPostInit,
  opcodes.OpClusterRename,
  opcodes.OpNodeAdd,
  opcodes.OpNodeRemove,

  # Very sensitive in nature
  opcodes.OpRestrictedCommand,
  opcodes.OpClusterRenewCrypto,

  # Helper opcodes (e.g. submitted by LUs)
  opcodes.OpClusterVerifyConfig,
  opcodes.OpClusterVerifyGroup,
  opcodes.OpGroupEvacuate,
  opcodes.OpGroupVerifyDisks,

  # Test opcodes
  opcodes.OpTestAllocator,
  opcodes.OpTestDelay,
  opcodes.OpTestDummy,
  opcodes.OpTestJqueue,
  ])
Example #6
from ganeti import luxi
from ganeti import rapi
from ganeti import utils

import ganeti.rapi.testutils
import ganeti.rapi.client

import testutils

KNOWN_UNUSED_LUXI = compat.UniqueFrozenset([
    luxi.REQ_SUBMIT_MANY_JOBS,
    luxi.REQ_SUBMIT_JOB_TO_DRAINED_QUEUE,
    luxi.REQ_ARCHIVE_JOB,
    luxi.REQ_AUTO_ARCHIVE_JOBS,
    luxi.REQ_CHANGE_JOB_PRIORITY,
    luxi.REQ_PICKUP_JOB,
    luxi.REQ_QUERY_EXPORTS,
    luxi.REQ_QUERY_CONFIG_VALUES,
    luxi.REQ_QUERY_NETWORKS,
    luxi.REQ_QUERY_TAGS,
    luxi.REQ_SET_DRAIN_FLAG,
    luxi.REQ_SET_WATCHER_PAUSE,
])

# Global variable for storing used LUXI calls
_used_luxi_calls = None


class TestHideInternalErrors(unittest.TestCase):
    def test(self):
        def inner():
            raise errors.GenericError("error")
Example #7
from ganeti import locking
from ganeti import constants
from ganeti.constants import \
    LOCK_ATTEMPTS_TIMEOUT, \
    LOCK_ATTEMPTS_MAXWAIT, \
    LOCK_ATTEMPTS_MINWAIT

import testutils

# FIXME: Document what BGL whitelist means
REQ_BGL_WHITELIST = compat.UniqueFrozenset([
    opcodes.OpClusterActivateMasterIp,
    opcodes.OpClusterDeactivateMasterIp,
    opcodes.OpClusterDestroy,
    opcodes.OpClusterPostInit,
    opcodes.OpClusterRename,
    opcodes.OpClusterRenewCrypto,
    opcodes.OpNodeAdd,
    opcodes.OpNodeRemove,
    opcodes.OpTestAllocator,
])


class TestLockAttemptTimeoutStrategy(unittest.TestCase):
    def testConstants(self):
        tpa = mcpu.LockAttemptTimeoutStrategy._TIMEOUT_PER_ATTEMPT
        self.assertTrue(len(tpa) > LOCK_ATTEMPTS_TIMEOUT / LOCK_ATTEMPTS_MAXWAIT)
        self.assertTrue(sum(tpa) >= LOCK_ATTEMPTS_TIMEOUT)

        self.assertTrue(LOCK_ATTEMPTS_TIMEOUT >= 1800,
                        msg="Waiting less than half an hour per priority")
Example #8
class DRBD8Status(object):  # pylint: disable=R0902
    """A DRBD status representation class.

    Note that this class is meant to be used to parse one of the entries
    returned from L{DRBD8Info._JoinLinesPerMinor}.

    """
    UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$")
    LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
                         r"\s+ds:([^/]+)/(\S+)\s+.*$")
    SYNC_RE = re.compile(
        r"^.*\ssync'ed:\s*([0-9.]+)%.*"
        # Due to a bug in drbd in the kernel, introduced in
        # commit 4b0715f096 (still unfixed as of 2011-08-22)
        r"(?:\s|M)"
        r"finish: ([0-9]+):([0-9]+):([0-9]+)\s.*$")

    CS_UNCONFIGURED = "Unconfigured"
    CS_STANDALONE = "StandAlone"
    CS_WFCONNECTION = "WFConnection"
    CS_WFREPORTPARAMS = "WFReportParams"
    CS_CONNECTED = "Connected"
    CS_STARTINGSYNCS = "StartingSyncS"
    CS_STARTINGSYNCT = "StartingSyncT"
    CS_WFBITMAPS = "WFBitMapS"
    CS_WFBITMAPT = "WFBitMapT"
    CS_WFSYNCUUID = "WFSyncUUID"
    CS_SYNCSOURCE = "SyncSource"
    CS_SYNCTARGET = "SyncTarget"
    CS_PAUSEDSYNCS = "PausedSyncS"
    CS_PAUSEDSYNCT = "PausedSyncT"
    CSET_SYNC = compat.UniqueFrozenset([
        CS_WFREPORTPARAMS,
        CS_STARTINGSYNCS,
        CS_STARTINGSYNCT,
        CS_WFBITMAPS,
        CS_WFBITMAPT,
        CS_WFSYNCUUID,
        CS_SYNCSOURCE,
        CS_SYNCTARGET,
        CS_PAUSEDSYNCS,
        CS_PAUSEDSYNCT,
    ])

    DS_DISKLESS = "Diskless"
    DS_ATTACHING = "Attaching"  # transient state
    DS_FAILED = "Failed"  # transient state, next: diskless
    DS_NEGOTIATING = "Negotiating"  # transient state
    DS_INCONSISTENT = "Inconsistent"  # while syncing or after creation
    DS_OUTDATED = "Outdated"
    DS_DUNKNOWN = "DUnknown"  # shown for peer disk when not connected
    DS_CONSISTENT = "Consistent"
    DS_UPTODATE = "UpToDate"  # normal state

    RO_PRIMARY = "Primary"
    RO_SECONDARY = "Secondary"
    RO_UNKNOWN = "Unknown"

    def __init__(self, procline):
        u = self.UNCONF_RE.match(procline)
        if u:
            self.cstatus = self.CS_UNCONFIGURED
            self.lrole = self.rrole = self.ldisk = self.rdisk = None
        else:
            m = self.LINE_RE.match(procline)
            if not m:
                raise errors.BlockDeviceError("Can't parse input data '%s'" %
                                              procline)
            self.cstatus = m.group(1)
            self.lrole = m.group(2)
            self.rrole = m.group(3)
            self.ldisk = m.group(4)
            self.rdisk = m.group(5)

        # end reading of data from the LINE_RE or UNCONF_RE

        self.is_standalone = self.cstatus == self.CS_STANDALONE
        self.is_wfconn = self.cstatus == self.CS_WFCONNECTION
        self.is_connected = self.cstatus == self.CS_CONNECTED
        self.is_unconfigured = self.cstatus == self.CS_UNCONFIGURED
        self.is_primary = self.lrole == self.RO_PRIMARY
        self.is_secondary = self.lrole == self.RO_SECONDARY
        self.peer_primary = self.rrole == self.RO_PRIMARY
        self.peer_secondary = self.rrole == self.RO_SECONDARY
        self.both_primary = self.is_primary and self.peer_primary
        self.both_secondary = self.is_secondary and self.peer_secondary

        self.is_diskless = self.ldisk == self.DS_DISKLESS
        self.is_disk_uptodate = self.ldisk == self.DS_UPTODATE
        self.peer_disk_uptodate = self.rdisk == self.DS_UPTODATE

        self.is_in_resync = self.cstatus in self.CSET_SYNC
        self.is_in_use = self.cstatus != self.CS_UNCONFIGURED

        m = self.SYNC_RE.match(procline)
        if m:
            self.sync_percent = float(m.group(1))
            hours = int(m.group(2))
            minutes = int(m.group(3))
            seconds = int(m.group(4))
            self.est_time = hours * 3600 + minutes * 60 + seconds
        else:
            # we have (in this if branch) no percent information, but if
            # we're resyncing we need to 'fake' a sync percent information,
            # as this is how cmdlib determines if it makes sense to wait for
            # resyncing or not
            if self.is_in_resync:
                self.sync_percent = 0
            else:
                self.sync_percent = None
            self.est_time = None

    def __repr__(self):
        return ("<%s: cstatus=%s, lrole=%s, rrole=%s, ldisk=%s, rdisk=%s>" %
                (self.__class__, self.cstatus, self.lrole, self.rrole,
                 self.ldisk, self.rdisk))
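
# Hedged usage sketch: the status line below is made up, but is shaped like
# a /proc/drbd entry that LINE_RE accepts.
status = DRBD8Status(" 0: cs:Connected ro:Primary/Secondary"
                     " ds:UpToDate/UpToDate C r----")
assert status.is_connected and status.is_primary
assert status.is_disk_uptodate and status.peer_disk_uptodate
assert not status.is_in_resync and status.sync_percent is None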
Example #9
    def testEmpty(self):
        self.assertEqual(compat.UniqueFrozenset([]), frozenset([]))
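
# For reference, a hedged sketch of what the helper under test does: build a
# frozenset but reject duplicate input elements (not necessarily the verbatim
# ganeti.compat implementation).
def UniqueFrozenset(seq):
    """Makes a frozenset from a sequence after checking for duplicates.

    @raise ValueError: when the input contains duplicate elements

    """
    items = list(seq)
    result = frozenset(items)
    if len(items) != len(result):
        raise ValueError("Duplicate values found")
    return result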
Example #10
USAGE = ("\tburnin -o OS_NAME [options...] instance_name ...")

MAX_RETRIES = 3
LOG_HEADERS = {
  0: "- ",
  1: "* ",
  2: "",
  }

#: Disk templates supporting a single node
_SINGLE_NODE_DISK_TEMPLATES = compat.UniqueFrozenset([
  constants.DT_DISKLESS,
  constants.DT_PLAIN,
  constants.DT_FILE,
  constants.DT_SHARED_FILE,
  constants.DT_EXT,
  constants.DT_RBD,
  constants.DT_GLUSTER
  ])

_SUPPORTED_DISK_TEMPLATES = compat.UniqueFrozenset([
  constants.DT_DISKLESS,
  constants.DT_DRBD8,
  constants.DT_EXT,
  constants.DT_FILE,
  constants.DT_PLAIN,
  constants.DT_RBD,
  constants.DT_SHARED_FILE,
  constants.DT_GLUSTER
  ])
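
# Hedged sanity check: every single-node template above is also in the
# supported set, so frozenset subset comparison should hold.
assert _SINGLE_NODE_DISK_TEMPLATES <= _SUPPORTED_DISK_TEMPLATES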
Example #11
import ganeti.rpc.errors as rpcerr
from ganeti import rapi
from ganeti import http
from ganeti import errors
from ganeti import compat
from ganeti import constants
from ganeti import utils


# Dummy value to detect unchanged parameters
_DEFAULT = object()

#: Supported HTTP methods
_SUPPORTED_METHODS = compat.UniqueFrozenset([
  http.HTTP_DELETE,
  http.HTTP_GET,
  http.HTTP_POST,
  http.HTTP_PUT,
  ])
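
# Hedged illustration: handler classes are expected to carry per-method
# attributes such as GET_OPCODE or PUT_RENAME; the exact naming scheme below
# is an assumption based on the OpcodeAttributes slots.
def _ExampleAttributeNames(method):
  return ["%s_%s" % (method, suffix)
          for suffix in ("OPCODE", "RENAME", "ALIASES", "FORBIDDEN")]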


class OpcodeAttributes(object):
  """Acts as a structure containing the per-method attribute names.

  """
  __slots__ = [
    "method",
    "opcode",
    "rename",
    "aliases",
    "forbidden",
    "get_input",
Example #12
LEVELS = [
    LEVEL_CLUSTER,
    LEVEL_INSTANCE,
    LEVEL_NODE_ALLOC,
    LEVEL_NODEGROUP,
    LEVEL_NODE,
    LEVEL_NODE_RES,
    LEVEL_NETWORK,
]

# Lock levels which are modifiable
LEVELS_MOD = compat.UniqueFrozenset([
    LEVEL_NODE_RES,
    LEVEL_NODE,
    LEVEL_NODEGROUP,
    LEVEL_INSTANCE,
    LEVEL_NETWORK,
])

#: Lock level names (make sure to use singular form)
LEVEL_NAMES = {
    LEVEL_CLUSTER: "cluster",
    LEVEL_INSTANCE: "instance",
    LEVEL_NODE_ALLOC: "node-alloc",
    LEVEL_NODEGROUP: "nodegroup",
    LEVEL_NODE: "node",
    LEVEL_NODE_RES: "node-res",
    LEVEL_NETWORK: "network",
}
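
# Hedged consistency checks one would expect these constants to satisfy:
# every level has a name, and only known levels are modifiable.
assert frozenset(LEVEL_NAMES) == frozenset(LEVELS)
assert LEVELS_MOD.issubset(LEVELS)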
Example #13
  def test(self):
    """Check whether all RAPI resources are documented.

    """
    rapidoc = _ReadDocFile("rapi.rst")

    node_name = re.escape("[node_name]")
    instance_name = re.escape("[instance_name]")
    group_name = re.escape("[group_name]")
    network_name = re.escape("[network_name]")
    job_id = re.escape("[job_id]")
    disk_index = re.escape("[disk_index]")
    filter_uuid = re.escape("[filter_uuid]")
    query_res = re.escape("[resource]")

    resources = connector.GetHandlers(node_name, instance_name,
                                      group_name, network_name,
                                      job_id, disk_index, filter_uuid,
                                      query_res)

    handler_dups = utils.FindDuplicates(resources.values())
    self.assertFalse(handler_dups,
                     msg=("Resource handlers used more than once: %r" %
                          handler_dups))

    uri_check_fixup = {
      re.compile(node_name): "node1examplecom",
      re.compile(instance_name): "inst1examplecom",
      re.compile(group_name): "group4440",
      re.compile(network_name): "network5550",
      re.compile(job_id): "9409",
      re.compile(disk_index): "123",
      re.compile(filter_uuid): "c863fbb5-f248-47bf-869b-cea259890061",
      re.compile(query_res): "lock",
      }

    assert compat.all(VALID_URI_RE.match(value)
                      for value in uri_check_fixup.values()), \
           "Fixup values must be valid URIs, too"

    titles = []

    prevline = None
    for line in rapidoc.splitlines():
      if re.match(r"^\++$", line):
        titles.append(prevline)

      prevline = line

    prefix_exception = compat.UniqueFrozenset(["/", "/version", "/2"])

    undocumented = []
    used_uris = []

    for key, handler in resources.iteritems():
      # Regex objects
      if hasattr(key, "match"):
        self.assertTrue(key.pattern.startswith("^/2/"),
                        msg="Pattern %r does not start with '^/2/'" % key.pattern)
        self.assertEqual(key.pattern[-1], "$")

        found = False
        for title in titles:
          if title.startswith("``") and title.endswith("``"):
            uri = title[2:-2]
            if key.match(uri):
              self._CheckRapiResource(uri, uri_check_fixup, handler)
              used_uris.append(uri)
              found = True
              break

        if not found:
          # TODO: Find better way of identifying resource
          undocumented.append(key.pattern)

      else:
        self.assertTrue(key.startswith("/2/") or key in prefix_exception,
                        msg="Path %r does not start with '/2/'" % key)

        if ("``%s``" % key) in titles:
          self._CheckRapiResource(key, {}, handler)
          used_uris.append(key)
        else:
          undocumented.append(key)

    self.assertFalse(undocumented,
                     msg=("Missing RAPI resource documentation for %s" %
                          utils.CommaJoin(undocumented)))

    uri_dups = utils.FindDuplicates(used_uris)
    self.assertFalse(uri_dups,
                     msg=("URIs matched by more than one resource: %s" %
                          utils.CommaJoin(uri_dups)))

    self._FindRapiMissing(resources.values())
    self._CheckTagHandlers(resources.values())
Example #14
class TestHooksDocs(unittest.TestCase):
  HOOK_PATH_OK = compat.UniqueFrozenset([
    "master-ip-turnup",
    "master-ip-turndown",
    ])

  def test(self):
    """Check whether all hooks are documented.

    """
    hooksdoc = _ReadDocFile("hooks.rst")

    # Reverse mapping from LU to opcode
    lu2opcode = dict((lu, op)
                     for (op, lu) in mcpu.Processor.DISPATCH_TABLE.items())
    assert len(lu2opcode) == len(mcpu.Processor.DISPATCH_TABLE), \
      "Found duplicate entries"

    hooks_paths = frozenset(re.findall(r"^:directory:\s*(.+)\s*$", hooksdoc,
                                       re.M))
    self.assertTrue(self.HOOK_PATH_OK.issubset(hooks_paths),
                    msg="Whitelisted path not found in documentation")

    raw_hooks_ops = re.findall("^OP_(?!CODE$).+$", hooksdoc, re.M)
    hooks_ops = set()
    duplicate_ops = set()
    for op in raw_hooks_ops:
      if op in hooks_ops:
        duplicate_ops.add(op)
      else:
        hooks_ops.add(op)

    self.assertFalse(duplicate_ops,
                     msg="Found duplicate opcode documentation: %s" %
                         utils.CommaJoin(duplicate_ops))

    seen_paths = set()
    seen_ops = set()

    for name in dir(cmdlib):
      lucls = getattr(cmdlib, name)

      if (isinstance(lucls, type) and
          issubclass(lucls, cmdlib.LogicalUnit) and
          hasattr(lucls, "HPATH")):
        if lucls.HTYPE is None:
          continue

        opcls = lu2opcode.get(lucls, None)

        if opcls:
          seen_ops.add(opcls.OP_ID)
          self.assertTrue(opcls.OP_ID in hooks_ops,
                          msg="Missing hook documentation for %s" %
                              opcls.OP_ID)
        self.assertTrue(lucls.HPATH in hooks_paths,
                        msg="Missing documentation for hook %s/%s" %
                            (lucls.HTYPE, lucls.HPATH))
        seen_paths.add(lucls.HPATH)

    missed_ops = hooks_ops - seen_ops
    missed_paths = hooks_paths - seen_paths - self.HOOK_PATH_OK

    self.assertFalse(missed_ops,
                     msg="Hook documented for opcode that no longer exists: %s" %
                         utils.CommaJoin(missed_ops))

    self.assertFalse(missed_paths,
                     msg="Documented hook path not used by any LU: %s" %
                         utils.CommaJoin(missed_paths))
Example #15
from ganeti import netutils
from ganeti import qlang
from ganeti import ssconf
from ganeti import ht
from ganeti import pathutils

import ganeti.rapi.client # pylint: disable=W0611
from ganeti.rapi.client import UsesRapiClient

from ganeti.watcher import nodemaint
from ganeti.watcher import state


MAXTRIES = 5
BAD_STATES = compat.UniqueFrozenset([
  constants.INSTST_ERRORDOWN,
  ])
HELPLESS_STATES = compat.UniqueFrozenset([
  constants.INSTST_NODEDOWN,
  constants.INSTST_NODEOFFLINE,
  ])
NOTICE = "NOTICE"
ERROR = "ERROR"

#: Number of seconds to wait between starting child processes for node groups
CHILD_PROCESS_DELAY = 1.0

#: How many seconds to wait for instance status file lock
INSTANCE_STATUS_LOCK_TIMEOUT = 10.0
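
# Hedged sketch of how the watcher might act on these sets; the instance
# object, its methods and the notepad state object are assumptions not shown
# in this excerpt.
def _CheckInstance(inst, notepad):
  """Hypothetical helper showing how the two state sets divide the work."""
  if inst.status in BAD_STATES:
    # Broken but fixable: restart, unless we already tried MAXTRIES times
    if notepad.NumberOfRestartAttempts(inst.name) < MAXTRIES:
      inst.Restart()
  elif inst.status in HELPLESS_STATES:
    # Nothing the watcher can do while the node itself is down or offline
    pass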

Example #16
    def testUnique(self):
        self.assertEqual(compat.UniqueFrozenset([1, 2, 3]),
                         frozenset([1, 2, 3]))
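
    # Hedged companion test: duplicates are expected to be rejected (assuming
    # ValueError is raised, as a strict frozenset builder would do)
    def testDuplicates(self):
        self.assertRaises(ValueError, compat.UniqueFrozenset, [1, 1, 2])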
Example #17
  cli_option("-t", "--storage-type",
             dest="user_storage_type",
             choices=_USER_STORAGE_TYPE.keys(),
             default=None,
             metavar="STORAGE_TYPE",
             help=("Storage type (%s)" %
                   utils.CommaJoin(_USER_STORAGE_TYPE.keys())))

_REPAIRABLE_STORAGE_TYPES = \
  [st for st, so in constants.VALID_STORAGE_OPERATIONS.iteritems()
   if constants.SO_FIX_CONSISTENCY in so]

_MODIFIABLE_STORAGE_TYPES = constants.MODIFIABLE_STORAGE_FIELDS.keys()

_OOB_COMMAND_ASK = compat.UniqueFrozenset([
  constants.OOB_POWER_OFF,
  constants.OOB_POWER_CYCLE,
  ])
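
# Hedged illustration: commands in _OOB_COMMAND_ASK are destructive, so the
# CLI would confirm them first. The helper and its arguments are assumptions;
# ConfirmOperation is the real ganeti.cli confirmation prompt.
def _MaybeConfirmOobCommand(opts, command, node_names):
  if command in _OOB_COMMAND_ASK and not opts.force:
    return ConfirmOperation(node_names, "nodes", "oob %s" % command)
  return True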

_ENV_OVERRIDE = compat.UniqueFrozenset(["list"])

NONODE_SETUP_OPT = cli_option("--no-node-setup", default=True,
                              action="store_false", dest="node_setup",
                              help=("Do not make initial SSH setup on remote"
                                    " node (needs to be done manually)"))

IGNORE_STATUS_OPT = cli_option("--ignore-status", default=False,
                               action="store_true", dest="ignore_status",
                               help=("Ignore the Node(s) offline status"
                                     " (potentially DANGEROUS)"))

Example #18
    def testGenerator(self):
        seq = ("Foo%s" % i for i in range(10))
        self.assertTrue(inspect.isgenerator(seq))
        self.assertFalse(isinstance(seq, (list, tuple)))
        self.assertEqual(compat.UniqueFrozenset(seq),
                         frozenset(["Foo%s" % i for i in range(10)]))
Example #19
_VALID_KEYS = compat.UniqueFrozenset([
    constants.SS_CLUSTER_NAME,
    constants.SS_CLUSTER_TAGS,
    constants.SS_FILE_STORAGE_DIR,
    constants.SS_SHARED_FILE_STORAGE_DIR,
    constants.SS_GLUSTER_STORAGE_DIR,
    constants.SS_MASTER_CANDIDATES,
    constants.SS_MASTER_CANDIDATES_IPS,
    constants.SS_MASTER_CANDIDATES_CERTS,
    constants.SS_MASTER_IP,
    constants.SS_MASTER_NETDEV,
    constants.SS_MASTER_NETMASK,
    constants.SS_MASTER_NODE,
    constants.SS_NODE_LIST,
    constants.SS_NODE_PRIMARY_IPS,
    constants.SS_NODE_SECONDARY_IPS,
    constants.SS_OFFLINE_NODES,
    constants.SS_ONLINE_NODES,
    constants.SS_PRIMARY_IP_FAMILY,
    constants.SS_INSTANCE_LIST,
    constants.SS_RELEASE_VERSION,
    constants.SS_HYPERVISOR_LIST,
    constants.SS_MAINTAIN_NODE_HEALTH,
    constants.SS_UID_POOL,
    constants.SS_NODEGROUPS,
    constants.SS_NETWORKS,
    constants.SS_HVPARAMS_XEN_PVM,
    constants.SS_HVPARAMS_XEN_FAKE,
    constants.SS_HVPARAMS_XEN_HVM,
    constants.SS_HVPARAMS_XEN_KVM,
    constants.SS_HVPARAMS_XEN_CHROOT,
    constants.SS_HVPARAMS_XEN_LXC,
])
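
# Hedged sketch of how lookups might be guarded by _VALID_KEYS; the error
# type is an assumption (ganeti.errors is not imported in this excerpt).
def _CheckKey(key):
    if key not in _VALID_KEYS:
        raise errors.ProgrammerError("Invalid ssconf key requested: %s" % key)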
Example #20
    r"(?P<address>.+):(?P<port>\d+)$", re.I)

#: Used to recognize point at which socat(1) is sending data over the wire
TRANSFER_LOOP_RE = re.compile(r"^starting data transfer loop with FDs\s+.*$",
                              re.I)

SOCAT_LOG_DEBUG = "D"
SOCAT_LOG_INFO = "I"
SOCAT_LOG_NOTICE = "N"
SOCAT_LOG_WARNING = "W"
SOCAT_LOG_ERROR = "E"
SOCAT_LOG_FATAL = "F"

SOCAT_LOG_IGNORE = compat.UniqueFrozenset([
    SOCAT_LOG_DEBUG,
    SOCAT_LOG_INFO,
    SOCAT_LOG_NOTICE,
])

#: Used to parse GNU dd(1) statistics
DD_INFO_RE = re.compile(
    r"^(?P<bytes>\d+)\s*byte(?:|s)\s.*\scopied,\s*"
    r"(?P<seconds>[\d.]+)\s*s(?:|econds),.*$", re.I)

#: Used to ignore "N+N records in/out" on dd(1)'s stderr
DD_STDERR_IGNORE = re.compile(r"^\d+\+\d+\s*records\s+(?:in|out)$", re.I)

#: Signal upon which dd(1) will print statistics (on some platforms, SIGINFO is
#: unavailable and SIGUSR1 is used instead)
DD_INFO_SIGNAL = getattr(signal, "SIGINFO", signal.SIGUSR1)
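
# Hedged usage sketch for DD_INFO_RE with a made-up dd(1) statistics line:
_m = DD_INFO_RE.match("1048576 bytes (1.0 MB) copied, 2.5 s, 419 kB/s")
assert _m and int(_m.group("bytes")) == 1048576
assert float(_m.group("seconds")) == 2.5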
Example #21

INSTANCE_FIELDS = ("name", "os", "pnode", "snodes",
                   "admin_state",
                   "disk_template", "disk.sizes", "disk.spindles",
                   "nic.ips", "nic.macs", "nic.modes", "nic.links",
                   "beparams", "hvparams",
                   "oper_state", "oper_ram", "oper_vcpus", "status", "tags")

NODE_FIELDS = ("name", "dtotal", "dfree", "sptotal", "spfree",
               "mtotal", "mnode", "mfree",
               "pinst_cnt", "sinst_cnt", "tags")

GROUP_FIELDS = compat.UniqueFrozenset([
  "name", "uuid",
  "alloc_policy",
  "node_cnt", "node_list",
  ])

JOB_FIELDS = compat.UniqueFrozenset([
  "id", "ops", "status", "summary",
  "opstatus", "opresult", "oplog",
  "received_ts", "start_ts", "end_ts",
  ])

FILTER_FIELDS = compat.UniqueFrozenset([
  "watermark",
  "priority",
  "predicates",
  "action",
  "reason_trail",
Example #22
from cStringIO import StringIO

from ganeti.cli import *
from ganeti import constants
from ganeti import opcodes
from ganeti import utils
from ganeti import compat
from ganeti.client import base

#: default list of fields for L{ListGroups}
_LIST_DEF_FIELDS = [
    "name", "node_cnt", "pinst_cnt", "alloc_policy", "ndparams"
]

_ENV_OVERRIDE = compat.UniqueFrozenset(["list"])


def AddGroup(opts, args):
    """Add a node group to the cluster.

    @param opts: the command line options selected by the user
    @type args: list
    @param args: a list of length 1 with the name of the group to create
    @rtype: int
    @return: the desired exit code

    """
    ipolicy = CreateIPolicyFromOpts(
        minmax_ispecs=opts.ipolicy_bounds_specs,
        ipolicy_vcpu_ratio=opts.ipolicy_vcpu_ratio,
Example #23
def TestJobqueue(opts, _):
    """Runs a few tests on the job queue.

  """
    _TestJobSubmission(opts)
    _TestJobDependency(opts)

    (TM_SUCCESS, TM_MULTISUCCESS, TM_FAIL, TM_PARTFAIL) = range(4)
    TM_ALL = compat.UniqueFrozenset([
        TM_SUCCESS,
        TM_MULTISUCCESS,
        TM_FAIL,
        TM_PARTFAIL,
    ])

    for mode in TM_ALL:
        test_messages = [
            "Testing mode %s" % mode,
            "Hello World",
            "A",
            "",
            "B",
            "Foo|bar|baz",
            utils.TimestampForFilename(),
        ]

        fail = mode in (TM_FAIL, TM_PARTFAIL)

        if mode == TM_PARTFAIL:
            ToStdout("Testing partial job failure")
            ops = [
                opcodes.OpTestJqueue(notify_waitlock=True,
                                     notify_exec=True,
                                     log_messages=test_messages,
                                     fail=False),
                opcodes.OpTestJqueue(notify_waitlock=True,
                                     notify_exec=True,
                                     log_messages=test_messages,
                                     fail=False),
                opcodes.OpTestJqueue(notify_waitlock=True,
                                     notify_exec=True,
                                     log_messages=test_messages,
                                     fail=True),
                opcodes.OpTestJqueue(notify_waitlock=True,
                                     notify_exec=True,
                                     log_messages=test_messages,
                                     fail=False),
            ]
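            # The third opcode fails, so the fourth never runs; log messages
            # are expected back only from the first three opcodes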
            expect_messages = 3 * [test_messages]
            expect_opstatus = [
                constants.OP_STATUS_SUCCESS,
                constants.OP_STATUS_SUCCESS,
                constants.OP_STATUS_ERROR,
                constants.OP_STATUS_ERROR,
            ]
            expect_resultlen = 2
        elif mode == TM_MULTISUCCESS:
            ToStdout("Testing multiple successful opcodes")
            ops = [
                opcodes.OpTestJqueue(notify_waitlock=True,
                                     notify_exec=True,
                                     log_messages=test_messages,
                                     fail=False),
                opcodes.OpTestJqueue(notify_waitlock=True,
                                     notify_exec=True,
                                     log_messages=test_messages,
                                     fail=False),
            ]
            expect_messages = 2 * [test_messages]
            expect_opstatus = [
                constants.OP_STATUS_SUCCESS,
                constants.OP_STATUS_SUCCESS,
            ]
            expect_resultlen = 2
        else:
            if mode == TM_SUCCESS:
                ToStdout("Testing job success")
                expect_opstatus = [constants.OP_STATUS_SUCCESS]
            elif mode == TM_FAIL:
                ToStdout("Testing job failure")
                expect_opstatus = [constants.OP_STATUS_ERROR]
            else:
                raise errors.ProgrammerError("Unknown test mode %s" % mode)

            ops = [
                opcodes.OpTestJqueue(notify_waitlock=True,
                                     notify_exec=True,
                                     log_messages=test_messages,
                                     fail=fail),
            ]
            expect_messages = [test_messages]
            expect_resultlen = 1

        cl = cli.GetClient()
        cli.SetGenericOpcodeOpts(ops, opts)

        # Send job to master daemon
        job_id = cli.SendJob(ops, cl=cl)

        reporter = _JobQueueTestReporter()
        results = None

        try:
            results = cli.PollJob(job_id, cl=cl, reporter=reporter)
        except errors.OpExecError, err:
            if not fail:
                raise
            ToStdout("Ignoring error for 'job fail' test: %s", err)
        else:
            if fail:
                raise errors.OpExecError("Job didn't fail when it should")

        # Check length of result
        if fail:
            if results is not None:
                raise errors.OpExecError("Received result from failed job")
        elif len(results) != expect_resultlen:
            raise errors.OpExecError("Received %s results (%s), expected %s" %
                                     (len(results), results, expect_resultlen))

        # Check received log messages
        all_messages = [i for j in expect_messages for i in j]
        if reporter.GetTestMessages() != all_messages:
            raise errors.OpExecError(
                "Received test messages don't match input"
                " (input %r, received %r)" %
                (all_messages, reporter.GetTestMessages()))

        # Check final status
        reported_job_id = reporter.GetJobId()
        if reported_job_id != job_id:
            raise errors.OpExecError("Reported job ID %s doesn't match"
                                     "submission job ID %s" %
                                     (reported_job_id, job_id))

        jobdetails = cli.GetClient().QueryJobs([job_id],
                                               ["status", "opstatus"])[0]
        if not jobdetails:
            raise errors.OpExecError("Can't find job %s" % job_id)

        if fail:
            exp_status = constants.JOB_STATUS_ERROR
        else:
            exp_status = constants.JOB_STATUS_SUCCESS

        (final_status, final_opstatus) = jobdetails
        if final_status != exp_status:
            raise errors.OpExecError(
                "Final job status is %s, not %s as expected" %
                (final_status, exp_status))
        if len(final_opstatus) != len(ops):
            raise errors.OpExecError(
                "Did not receive status for all opcodes (got %s,"
                " expected %s)" % (len(final_opstatus), len(ops)))
        if final_opstatus != expect_opstatus:
            raise errors.OpExecError("Opcode status is %s, expected %s" %
                                     (final_opstatus, expect_opstatus))
Example #24
    return secret


INSTANCE_FIELDS = ("name", "os", "pnode", "snodes", "admin_state",
                   "disk_template", "disk.sizes", "disk.spindles", "nic.ips",
                   "nic.macs", "nic.modes", "nic.links", "beparams",
                   "hvparams", "oper_state", "oper_ram", "oper_vcpus",
                   "status", "tags")

NODE_FIELDS = ("name", "dtotal", "dfree", "sptotal", "spfree", "mtotal",
               "mnode", "mfree", "pinst_cnt", "sinst_cnt", "tags")

GROUP_FIELDS = compat.UniqueFrozenset([
    "name",
    "uuid",
    "alloc_policy",
    "node_cnt",
    "node_list",
])

JOB_FIELDS = compat.UniqueFrozenset([
    "id",
    "ops",
    "status",
    "summary",
    "opstatus",
    "opresult",
    "oplog",
    "received_ts",
    "start_ts",
    "end_ts",