Example #1
  def AddManyTasks(self, tasks, priority=_DEFAULT_PRIORITY):
    """Add a list of tasks to the queue.

    @type tasks: list of tuples
    @param tasks: list of args passed to L{BaseWorker.RunTask}
    @type priority: number or list of numbers
    @param priority: Priority for all added tasks or a list with the priority
                     for each task

    """
    assert compat.all(isinstance(task, (tuple, list)) for task in tasks), \
      "Each task must be a sequence"

    assert (isinstance(priority, (int, long)) or
            compat.all(isinstance(prio, (int, long)) for prio in priority)), \
           "Priority must be numeric or be a list of numeric values"

    if isinstance(priority, (int, long)):
      priority = [priority] * len(tasks)
    elif len(priority) != len(tasks):
      raise errors.ProgrammerError("Number of priorities (%s) doesn't match"
                                   " number of tasks (%s)" %
                                   (len(priority), len(tasks)))

    self._lock.acquire()
    try:
      self._WaitWhileQuiescingUnlocked()

      assert compat.all(isinstance(prio, (int, long)) for prio in priority)
      assert len(tasks) == len(priority)

      for (args, prio) in zip(tasks, priority):
        self._AddTaskUnlocked(args, prio)
    finally:
      self._lock.release()
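
A minimal usage sketch for context (hedged: the LogWorker class, pool size and task values are hypothetical, and the WorkerPool constructor arguments are assumed from the same module). Each queued tuple is unpacked into the worker's RunTask arguments, and priority may be a single number or a per-task list:

from ganeti import workerpool

class LogWorker(workerpool.BaseWorker):
  def RunTask(self, message):
    # Each queued tuple is unpacked into RunTask's arguments
    print("processing %s" % message)

pool = workerpool.WorkerPool("demo", 3, LogWorker)
pool.AddManyTasks([("a",), ("b",), ("c",)])             # one priority for all
pool.AddManyTasks([("x",), ("y",)], priority=[10, 20])  # one priority per task
pool.Quiesce()
pool.TerminateWorkers()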
Example #2
  def _Check(self, ops, name):
    expcls = [
      opcodes.OpInstanceShutdown,
      opcodes.OpInstanceReinstall,
      opcodes.OpInstanceStartup,
      ]

    self.assert_(compat.all(isinstance(op, exp)
                            for op, exp in zip(ops, expcls)))
    self.assert_(compat.all(op.instance_name == name for op in ops))
Example #3
        def _ProcessRequests(multi, handles):
            self.assertTrue(isinstance(multi, self._DummyCurlMulti))
            self.assertEqual(len(requests), len(handles))
            self.assertTrue(
                compat.all(isinstance(curl, _FakeCurl) for curl in handles))

            # Prepare for lock check
            for req in requests:
                assert req.completion_cb is NotImplementedError
                if use_monitor:
                    req.completion_cb = \
                      compat.partial(_LockCheckReset, lock_monitor_cb.GetMonitor())

            for idx, curl in enumerate(handles):
                try:
                    port = curl.opts["__port__"]
                except KeyError:
                    self.fail("Per-request config function was not called")

                if use_monitor:
                    # Check if lock information is correct
                    lock_info = lock_monitor_cb.GetMonitor().GetLockInfo(None)
                    expected = \
                      [("rpc/%s" % (_BuildNiceName(handle.opts["__port__"],
                                                   default=("localhost/version%s" %
                                                            handle.opts["__port__"]))),
                        None,
                        [threading.currentThread().getName()], None)
                       for handle in handles[idx:]]
                    self.assertEqual(sorted(lock_info), sorted(expected))

                if port % 3 == 0:
                    response_code = http.HTTP_OK
                    msg = None
                else:
                    response_code = http.HttpNotFound.code
                    msg = "test error"

                curl.info = {
                    pycurl.RESPONSE_CODE: response_code,
                }
                if hasattr(pycurl, 'LOCAL_IP'):
                    curl.info[pycurl.LOCAL_IP] = '127.0.0.1'
                if hasattr(pycurl, 'LOCAL_PORT'):
                    curl.info[pycurl.LOCAL_PORT] = port

                # Prepare for reset
                self.assertFalse(curl.opts.pop(pycurl.POSTFIELDS))
                self.assertTrue(callable(curl.opts.pop(pycurl.WRITEFUNCTION)))

                yield (curl, msg)

            if use_monitor:
                self.assertTrue(compat.all(req.lockcheck__
                                           for req in requests))
Example #4
    def _ProcessRequests(multi, handles):
      self.assertTrue(isinstance(multi, self._DummyCurlMulti))
      self.assertEqual(len(requests), len(handles))
      self.assertTrue(compat.all(isinstance(curl, _FakeCurl)
                                 for curl in handles))

      # Prepare for lock check
      for req in requests:
        assert req.completion_cb is NotImplementedError
        if use_monitor:
          req.completion_cb = \
            compat.partial(_LockCheckReset, lock_monitor_cb.GetMonitor())

      for idx, curl in enumerate(handles):
        try:
          port = curl.opts["__port__"]
        except KeyError:
          self.fail("Per-request config function was not called")

        if use_monitor:
          # Check if lock information is correct
          lock_info = lock_monitor_cb.GetMonitor().GetLockInfo(None)
          expected = \
            [("rpc/%s" % (_BuildNiceName(handle.opts["__port__"],
                                         default=("localhost/version%s" %
                                                  handle.opts["__port__"]))),
              None,
              [threading.currentThread().getName()], None)
             for handle in handles[idx:]]
          self.assertEqual(sorted(lock_info), sorted(expected))

        if port % 3 == 0:
          response_code = http.HTTP_OK
          msg = None
        else:
          response_code = http.HttpNotFound.code
          msg = "test error"

        curl.info = {
          pycurl.RESPONSE_CODE: response_code,
        }
        if hasattr(pycurl, 'LOCAL_IP'):
          curl.info[pycurl.LOCAL_IP] = '127.0.0.1'
        if hasattr(pycurl, 'LOCAL_PORT'):
          curl.info[pycurl.LOCAL_PORT] = port

        # Prepare for reset
        self.assertFalse(curl.opts.pop(pycurl.POSTFIELDS))
        self.assertTrue(callable(curl.opts.pop(pycurl.WRITEFUNCTION)))

        yield (curl, msg)

      if use_monitor:
        self.assertTrue(compat.all(req.lockcheck__ for req in requests))
Example #5
def _TestJobSubmission(opts):
  """Tests submitting jobs.

  """
  ToStdout("Testing job submission")

  testdata = [
    (0, 0, constants.OP_PRIO_LOWEST),
    (0, 0, constants.OP_PRIO_HIGHEST),
    ]

  for priority in (constants.OP_PRIO_SUBMIT_VALID |
                   frozenset([constants.OP_PRIO_LOWEST,
                              constants.OP_PRIO_HIGHEST])):
    for offset in [-1, +1]:
      testdata.extend([
        (0, 0, priority + offset),
        (3, 0, priority + offset),
        (0, 3, priority + offset),
        (4, 2, priority + offset),
        ])

  cl = cli.GetClient()

  for before, after, failpriority in testdata:
    ops = []
    ops.extend([opcodes.OpTestDelay(duration=0) for _ in range(before)])
    ops.append(opcodes.OpTestDelay(duration=0, priority=failpriority))
    ops.extend([opcodes.OpTestDelay(duration=0) for _ in range(after)])

    try:
      cl.SubmitJob(ops)
    except errors.GenericError, err:
      if opts.debug:
        ToStdout("Ignoring error: %s", err)
    else:
      raise errors.OpExecError("Submitting opcode with priority %s did not"
                               " fail when it should (allowed are %s)" %
                               (failpriority, constants.OP_PRIO_SUBMIT_VALID))

    jobs = [
      [opcodes.OpTestDelay(duration=0),
       opcodes.OpTestDelay(duration=0, dry_run=False),
       opcodes.OpTestDelay(duration=0, dry_run=True)],
      ops,
      ]
    result = cl.SubmitManyJobs(jobs)
    if not (len(result) == 2 and
            compat.all(len(i) == 2 for i in result) and
            compat.all(isinstance(i[1], basestring) for i in result) and
            result[0][0] and not result[1][0]):
      raise errors.OpExecError("Submitting multiple jobs did not work as"
                               " expected, result %s" % result)
    assert len(result) == 2
Example #6
def ProcessRequests(requests, lock_monitor_cb=None, _curl=pycurl.Curl,
                    _curl_multi=pycurl.CurlMulti,
                    _curl_process=_ProcessCurlRequests):
  """Processes any number of HTTP client requests.

  @type requests: list of L{HttpClientRequest}
  @param requests: List of all requests
  @param lock_monitor_cb: Callable for registering with lock monitor

  """
  assert compat.all((req.error is None and
                     req.success is None and
                     req.resp_status_code is None and
                     req.resp_body is None)
                    for req in requests)

  # Prepare all requests
  curl_to_client = \
    dict((client.GetCurlHandle(), client)
         for client in [_StartRequest(_curl(), req) for req in requests])

  assert len(curl_to_client) == len(requests)

  if lock_monitor_cb:
    monitor = _PendingRequestMonitor(threading.currentThread(),
                                     curl_to_client.values)
    lock_monitor_cb(monitor)
  else:
    monitor = _NoOpRequestMonitor

  # Process all requests and act based on the returned values
  for (curl, msg) in _curl_process(_curl_multi(), curl_to_client.keys()):
    monitor.acquire(shared=0)
    try:
      curl_to_client.pop(curl).Done(msg)
    finally:
      monitor.release()

  assert not curl_to_client, "Not all requests were processed"

  # Don't try to read information anymore as all requests have been processed
  monitor.Disable()

  assert compat.all(req.error is not None or
                    (req.success and
                     req.resp_status_code is not None and
                     req.resp_body is not None)
                    for req in requests)
Example #7
def ProcessRequests(requests,
                    lock_monitor_cb=None,
                    _curl=pycurl.Curl,
                    _curl_multi=pycurl.CurlMulti,
                    _curl_process=_ProcessCurlRequests):
    """Processes any number of HTTP client requests.

  @type requests: list of L{HttpClientRequest}
  @param requests: List of all requests
  @param lock_monitor_cb: Callable for registering with lock monitor

  """
    assert compat.all(
        (req.error is None and req.success is None
         and req.resp_status_code is None and req.resp_body is None)
        for req in requests)

    # Prepare all requests
    curl_to_client = \
      dict((client.GetCurlHandle(), client)
           for client in [_StartRequest(_curl(), req) for req in requests])

    assert len(curl_to_client) == len(requests)

    if lock_monitor_cb:
        monitor = _PendingRequestMonitor(threading.currentThread(),
                                         curl_to_client.values)
        lock_monitor_cb(monitor)
    else:
        monitor = _NoOpRequestMonitor

    # Process all requests and act based on the returned values
    for (curl, msg) in _curl_process(_curl_multi(), curl_to_client.keys()):
        monitor.acquire(shared=0)
        try:
            curl_to_client.pop(curl).Done(msg)
        finally:
            monitor.release()

    assert not curl_to_client, "Not all requests were processed"

    # Don't try to read information anymore as all requests have been processed
    monitor.Disable()

    assert compat.all(
        req.error is not None or (req.success and req.resp_status_code
                                  is not None and req.resp_body is not None)
        for req in requests)
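
A hedged sketch of a caller driving ProcessRequests (the endpoints are illustrative; the HttpClientRequest arguments are assumed from their use in Example #14 below):

from ganeti import compat
from ganeti.http import client as http_client

# Illustrative endpoints; in Ganeti these are built by the RPC layer.
reqs = [http_client.HttpClientRequest(host, 5900, "GET", "/version")
        for host in ["198.51.100.1", "198.51.100.2"]]

http_client.ProcessRequests(reqs)

# Afterwards every request carries either an error or a full response,
# matching the function's closing assertion.
assert compat.all(req.error is not None or req.success for req in reqs)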
Example #8
    def AddManyTasks(self, tasks, priority=_DEFAULT_PRIORITY, task_id=None):
        """Add a list of tasks to the queue.

    @type tasks: list of tuples
    @param tasks: list of args passed to L{BaseWorker.RunTask}
    @type priority: number or list of numbers
    @param priority: Priority for all added tasks or a list with the priority
                     for each task
    @type task_id: list
    @param task_id: List with the ID for each task
    @note: See L{AddTask} for a note on task IDs.

    """
        assert compat.all(isinstance(task, (tuple, list)) for task in tasks), \
               "Each task must be a sequence"
        assert (isinstance(priority, (int, long)) or
                compat.all(isinstance(prio, (int, long)) for prio in priority)), \
               "Priority must be numeric or be a list of numeric values"
        assert task_id is None or isinstance(task_id, (tuple, list)), \
               "Task IDs must be in a sequence"

        if isinstance(priority, (int, long)):
            priority = [priority] * len(tasks)
        elif len(priority) != len(tasks):
            raise errors.ProgrammerError(
                "Number of priorities (%s) doesn't match"
                " number of tasks (%s)" % (len(priority), len(tasks)))

        if task_id is None:
            task_id = [None] * len(tasks)
        elif len(task_id) != len(tasks):
            raise errors.ProgrammerError(
                "Number of task IDs (%s) doesn't match"
                " number of tasks (%s)" % (len(task_id), len(tasks)))

        self._lock.acquire()
        try:
            self._WaitWhileQuiescingUnlocked()

            assert compat.all(
                isinstance(prio, (int, long)) for prio in priority)
            assert len(tasks) == len(priority)
            assert len(tasks) == len(task_id)

            for (args, prio, tid) in zip(tasks, priority, task_id):
                self._AddTaskUnlocked(args, prio, tid)
        finally:
            self._lock.release()
Example #9
  def testInstanceUpdate(self):
    clfactory = _FakeClientFactory(_FakeClient)
    data = {
      "instances": [{
        "name": "bar",
        "mode": "create",
        "disks": [{"size": 1024}],
        "disk_template": "plain",
        "nics": [{}],
        }, {
        "name": "foo",
        "mode": "create",
        "disks": [{"size": 1024}],
        "disk_template": "drbd",
        "nics": [{}],
        }],
      }
    handler = _CreateHandler(rlib2.R_2_instances_multi_alloc, [], {}, data,
                             clfactory)

    (body, _) = handler.GetPostOpInput()

    self.assertTrue(compat.all(
      [isinstance(inst, opcodes.OpInstanceCreate) for inst in body["instances"]]
    ))
Example #10
  def testIPv6(self):
    for mode in [constants.IEM_IMPORT, constants.IEM_EXPORT]:
      opts = CmdBuilderConfig(host="localhost", port=6789,
                              ipv4=False, ipv6=False)
      builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
      cmd = builder._GetSocatCommand()
      self.assert_(compat.all("pf=" not in i for i in cmd))

      # IPv4
      opts = CmdBuilderConfig(host="localhost", port=6789,
                              ipv4=True, ipv6=False)
      builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
      cmd = builder._GetSocatCommand()
      self.assert_(compat.any(",pf=ipv4" in i for i in cmd))

      # IPv6
      opts = CmdBuilderConfig(host="localhost", port=6789,
                              ipv4=False, ipv6=True)
      builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
      cmd = builder._GetSocatCommand()
      self.assert_(compat.any(",pf=ipv6" in i for i in cmd))

      # IPv4 and IPv6
      opts = CmdBuilderConfig(host="localhost", port=6789,
                              ipv4=True, ipv6=True)
      builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
      self.assertRaises(AssertionError, builder._GetSocatCommand)
Example #11
  def testResult(self):
    good_results = [
      # First result (all instances "allocate")
      [
        [["foo", ["a", "b"]],
         ["bar", ["c"]],
         ["baz", []]],
        []
      ],
      # Second result (partial "allocate", partial "fail")
      [
        [["bar", ["c", "b"]],
         ["baz", ["a"]]],
        ["foo"]
      ],
      # Third result (all instances "fail")
      [
        [],
        ["foo", "bar", "baz"]
      ],
      ]
    bad_results = [
      "foobar",
      1234,
      [],
      [[]],
      [[], [], []],
      ]

    result_fn = iallocator.IAReqMultiInstanceAlloc.REQ_RESULT

    self.assertTrue(compat.all(map(result_fn, good_results)))
    self.assertFalse(compat.any(map(result_fn, bad_results)))
Example #12
    def testPriority(self):
        job_id = 4283
        ops = [opcodes.OpGetTags(priority=constants.OP_PRIO_DEFAULT), opcodes.OpTestDelay()]

        def _Check(job):
            self.assertEqual(job.id, job_id)
            self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_QUEUED)
            self.assert_(repr(job).startswith("<"))

        job = jqueue._QueuedJob(None, job_id, ops)
        _Check(job)
        self.assert_(compat.all(op.priority == constants.OP_PRIO_DEFAULT for op in job.ops))
        self.assertEqual(job.CalcPriority(), constants.OP_PRIO_DEFAULT)

        # Increase first
        job.ops[0].priority -= 1
        _Check(job)
        self.assertEqual(job.CalcPriority(), constants.OP_PRIO_DEFAULT - 1)

        # Mark opcode as finished
        job.ops[0].status = constants.OP_STATUS_SUCCESS
        _Check(job)
        self.assertEqual(job.CalcPriority(), constants.OP_PRIO_DEFAULT)

        # Increase second
        job.ops[1].priority -= 10
        self.assertEqual(job.CalcPriority(), constants.OP_PRIO_DEFAULT - 10)

        # Test increasing first
        job.ops[0].status = constants.OP_STATUS_RUNNING
        job.ops[0].priority -= 19
        self.assertEqual(job.CalcPriority(), constants.OP_PRIO_DEFAULT - 20)
Example #13
  def testParams(self):
    supported_by_all = set(["debug_level", "dry_run", "priority"])

    self.assertTrue(opcodes.BaseOpCode not in opcodes.OP_MAPPING.values())
    self.assertTrue(opcodes.OpCode not in opcodes.OP_MAPPING.values())

    for cls in opcodes.OP_MAPPING.values() + [opcodes.OpCode]:
      all_slots = cls.GetAllSlots()

      self.assertEqual(len(set(all_slots) & supported_by_all), 3,
                       msg=("Opcode %s doesn't support all base"
                            " parameters (%r)" % (cls.OP_ID, supported_by_all)))

      # All opcodes must have OP_PARAMS
      self.assert_(hasattr(cls, "OP_PARAMS"),
                   msg="%s doesn't have OP_PARAMS" % cls.OP_ID)

      param_names = [name for (name, _, _, _) in cls.GetAllParams()]

      self.assertEqual(all_slots, param_names)

      # Without inheritance
      self.assertEqual(cls.__slots__,
                       [name for (name, _, _, _) in cls.OP_PARAMS])

      # This won't work if parameters are converted to a dictionary
      duplicates = utils.FindDuplicates(param_names)
      self.assertFalse(duplicates,
                       msg=("Found duplicate parameters %r in %s" %
                            (duplicates, cls.OP_ID)))

      # Check parameter definitions
      for attr_name, aval, test, doc in cls.GetAllParams():
        self.assert_(attr_name)
        self.assert_(test is None or test is ht.NoType or callable(test),
                     msg=("Invalid type check for %s.%s" %
                          (cls.OP_ID, attr_name)))
        self.assertTrue(doc is None or isinstance(doc, basestring))

        if callable(aval):
          default_value = aval()
          self.assertFalse(callable(default_value),
                           msg=("Default value of %s.%s returned by function"
                                " is callable" % (cls.OP_ID, attr_name)))
        else:
          self.assertFalse(isinstance(aval, (list, dict, set)),
                           msg=("Default value of %s.%s is mutable (%s)" %
                                (cls.OP_ID, attr_name, repr(aval))))

          default_value = aval

        if aval is not ht.NoDefault and test is not ht.NoType:
          self.assertTrue(test(default_value),
                          msg=("Default value of %s.%s does not verify" %
                               (cls.OP_ID, attr_name)))

      # If any parameter has documentation, all others need to have it as well
      has_doc = [doc is not None for (_, _, _, doc) in cls.OP_PARAMS]
      self.assertTrue(not compat.any(has_doc) or compat.all(has_doc),
                      msg="%s does not document all parameters" % cls)
Example #14
    def _PrepareRequests(hosts, port, procedure, body, read_timeout):
        """Prepares requests by sorting offline hosts into separate list.

    @type body: dict
    @param body: a dictionary with per-host body data

    """
        results = {}
        requests = {}

        assert isinstance(body, dict)
        assert len(body) == len(hosts)
        assert compat.all(isinstance(v, str) for v in body.values())
        assert frozenset(map(lambda x: x[2], hosts)) == frozenset(body.keys()), \
            "%s != %s" % (hosts, body.keys())

        for (name, ip, original_name) in hosts:
            if ip is _OFFLINE:
                # Node is marked as offline
                results[original_name] = RpcResult(node=name,
                                                   offline=True,
                                                   call=procedure)
            else:
                requests[original_name] = \
                  http.client.HttpClientRequest(str(ip), port,
                                                http.HTTP_POST, str("/%s" % procedure),
                                                headers=_RPC_CLIENT_HEADERS,
                                                post_data=body[original_name],
                                                read_timeout=read_timeout,
                                                nicename="%s/%s" % (name, procedure),
                                                curl_config_fn=_ConfigRpcCurl)

        return (results, requests)
Example #15
  def _NewJob():
    job = jqueue._QueuedJob(None, 1,
                            [opcodes.OpTestDelay() for _ in range(10)])
    self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_QUEUED)
    self.assert_(compat.all(op.status == constants.OP_STATUS_QUEUED
                            for op in job.ops))
    return job
Example #16
  def testResult(self):
    good_results = [
      # First result (all instances "allocate")
      [
        [["foo", ["a", "b"]],
         ["bar", ["c"]],
         ["baz", []]],
        []
      ],
      # Second result (partial "allocate", partial "fail")
      [
        [["bar", ["c", "b"]],
         ["baz", ["a"]]],
        ["foo"]
      ],
      # Third result (all instances "fail")
      [
        [],
        ["foo", "bar", "baz"]
      ],
      ]
    bad_results = [
      "foobar",
      1234,
      [],
      [[]],
      [[], [], []],
      ]

    result_fn = iallocator.IAReqMultiInstanceAlloc.REQ_RESULT

    self.assertTrue(compat.all(map(result_fn, good_results)))
    self.assertFalse(compat.any(map(result_fn, bad_results)))
Example #17
    def testIPv6(self):
        for mode in [constants.IEM_IMPORT, constants.IEM_EXPORT]:
            opts = CmdBuilderConfig(host="localhost",
                                    port=6789,
                                    ipv4=False,
                                    ipv6=False)
            builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
            cmd = builder._GetSocatCommand()
            self.assert_(compat.all("pf=" not in i for i in cmd))

            # IPv4
            opts = CmdBuilderConfig(host="localhost",
                                    port=6789,
                                    ipv4=True,
                                    ipv6=False)
            builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
            cmd = builder._GetSocatCommand()
            self.assert_(compat.any(",pf=ipv4" in i for i in cmd))

            # IPv6
            opts = CmdBuilderConfig(host="localhost",
                                    port=6789,
                                    ipv4=False,
                                    ipv6=True)
            builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
            cmd = builder._GetSocatCommand()
            self.assert_(compat.any(",pf=ipv6" in i for i in cmd))

            # IPv4 and IPv6
            opts = CmdBuilderConfig(host="localhost",
                                    port=6789,
                                    ipv4=True,
                                    ipv6=True)
            builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
            self.assertRaises(AssertionError, builder._GetSocatCommand)
Example #18
  def test(self):
    for method in baserlib._SUPPORTED_METHODS:
      # Empty handler
      obj = self._MakeClass(method, {})(None, {}, None)
      for attr in itertools.chain(*baserlib.OPCODE_ATTRS):
        self.assertFalse(hasattr(obj, attr))

      # Direct handler function
      obj = self._MakeClass(method, {
        method: lambda _: None,
        })(None, {}, None)
      self.assertFalse(compat.all(hasattr(obj, attr)
                                  for i in baserlib._SUPPORTED_METHODS
                                  for attr in self._GetMethodAttributes(i)))

      # Let metaclass define handler function
      for opcls in [None, object()]:
        obj = self._MakeClass(method, {
          "%s_OPCODE" % method: opcls,
          })(None, {}, None)
        self.assertTrue(callable(getattr(obj, method)))
        self.assertEqual(getattr(obj, "%s_OPCODE" % method), opcls)
        self.assertFalse(hasattr(obj, "%s_RENAME" % method))
        self.assertFalse(compat.any(hasattr(obj, attr)
                                    for i in baserlib._SUPPORTED_METHODS
                                      if i != method
                                    for attr in self._GetMethodAttributes(i)))
Example #19
    def _RunWrapper(self, node_list, hpath, phase, phase_env):
        """Simple wrapper over self.callfn.

    This method fixes the environment before doing the rpc call.

    """
        cfg = self.lu.cfg

        env = {
            "PATH": "/sbin:/bin:/usr/sbin:/usr/bin",
            "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
            "GANETI_OP_CODE": self.op.OP_ID,
            "GANETI_DATA_DIR": constants.DATA_DIR,
            "GANETI_HOOKS_PHASE": phase,
            "GANETI_HOOKS_PATH": hpath,
        }

        if self.lu.HTYPE:
            env["GANETI_OBJECT_TYPE"] = self.lu.HTYPE

        if cfg is not None:
            env["GANETI_CLUSTER"] = cfg.GetClusterName()
            env["GANETI_MASTER"] = cfg.GetMasterNode()

        if phase_env:
            assert not (set(env) & set(phase_env)), "Environment variables conflict"
            env.update(phase_env)

        # Convert everything to strings
        env = dict([(str(key), str(val)) for key, val in env.iteritems()])

        assert compat.all(key == "PATH" or key.startswith("GANETI_") for key in env)

        return self.callfn(node_list, hpath, phase, env)
Example #20
  def _RunWrapper(self, node_list, hpath, phase, phase_env):
    """Simple wrapper over self.callfn.

    This method fixes the environment before executing the hooks.

    """
    env = {
      "PATH": constants.HOOKS_PATH,
      "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
      "GANETI_OP_CODE": self.opcode,
      "GANETI_DATA_DIR": pathutils.DATA_DIR,
      "GANETI_HOOKS_PHASE": phase,
      "GANETI_HOOKS_PATH": hpath,
      }

    if self.htype:
      env["GANETI_OBJECT_TYPE"] = self.htype

    if self.cluster_name is not None:
      env["GANETI_CLUSTER"] = self.cluster_name

    if self.master_name is not None:
      env["GANETI_MASTER"] = self.master_name

    if phase_env:
      env = utils.algo.JoinDisjointDicts(env, phase_env)

    # Convert everything to strings
    env = dict([(str(key), str(val)) for key, val in env.iteritems()])

    assert compat.all(key == "PATH" or key.startswith("GANETI_")
                      for key in env)

    return self.hooks_execution_fn(node_list, hpath, phase, env)
Example #21
    def testGetStdPvSize(self):
        """Test cases for bdev.LogicalVolume._GetStdPvSize()"""
        rnd = random.Random(9517)
        for _ in range(0, 50):
            # Identical volumes
            pvi = self._GenerateRandomPvInfo(rnd, "disk", "myvg")
            onesize = bdev.LogicalVolume._GetStdPvSize([pvi])
            self.assertTrue(onesize <= pvi.size)
            self.assertTrue(onesize > pvi.size * (1 - self._MARGIN))
            for length in range(2, 10):
                n_size = bdev.LogicalVolume._GetStdPvSize([pvi] * length)
                self.assertEqual(onesize, n_size)

            # Mixed volumes
            for length in range(1, 10):
                pvlist = [
                    self._GenerateRandomPvInfo(rnd, "disk", "myvg")
                    for _ in range(0, length)
                ]
                std_size = bdev.LogicalVolume._GetStdPvSize(pvlist)
                self.assertTrue(
                    compat.all(std_size <= pvi.size for pvi in pvlist))
                self.assertTrue(
                    compat.any(std_size > pvi.size * (1 - self._MARGIN)
                               for pvi in pvlist))
                pvlist.append(pvlist[0])
                p1_size = bdev.LogicalVolume._GetStdPvSize(pvlist)
                self.assertEqual(std_size, p1_size)
Example #22
    def _TestPartial(self, job, successcount):
        self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_QUEUED)
        self.assertEqual(job.start_timestamp, job.ops[0].start_timestamp)

        queue = _FakeQueueForProc()
        opexec = _FakeExecOpCodeForProc(None, None)

        for remaining in reversed(range(len(job.ops) - successcount)):
            result = jqueue._JobProcessor(queue, opexec, job)()

            if remaining == 0:
                # Last opcode
                self.assert_(result)
                break

            self.assertFalse(result)

            self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_QUEUED)

        self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_SUCCESS)
        self.assertEqual(job.GetInfo(["status"]), [constants.JOB_STATUS_SUCCESS])
        self.assertEqual(job.GetInfo(["opresult"]), [[op.input.result for op in job.ops]])
        self.assertEqual(job.GetInfo(["opstatus"]), [[constants.OP_STATUS_SUCCESS for _ in job.ops]])
        self.assert_(compat.all(op.start_timestamp and op.end_timestamp for op in job.ops))

        self._GenericCheckJob(job)

        # Finished jobs can't be processed any further
        self.assertRaises(errors.ProgrammerError, jqueue._JobProcessor(queue, opexec, job))

        # ... also after being restored
        job2 = jqueue._QueuedJob.Restore(queue, job.Serialize())
        self.assertRaises(errors.ProgrammerError, jqueue._JobProcessor(queue, opexec, job2))
Example #23
    def test(self):
        for method in baserlib._SUPPORTED_METHODS:
            # Empty handler
            obj = self._MakeClass(method, {})(None, {}, None)
            for m_attr in baserlib.OPCODE_ATTRS:
                for attr in m_attr.GetAll():
                    self.assertFalse(hasattr(obj, attr))

            # Direct handler function
            obj = self._MakeClass(method, {
                method: lambda _: None,
            })(None, {}, None)
            self.assertFalse(
                compat.all(
                    hasattr(obj, attr) for i in baserlib._SUPPORTED_METHODS
                    for attr in self._GetMethodAttributes(i)))

            # Let metaclass define handler function
            for opcls in [None, object()]:
                obj = self._MakeClass(method, {
                    "%s_OPCODE" % method: opcls,
                })(None, {}, None)
                self.assertTrue(callable(getattr(obj, method)))
                self.assertEqual(getattr(obj, "%s_OPCODE" % method), opcls)
                self.assertFalse(hasattr(obj, "%s_RENAME" % method))
                self.assertFalse(
                    compat.any(
                        hasattr(obj, attr) for i in baserlib._SUPPORTED_METHODS
                        if i != method
                        for attr in self._GetMethodAttributes(i)))
Example #24
    def testCancelWhileInQueue(self):
        queue = _FakeQueueForProc()

        ops = [opcodes.OpTestDummy(result="Res%s" % i, fail=False) for i in range(5)]

        # Create job
        job_id = 17045
        job = self._CreateJob(queue, job_id, ops)

        self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_QUEUED)

        # Mark as cancelled
        (success, _) = job.Cancel()
        self.assert_(success)

        self.assert_(compat.all(op.status == constants.OP_STATUS_CANCELED for op in job.ops))

        opexec = _FakeExecOpCodeForProc(None, None)
        jqueue._JobProcessor(queue, opexec, job)()

        # Check result
        self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_CANCELED)
        self.assertEqual(job.GetInfo(["status"]), [constants.JOB_STATUS_CANCELED])
        self.assertFalse(job.start_timestamp)
        self.assert_(job.end_timestamp)
        self.assertFalse(compat.any(op.start_timestamp or op.end_timestamp for op in job.ops))
        self.assertEqual(
            job.GetInfo(["opstatus", "opresult"]),
            [[constants.OP_STATUS_CANCELED for _ in job.ops], ["Job canceled by request" for _ in job.ops]],
        )
Example #25
    def _RunWrapper(self, node_list, hpath, phase, phase_env):
        """Simple wrapper over self.callfn.

    This method fixes the environment before executing the hooks.

    """
        env = {
            "PATH": constants.HOOKS_PATH,
            "GANETI_HOOKS_VERSION": constants.HOOKS_VERSION,
            "GANETI_OP_CODE": self.opcode,
            "GANETI_DATA_DIR": pathutils.DATA_DIR,
            "GANETI_HOOKS_PHASE": phase,
            "GANETI_HOOKS_PATH": hpath,
        }

        if self.htype:
            env["GANETI_OBJECT_TYPE"] = self.htype

        if self.cluster_name is not None:
            env["GANETI_CLUSTER"] = self.cluster_name

        if self.master_name is not None:
            env["GANETI_MASTER"] = self.master_name

        if phase_env:
            env = utils.algo.JoinDisjointDicts(env, phase_env)

        # Convert everything to strings
        env = dict([(str(key), str(val)) for key, val in env.iteritems()])

        assert compat.all(key == "PATH" or key.startswith("GANETI_")
                          for key in env)

        return self.hooks_execution_fn(node_list, hpath, phase, env)
Example #26
  def _PrepareRequests(hosts, port, procedure, body, read_timeout):
    """Prepares requests by sorting offline hosts into separate list.

    @type body: dict
    @param body: a dictionary with per-host body data

    """
    results = {}
    requests = {}

    assert isinstance(body, dict)
    assert len(body) == len(hosts)
    assert compat.all(isinstance(v, str) for v in body.values())
    assert frozenset(h[2] for h in hosts) == frozenset(body.keys()), \
        "%s != %s" % (hosts, body.keys())

    for (name, ip, original_name) in hosts:
      if ip is _OFFLINE:
        # Node is marked as offline
        results[original_name] = RpcResult(node=name,
                                           offline=True,
                                           call=procedure)
      else:
        requests[original_name] = \
          http.client.HttpClientRequest(str(ip), port,
                                        http.HTTP_POST, str("/%s" % procedure),
                                        headers=_RPC_CLIENT_HEADERS,
                                        post_data=body[original_name],
                                        read_timeout=read_timeout,
                                        nicename="%s/%s" % (name, procedure),
                                        curl_config_fn=_ConfigRpcCurl)

    return (results, requests)
Example #27
  def _CreateJob(self, queue, job_id, ops):
    job = jqueue._QueuedJob(queue, job_id, ops)
    self.assertFalse(job.start_timestamp)
    self.assertFalse(job.end_timestamp)
    self.assertEqual(len(ops), len(job.ops))
    self.assert_(compat.all(op.input == inp
                            for (op, inp) in zip(job.ops, ops)))
    self.assertEqual(job.GetInfo(["ops"]),
                     [[op.__getstate__() for op in ops]])
    return job
Example #28
        def _BeforeStart(timeout, priority):
            self.assertFalse(queue.IsAcquired())
            self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_WAITLOCK)

            # Mark as cancelled
            (success, _) = job.Cancel()
            self.assert_(success)

            self.assert_(compat.all(op.status == constants.OP_STATUS_CANCELING for op in job.ops))
Example #29
    def _AddPendingToQueue(self):
        """Adds all pending import/export objects to the internal queue.

    """
        assert compat.all(diskie not in self._queue and diskie.loop == self for diskie in self._pending_add)

        self._queue.extend(self._pending_add)

        del self._pending_add[:]
Example #30
def TestEnabled(tests):
  """Returns True if the given tests are enabled.

  @param tests: a single test, or a list of tests to check

  """
  if isinstance(tests, basestring):
    tests = [tests]
  return compat.all(cfg.get("tests", {}).get(t, True) for t in tests)
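
The lookup is simple enough to verify standalone; a sketch with a local stand-in for the module-level cfg (the real cfg is the QA configuration loaded elsewhere; the test names here are made up):

cfg = {"tests": {"cluster-verify": False}}

def _enabled(tests):
  # Mirrors TestEnabled, with the builtin all() in place of compat.all
  if isinstance(tests, basestring):
    tests = [tests]
  return all(cfg.get("tests", {}).get(t, True) for t in tests)

assert _enabled("instance-add")  # unlisted tests default to enabled
assert not _enabled(["instance-add", "cluster-verify"])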
Example #31
def _FormatHeaders(headers):
  """Formats HTTP headers.

  @type headers: sequence of strings
  @rtype: string

  """
  assert compat.all(": " in header for header in headers)
  return "\n".join(headers)
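
The assertion requires each entry to already be a complete "Name: value" string; an illustrative call:

assert _FormatHeaders(["Content-type: text/plain", "Accept: */*"]) == \
    "Content-type: text/plain\nAccept: */*"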
Example #32
def _FormatHeaders(headers):
    """Formats HTTP headers.

  @type headers: sequence of strings
  @rtype: string

  """
    assert compat.all(": " in header for header in headers)
    return "\n".join(headers)
Example #33
    def testOpcodeError(self):
        queue = _FakeQueueForProc()

        testdata = [(17077, 1, 0, 0), (1782, 5, 2, 2), (18179, 10, 9, 9), (4744, 10, 3, 8), (23816, 100, 39, 45)]

        for (job_id, opcount, failfrom, failto) in testdata:
            # Prepare opcodes
            ops = [
                opcodes.OpTestDummy(result="Res%s" % i, fail=(failfrom <= i and i <= failto)) for i in range(opcount)
            ]

            # Create job
            job = self._CreateJob(queue, job_id, ops)

            opexec = _FakeExecOpCodeForProc(None, None)

            for idx in range(len(ops)):
                result = jqueue._JobProcessor(queue, opexec, job)()

                if idx in (failfrom, len(ops) - 1):
                    # Last opcode
                    self.assert_(result)
                    break

                self.assertFalse(result)

                self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_QUEUED)

            # Check job status
            self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_ERROR)
            self.assertEqual(job.GetInfo(["id"]), [job_id])
            self.assertEqual(job.GetInfo(["status"]), [constants.JOB_STATUS_ERROR])

            # Check opcode status
            data = zip(job.ops, job.GetInfo(["opstatus"])[0], job.GetInfo(["opresult"])[0])

            for idx, (op, opstatus, opresult) in enumerate(data):
                if idx < failfrom:
                    assert not op.input.fail
                    self.assertEqual(opstatus, constants.OP_STATUS_SUCCESS)
                    self.assertEqual(opresult, op.input.result)
                elif idx <= failto:
                    assert op.input.fail
                    self.assertEqual(opstatus, constants.OP_STATUS_ERROR)
                    self.assertRaises(errors.OpExecError, errors.MaybeRaise, opresult)
                else:
                    assert not op.input.fail
                    self.assertEqual(opstatus, constants.OP_STATUS_ERROR)
                    self.assertRaises(errors.OpExecError, errors.MaybeRaise, opresult)

            self.assert_(compat.all(op.start_timestamp and op.end_timestamp for op in job.ops[:failfrom]))

            self._GenericCheckJob(job)

            # Finished jobs can't be processed any further
            self.assertRaises(errors.ProgrammerError, jqueue._JobProcessor(queue, opexec, job))
Example #34
def _GetGroupData(cl, uuid):
  """Retrieves instances and nodes per node group.

  """
  job = [
    # Get all primary instances in group
    opcodes.OpQuery(what=constants.QR_INSTANCE,
                    fields=["name", "status", "disks_active", "snodes",
                            "pnode.group.uuid", "snodes.group.uuid"],
                    qfilter=[qlang.OP_EQUAL, "pnode.group.uuid", uuid],
                    use_locking=True),

    # Get all nodes in group
    opcodes.OpQuery(what=constants.QR_NODE,
                    fields=["name", "bootid", "offline"],
                    qfilter=[qlang.OP_EQUAL, "group.uuid", uuid],
                    use_locking=True),
    ]

  job_id = cl.SubmitJob(job)
  results = map(objects.QueryResponse.FromDict,
                cli.PollJob(job_id, cl=cl, feedback_fn=logging.debug))
  cl.ArchiveJob(job_id)

  results_data = map(operator.attrgetter("data"), results)

  # Ensure results are tuples with two values
  assert compat.all(map(ht.TListOf(ht.TListOf(ht.TIsLength(2))), results_data))

  # Extract values ignoring result status
  (raw_instances, raw_nodes) = [[map(compat.snd, values)
                                 for values in res]
                                for res in results_data]

  secondaries = {}
  instances = []

  # Load all instances
  for (name, status, disks_active, snodes, pnode_group_uuid,
       snodes_group_uuid) in raw_instances:
    if snodes and set([pnode_group_uuid]) != set(snodes_group_uuid):
      logging.error("Ignoring split instance '%s', primary group %s, secondary"
                    " groups %s", name, pnode_group_uuid,
                    utils.CommaJoin(snodes_group_uuid))
    else:
      instances.append(Instance(name, status, disks_active, snodes))

      for node in snodes:
        secondaries.setdefault(node, set()).add(name)

  # Load all nodes
  nodes = [Node(name, bootid, offline, secondaries.get(name, set()))
           for (name, bootid, offline) in raw_nodes]

  return (dict((node.name, node) for node in nodes),
          dict((inst.name, inst) for inst in instances))
Example #35
  def _AddPendingToQueue(self):
    """Adds all pending import/export objects to the internal queue.

    """
    assert compat.all(diskie not in self._queue and diskie.loop == self
                      for diskie in self._pending_add)

    self._queue.extend(self._pending_add)

    del self._pending_add[:]
Example #36
  def testTinySummary(self):
    self.assertFalse(
      utils.FindDuplicates(opcodes_base.SUMMARY_PREFIX.values()))
    self.assertTrue(compat.all(prefix.endswith("_") and supplement.endswith("_")
                               for (prefix, supplement) in
                                 opcodes_base.SUMMARY_PREFIX.items()))

    self.assertEqual(opcodes.OpClusterPostInit().TinySummary(), "C_POST_INIT")
    self.assertEqual(opcodes.OpNodeRemove().TinySummary(), "N_REMOVE")
    self.assertEqual(opcodes.OpInstanceMigrate().TinySummary(), "I_MIGRATE")
    self.assertEqual(opcodes.OpTestJqueue().TinySummary(), "TEST_JQUEUE")
Example #37
  def _BuildEnv(self, phase):
    """Compute the environment and the target nodes.

    Based on the opcode and the current node list, this builds the
    environment for the hooks and the target node list for the run.

    """
    if phase == constants.HOOKS_PHASE_PRE:
      prefix = "GANETI_"
    elif phase == constants.HOOKS_PHASE_POST:
      prefix = "GANETI_POST_"
    else:
      raise AssertionError("Unknown phase '%s'" % phase)

    env = {}

    if self.hooks_path is not None:
      phase_env = self.build_env_fn()
      if phase_env:
        assert not compat.any(key.upper().startswith(prefix)
                              for key in phase_env)
        env.update(("%s%s" % (prefix, key), value)
                   for (key, value) in phase_env.items())

    if phase == constants.HOOKS_PHASE_PRE:
      assert compat.all((key.startswith("GANETI_") and
                         not key.startswith("GANETI_POST_"))
                        for key in env)

    elif phase == constants.HOOKS_PHASE_POST:
      assert compat.all(key.startswith("GANETI_POST_") for key in env)
      assert isinstance(self.pre_env, dict)

      # Merge with pre-phase environment
      assert not compat.any(key.startswith("GANETI_POST_")
                            for key in self.pre_env)
      env.update(self.pre_env)
    else:
      raise AssertionError("Unknown phase '%s'" % phase)

    return env
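
The pre-phase prefixing is easy to show in isolation; a sketch with a plain dictionary standing in for build_env_fn's result:

prefix = "GANETI_"
phase_env = {"INSTANCE_NAME": "inst1.example.com"}  # illustrative

env = dict(("%s%s" % (prefix, key), value)
           for (key, value) in phase_env.items())

assert env == {"GANETI_INSTANCE_NAME": "inst1.example.com"}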
Example #38
    def _BuildEnv(self, phase):
        """Compute the environment and the target nodes.

    Based on the opcode and the current node list, this builds the
    environment for the hooks and the target node list for the run.

    """
        if phase == constants.HOOKS_PHASE_PRE:
            prefix = "GANETI_"
        elif phase == constants.HOOKS_PHASE_POST:
            prefix = "GANETI_POST_"
        else:
            raise AssertionError("Unknown phase '%s'" % phase)

        env = {}

        if self.hooks_path is not None:
            phase_env = self.build_env_fn()
            if phase_env:
                assert not compat.any(key.upper().startswith(prefix)
                                      for key in phase_env)
                env.update(("%s%s" % (prefix, key), value)
                           for (key, value) in phase_env.items())

        if phase == constants.HOOKS_PHASE_PRE:
            assert compat.all((key.startswith("GANETI_")
                               and not key.startswith("GANETI_POST_"))
                              for key in env)

        elif phase == constants.HOOKS_PHASE_POST:
            assert compat.all(key.startswith("GANETI_POST_") for key in env)
            assert isinstance(self.pre_env, dict)

            # Merge with pre-phase environment
            assert not compat.any(
                key.startswith("GANETI_POST_") for key in self.pre_env)
            env.update(self.pre_env)
        else:
            raise AssertionError("Unknown phase '%s'" % phase)

        return env
Example #39
def _LoadKnownGroups():
    """Returns a list of all node groups known by L{ssconf}.

  """
    groups = ssconf.SimpleStore().GetNodegroupList()

    result = list(line.split(None, 1)[0] for line in groups if line.strip())

    if not compat.all(map(utils.UUID_RE.match, result)):
        raise errors.GenericError("Ssconf contains invalid group UUID")

    return result
Example #40
def _LoadKnownGroups():
    """Returns a list of all node groups known by L{ssconf}.

  """
    groups = ssconf.SimpleStore().GetNodegroupList()

    result = list(line.split(None, 1)[0] for line in groups if line.strip())

    if not compat.all(utils.UUID_RE.match(r) for r in result):
        raise errors.GenericError("Ssconf contains invalid group UUID")

    return result
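
A sketch of the extraction step on sample ssconf lines (the UUIDs are illustrative): each line is "uuid name", only the first field is kept, and blank lines are skipped.

groups = [
  "773faa9a-2b58-4a8b-b0a4-783a47569d61 default",
  "",
  "b371a2b5-41c6-4a43-a822-f0cd6dcca522 rack2",
  ]

result = list(line.split(None, 1)[0] for line in groups if line.strip())
assert result == ["773faa9a-2b58-4a8b-b0a4-783a47569d61",
                  "b371a2b5-41c6-4a43-a822-f0cd6dcca522"]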
Example #41
def _TStrictDictCheck(require_all, exclusive, items, val):
    """Helper function for L{TStrictDict}.

  """
    notfound_fn = lambda _: not exclusive

    if require_all and not frozenset(val.keys()).issuperset(items.keys()):
        # Requires items not found in value
        return False

    return compat.all(
        items.get(key, notfound_fn)(value) for (key, value) in val.items())
Example #42
def _TStrictDictCheck(require_all, exclusive, items, val):
  """Helper function for L{TStrictDict}.

  """
  notfound_fn = lambda _: not exclusive

  if require_all and not frozenset(val.keys()).issuperset(items.keys()):
    # Requires items not found in value
    return False

  return compat.all(items.get(key, notfound_fn)(value)
                    for (key, value) in val.items())
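
Because the helper is pure, its outcomes are easy to exercise directly; a sketch using plain lambdas in place of ht checks:

items = {"name": lambda v: isinstance(v, str),
         "size": lambda v: isinstance(v, int)}

# All required keys present and valid
assert _TStrictDictCheck(True, True, items, {"name": "sda", "size": 128})
# A required key is missing
assert not _TStrictDictCheck(True, True, items, {"name": "sda"})
# An unknown key is rejected when exclusive is set
assert not _TStrictDictCheck(False, True, items, {"flag": 1})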
Example #43
def TestClusterEpo():
    """gnt-cluster epo"""
    master = qa_config.GetMasterNode()

    # Assert that OOB is unavailable for all nodes
    result_output = GetCommandOutput(
        master.primary, "gnt-node list --verbose --no-headers -o"
        " powered")
    AssertEqual(
        compat.all(powered == "(unavail)"
                   for powered in result_output.splitlines()), True)

    # Conflicting
    AssertCommand(["gnt-cluster", "epo", "--groups", "--all"], fail=True)
    # --all doesn't expect arguments
    AssertCommand(["gnt-cluster", "epo", "--all", "some_arg"], fail=True)

    # Unless --all is given master is not allowed to be in the list
    AssertCommand(["gnt-cluster", "epo", "-f", master.primary], fail=True)

    # This shouldn't fail
    AssertCommand(["gnt-cluster", "epo", "-f", "--all"])

    # All instances should have been stopped now
    result_output = GetCommandOutput(
        master.primary, "gnt-instance list --no-headers -o status")
    # ERROR_down because the instance is stopped but not recorded as such
    AssertEqual(
        compat.all(status == "ERROR_down"
                   for status in result_output.splitlines()), True)

    # Now start everything again
    AssertCommand(["gnt-cluster", "epo", "--on", "-f", "--all"])

    # All instances should have been started now
    result_output = GetCommandOutput(
        master.primary, "gnt-instance list --no-headers -o status")
    AssertEqual(
        compat.all(status == "running"
                   for status in result_output.splitlines()), True)
Example #44
def _GetGroupData(qcl, uuid):
    """Retrieves instances and nodes per node group.

  """
    queries = [
        (constants.QR_INSTANCE, [
            "name", "status", "disks_active", "snodes", "pnode.group.uuid",
            "snodes.group.uuid"
        ], [qlang.OP_EQUAL, "pnode.group.uuid", uuid]),
        (constants.QR_NODE, ["name", "bootid",
                             "offline"], [qlang.OP_EQUAL, "group.uuid", uuid]),
    ]

    results = []
    for what, fields, qfilter in queries:
        results.append(qcl.Query(what, fields, qfilter))

    results_data = map(operator.attrgetter("data"), results)

    # Ensure results are tuples with two values
    assert compat.all(
        map(ht.TListOf(ht.TListOf(ht.TIsLength(2))), results_data))

    # Extract values ignoring result status
    (raw_instances, raw_nodes) = [[map(compat.snd, values) for values in res]
                                  for res in results_data]

    secondaries = {}
    instances = []

    # Load all instances
    for (name, status, disks_active, snodes, pnode_group_uuid,
         snodes_group_uuid) in raw_instances:
        if snodes and set([pnode_group_uuid]) != set(snodes_group_uuid):
            logging.error(
                "Ignoring split instance '%s', primary group %s, secondary"
                " groups %s", name, pnode_group_uuid,
                utils.CommaJoin(snodes_group_uuid))
        else:
            instances.append(Instance(name, status, disks_active, snodes))

            for node in snodes:
                secondaries.setdefault(node, set()).add(name)

    # Load all nodes
    nodes = [
        Node(name, bootid, offline, secondaries.get(name, set()))
        for (name, bootid, offline) in raw_nodes
    ]

    return (dict((node.name, node) for node in nodes),
            dict((inst.name, inst) for inst in instances))
Example #45
  def _Check(job):
    self.assertEqual(job.id, job_id)
    self.assertEqual(job.log_serial, 0)
    self.assert_(job.received_timestamp)
    self.assert_(job.start_timestamp is None)
    self.assert_(job.end_timestamp is None)
    self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_QUEUED)
    self.assertEqual(job.CalcPriority(), constants.OP_PRIO_DEFAULT)
    self.assert_(repr(job).startswith("<"))
    self.assertEqual(len(job.ops), len(ops))
    self.assert_(compat.all(inp.__getstate__() == op.input.__getstate__()
                            for (inp, op) in zip(ops, job.ops)))
    self.assertRaises(errors.OpExecError, job.GetInfo, ["unknown-field"])
    self.assertEqual(job.GetInfo(["summary"]),
                     [[op.input.Summary() for op in job.ops]])
Example #46
def _StartRequest(curl, req):
    """Starts a request on a cURL object.

  @type curl: pycurl.Curl
  @param curl: cURL object
  @type req: L{HttpClientRequest}
  @param req: HTTP request

  """
    logging.debug("Starting request %r", req)

    url = req.url
    method = req.method
    post_data = req.post_data
    headers = req.headers

    # PycURL requires strings to be non-unicode
    assert isinstance(method, str)
    assert isinstance(url, str)
    assert isinstance(post_data, str)
    assert compat.all(isinstance(i, str) for i in headers)

    # Buffer for response
    resp_buffer = StringIO()

    # Configure client for request
    curl.setopt(pycurl.VERBOSE, False)
    curl.setopt(pycurl.NOSIGNAL, True)
    curl.setopt(pycurl.USERAGENT, http.HTTP_GANETI_VERSION)
    curl.setopt(pycurl.PROXY, "")
    curl.setopt(pycurl.CUSTOMREQUEST, str(method))
    curl.setopt(pycurl.URL, url)
    curl.setopt(pycurl.POSTFIELDS, post_data)
    curl.setopt(pycurl.HTTPHEADER, headers)

    if req.read_timeout is None:
        curl.setopt(pycurl.TIMEOUT, 0)
    else:
        curl.setopt(pycurl.TIMEOUT, int(req.read_timeout))

    # Disable SSL session ID caching (pycurl >= 7.16.0)
    if hasattr(pycurl, "SSL_SESSIONID_CACHE"):
        curl.setopt(pycurl.SSL_SESSIONID_CACHE, False)

    curl.setopt(pycurl.WRITEFUNCTION, resp_buffer.write)

    # Pass cURL object to external config function
    if req.curl_config_fn:
        req.curl_config_fn(curl)

    return _PendingRequest(curl, req, resp_buffer.getvalue)
Example #47
def _StartRequest(curl, req):
  """Starts a request on a cURL object.

  @type curl: pycurl.Curl
  @param curl: cURL object
  @type req: L{HttpClientRequest}
  @param req: HTTP request

  """
  logging.debug("Starting request %r", req)

  url = req.url
  method = req.method
  post_data = req.post_data
  headers = req.headers

  # PycURL requires strings to be non-unicode
  assert isinstance(method, str)
  assert isinstance(url, str)
  assert isinstance(post_data, str)
  assert compat.all(isinstance(i, str) for i in headers)

  # Buffer for response
  resp_buffer = StringIO()

  # Configure client for request
  curl.setopt(pycurl.VERBOSE, False)
  curl.setopt(pycurl.NOSIGNAL, True)
  curl.setopt(pycurl.USERAGENT, http.HTTP_GANETI_VERSION)
  curl.setopt(pycurl.PROXY, "")
  curl.setopt(pycurl.CUSTOMREQUEST, str(method))
  curl.setopt(pycurl.URL, url)
  curl.setopt(pycurl.POSTFIELDS, post_data)
  curl.setopt(pycurl.HTTPHEADER, headers)

  if req.read_timeout is None:
    curl.setopt(pycurl.TIMEOUT, 0)
  else:
    curl.setopt(pycurl.TIMEOUT, int(req.read_timeout))

  # Disable SSL session ID caching (pycurl >= 7.16.0)
  if hasattr(pycurl, "SSL_SESSIONID_CACHE"):
    curl.setopt(pycurl.SSL_SESSIONID_CACHE, False)

  curl.setopt(pycurl.WRITEFUNCTION, resp_buffer.write)

  # Pass cURL object to external config function
  if req.curl_config_fn:
    req.curl_config_fn(curl)

  return _PendingRequest(curl, req, resp_buffer.getvalue)
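
req.curl_config_fn gives callers a per-request hook, invoked with the configured handle after the standard options are set; a hedged sketch of such a function (the option choices are illustrative, not Ganeti's _ConfigRpcCurl):

import pycurl

def _ConfigDemoCurl(curl):
  # Called once per request, after _StartRequest's own setopt calls
  curl.setopt(pycurl.CONNECTTIMEOUT, 10)
  curl.setopt(pycurl.FOLLOWLOCATION, False)

req = HttpClientRequest("198.51.100.1", 5900, "GET", "/version",
                        curl_config_fn=_ConfigDemoCurl)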
Example #48
    def testTinySummary(self):
        self.assertFalse(
            utils.FindDuplicates(opcodes_base.SUMMARY_PREFIX.values()))
        self.assertTrue(
            compat.all(
                prefix.endswith("_") and supplement.endswith("_")
                for (prefix,
                     supplement) in opcodes_base.SUMMARY_PREFIX.items()))

        self.assertEqual(opcodes.OpClusterPostInit().TinySummary(),
                         "C_POST_INIT")
        self.assertEqual(opcodes.OpNodeRemove().TinySummary(), "N_REMOVE")
        self.assertEqual(opcodes.OpInstanceMigrate().TinySummary(),
                         "I_MIGRATE")
        self.assertEqual(opcodes.OpTestJqueue().TinySummary(), "TEST_JQUEUE")
Example #49
    def test(self):
        error_name = logging.getLevelName(logging.ERROR)
        warn_name = logging.getLevelName(logging.WARNING)
        info_name = logging.getLevelName(logging.INFO)
        debug_name = logging.getLevelName(logging.DEBUG)

        for debug in [False, True]:
            for verbose in [False, True]:
                logger = logging.Logger("TestLogger")
                buf = StringIO()

                utils.SetupToolLogging(debug,
                                       verbose,
                                       _root_logger=logger,
                                       _stream=buf)

                logger.error("level=error")
                logger.warning("level=warning")
                logger.info("level=info")
                logger.debug("level=debug")

                lines = buf.getvalue().splitlines()

                self.assertTrue(
                    compat.all(line.count(":") == 3 for line in lines))

                messages = [line.split(":", 3)[-1].strip() for line in lines]

                if debug:
                    self.assertEqual(messages, [
                        "%s level=error" % error_name,
                        "%s level=warning" % warn_name,
                        "%s level=info" % info_name,
                        "%s level=debug" % debug_name,
                    ])
                elif verbose:
                    self.assertEqual(messages, [
                        "%s level=error" % error_name,
                        "%s level=warning" % warn_name,
                        "%s level=info" % info_name,
                    ])
                else:
                    self.assertEqual(messages, [
                        "level=error",
                        "level=warning",
                    ])
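The test pins down the level selection that SetupToolLogging must implement: debug enables everything, verbose stops at INFO, and the default shows only warnings and errors without level names. A rough sketch of that mapping, assuming a plain stdlib handler (not Ganeti's actual implementation; the real formatter also prepends a timestamp, which the test's count(":") == 3 assertion reflects):

import logging
import sys

def setup_tool_logging(debug, verbose, stream=sys.stderr):
    # debug wins over verbose; only the chattier modes show the level name
    if debug:
        level, fmt = logging.DEBUG, "%(levelname)s %(message)s"
    elif verbose:
        level, fmt = logging.INFO, "%(levelname)s %(message)s"
    else:
        level, fmt = logging.WARNING, "%(message)s"

    handler = logging.StreamHandler(stream)
    handler.setFormatter(logging.Formatter(fmt))
    root = logging.getLogger()
    root.addHandler(handler)
    root.setLevel(level)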
Example #50
def TItems(items):
  """Checks individual items of a container.

  If the verified value and the list of expected items differ in length, this
  check considers only as many items as are contained in the shorter list. Use
  L{TIsLength} to enforce a certain length.

  @type items: list
  @param items: List of checks

  """
  assert items, "Need items"

  text = ["Item", "item"]
  desc = WithDesc(utils.CommaJoin("%s %s is %s" %
                                  (text[int(idx > 0)], idx, Parens(check))
                                  for (idx, check) in enumerate(items)))

  return desc(lambda value: compat.all(check(i)
                                       for (check, i) in zip(items, value)))
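Because the checks and the value are combined with zip(), extra items on either side are silently ignored, exactly as the docstring warns. A hypothetical usage, assuming TString and TInt checkers from the same module:

check = TItems([TString, TInt])
check(["name", 3])       # True: both items pass their check
check(["name", "3"])     # False: the second item fails TInt
check(["name", 3, 4.5])  # True: zip() drops the unchecked third item;
# combine with TIsLength, e.g. TAnd(TIsLength(2), check), to pin the length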
Example #51
    def testTimeout(self):
        def _CalcTimeout(args):
            (arg1, arg2) = args
            return arg1 + arg2

        def _VerifyRequest(exp_timeout, req):
            self.assertEqual(req.read_timeout, exp_timeout)

            req.success = True
            req.resp_status_code = http.HTTP_OK
            req.resp_body = serializer.DumpJson((True, hex(req.read_timeout)))

        resolver = rpc._StaticResolver([
            "192.0.2.1",
            "192.0.2.2",
        ])

        nodes = [
            "node1.example.com",
            "node2.example.com",
        ]

        tests = [(100, None, 100), (30, None, 30)]
        tests.extend((_CalcTimeout, i, i + 300) for i in [0, 5, 16485, 30516])

        for timeout, arg1, exp_timeout in tests:
            cdef = ("test_call", NotImplemented, None, timeout, [
                ("arg1", None, NotImplemented),
                ("arg2", None, NotImplemented),
            ], None, None, NotImplemented)

            http_proc = _FakeRequestProcessor(
                compat.partial(_VerifyRequest, exp_timeout))
            client = rpc._RpcClientBase(resolver,
                                        NotImplemented,
                                        _req_process_fn=http_proc)
            result = client._Call(cdef, nodes, [arg1, 300])
            self.assertEqual(len(result), len(nodes))
            self.assertTrue(
                compat.all(not res.fail_msg and res.payload == hex(exp_timeout)
                           for res in result.values()))
Example #52
    def CheckArguments(self):
        """Check arguments.

    """
        nodes = []
        for inst in self.op.instances:
            if inst.iallocator is not None:
                raise errors.OpPrereqError(
                    "iallocator are not allowed to be set on"
                    " instance objects", errors.ECODE_INVAL)
            nodes.append(bool(inst.pnode))
            if inst.disk_template in constants.DTS_INT_MIRROR:
                nodes.append(bool(inst.snode))

        has_nodes = compat.any(nodes)
        if compat.all(nodes) ^ has_nodes:
            raise errors.OpPrereqError(
                "There are instance objects providing"
                " pnode/snode while others do not", errors.ECODE_INVAL)

        if not has_nodes and self.op.iallocator is None:
            default_iallocator = self.cfg.GetDefaultIAllocator()
            if default_iallocator:
                self.op.iallocator = default_iallocator
            else:
                raise errors.OpPrereqError(
                    "No iallocator or nodes on the instances"
                    " given and no cluster-wide default"
                    " iallocator found; please specify either"
                    " an iallocator or nodes on the instances"
                    " or set a cluster-wide default iallocator",
                    errors.ECODE_INVAL)

        CheckOpportunisticLocking(self.op)

        dups = utils.FindDuplicates(
            [op.instance_name for op in self.op.instances])
        if dups:
            raise errors.OpPrereqError(
                "There are duplicate instance names: %s" %
                utils.CommaJoin(dups), errors.ECODE_INVAL)
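The compat.all(nodes) ^ has_nodes expression above is a compact mixed-specification check: for a non-empty list of flags it is true exactly when some, but not all, flags are set. A sketch of the truth table, using the stdlib equivalents of compat.all/compat.any:

def mixed(flags):
    return all(flags) ^ any(flags)

print(mixed([True, True]))    # False: every instance provides nodes
print(mixed([False, False]))  # False: no instance provides nodes
print(mixed([True, False]))   # True: inconsistent, raises OpPrereqError above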
Example #53
  def _CheckAllListening(self):
    """Checks whether all daemons are listening.

    If all daemons are listening, the information is sent to the client.

    """
    if not compat.all(dp is not None for dp in self._daemon_port):
      return

    host = self._external_address

    disks = []
    for idx, (port, magic) in enumerate(self._daemon_port):
      disks.append(ComputeRemoteImportDiskInfo(self._cds, self._salt,
                                               idx, host, port, magic))

    assert len(disks) == self._disk_count

    self._feedback_fn(constants.ELOG_REMOTE_IMPORT, {
      "disks": disks,
      "x509_ca": self._x509_cert_pem,
      })
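The early return implements a "wait until all slots are filled" gate: self._daemon_port starts as a list of None placeholders, and the client is notified only once every daemon has reported a (port, magic) pair. A stripped-down sketch of the gate, with illustrative names:

slots = [None] * 3  # one (port, magic) placeholder per disk

def on_daemon_ready(idx, port, magic, notify):
    slots[idx] = (port, magic)
    if not all(s is not None for s in slots):
        return  # some daemons have not reported yet
    notify(slots)  # every daemon is listening now

on_daemon_ready(0, 11000, "m0", print)  # no output yet
on_daemon_ready(2, 11002, "m2", print)  # no output yet
on_daemon_ready(1, 11001, "m1", print)  # prints all three slots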
Example #54
def fn(container):
    return (compat.all(key_type(v) for v in container.keys())
            and compat.all(val_type(v) for v in container.values()))
Example #55
def TListOf(my_type):
    """Checks if a given value is a list with all elements of the same type.

  """
    desc = WithDesc("List of %s" % (Parens(my_type), ))
    return desc(TAnd(TList, lambda lst: compat.all(my_type(v) for v in lst)))
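A hypothetical usage, assuming a TString element checker from the same module:

TListOfStrings = TListOf(TString)
TListOfStrings(["a", "b"])  # True
TListOfStrings(["a", 1])    # False: one element fails TString
TListOfStrings(("a", "b"))  # False: the TList part rejects tuples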
Example #56
def fn(val):
    return compat.all(t(val) for t in args)
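This closure is the core of a TAnd-style combinator: the composed check passes only when every wrapped check passes. A self-contained equivalent:

def t_and(*args):
    def fn(val):
        return all(t(val) for t in args)
    return fn

short_str = t_and(lambda v: isinstance(v, str), lambda v: len(v) < 8)
print(short_str("ok"))            # True: both checks pass
print(short_str("far too long"))  # False: the length check fails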
Example #57
    def test(self):
        """Check whether all RAPI resources are documented.

    """
        rapidoc = _ReadDocFile("rapi.rst")

        node_name = re.escape("[node_name]")
        instance_name = re.escape("[instance_name]")
        group_name = re.escape("[group_name]")
        network_name = re.escape("[network_name]")
        job_id = re.escape("[job_id]")
        disk_index = re.escape("[disk_index]")
        filter_uuid = re.escape("[filter_uuid]")
        query_res = re.escape("[resource]")

        resources = connector.GetHandlers(node_name, instance_name, group_name,
                                          network_name, job_id, disk_index,
                                          filter_uuid, query_res)

        handler_dups = utils.FindDuplicates(resources.values())
        self.assertFalse(handler_dups,
                         msg=("Resource handlers used more than once: %r" %
                              handler_dups))

        uri_check_fixup = {
            re.compile(node_name): "node1examplecom",
            re.compile(instance_name): "inst1examplecom",
            re.compile(group_name): "group4440",
            re.compile(network_name): "network5550",
            re.compile(job_id): "9409",
            re.compile(disk_index): "123",
            re.compile(filter_uuid): "c863fbb5-f248-47bf-869b-cea259890061",
            re.compile(query_res): "lock",
        }

        assert compat.all(VALID_URI_RE.match(value)
                          for value in uri_check_fixup.values()), \
               "Fixup values must be valid URIs, too"

        titles = []

        prevline = None
        for line in rapidoc.splitlines():
            if re.match(r"^\++$", line):
                titles.append(prevline)

            prevline = line

        prefix_exception = compat.UniqueFrozenset(["/", "/version", "/2"])

        undocumented = []
        used_uris = []

        for key, handler in resources.iteritems():
            # Regex objects
            if hasattr(key, "match"):
                self.assert_(key.pattern.startswith("^/2/"),
                             msg="Pattern %r does not start with '^/2/'" %
                             key.pattern)
                self.assertEqual(key.pattern[-1], "$")

                found = False
                for title in titles:
                    if title.startswith("``") and title.endswith("``"):
                        uri = title[2:-2]
                        if key.match(uri):
                            self._CheckRapiResource(uri, uri_check_fixup,
                                                    handler)
                            used_uris.append(uri)
                            found = True
                            break

                if not found:
                    # TODO: Find better way of identifying resource
                    undocumented.append(key.pattern)

            else:
                self.assert_(key.startswith("/2/") or key in prefix_exception,
                             msg="Path %r does not start with '/2/'" % key)

                if ("``%s``" % key) in titles:
                    self._CheckRapiResource(key, {}, handler)
                    used_uris.append(key)
                else:
                    undocumented.append(key)

        self.failIf(undocumented,
                    msg=("Missing RAPI resource documentation for %s" %
                         utils.CommaJoin(undocumented)))

        uri_dups = utils.FindDuplicates(used_uris)
        self.failIf(uri_dups,
                    msg=("URIs matched by more than one resource: %s" %
                         utils.CommaJoin(uri_dups)))

        self._FindRapiMissing(resources.values())
        self._CheckTagHandlers(resources.values())
Example #58
                                                  ht.TJobId)])))

  cl = cli.GetClient()
  result = cl.SubmitManyJobs(jobs)
  if not check_fn(result):
    raise errors.OpExecError("Job submission doesn't match %s: %s" %
                             (check_fn, result))

  # Wait for jobs to finish
  jex = JobExecutor(cl=cl, opts=opts)

  for (status, job_id) in result:
    jex.AddJobId(None, status, job_id)

  job_results = jex.GetResults()
  if not compat.all(row[0] for row in job_results):
    raise errors.OpExecError("At least one of the submitted jobs failed: %s" %
                             job_results)

  # Get details about jobs
  data = cl.QueryJobs([job_id for (_, job_id) in result],
                      ["id", "opexec", "ops"])
  data_job_id = [job_id for (job_id, _, _) in data]
  data_opexec = [opexec for (_, opexec, _) in data]
  data_op = [[opcodes.OpCode.LoadOpCode(op) for op in ops]
             for (_, _, ops) in data]

  assert compat.all(not op.depends or len(op.depends) == 1
                    for ops in data_op
                    for op in ops)
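The fragment follows a common bulk-submission pattern: submit the jobs, wait for all of them through JobExecutor, and insist that every result row reports success before querying job details. A sketch of the same check in isolation, assuming each row is a (success, payload) pair and collecting the failing payloads for the message:

job_results = [(True, "job 1 done"), (False, "job 2: disk error")]
failed = [payload for (ok, payload) in job_results if not ok]
if failed:
    raise RuntimeError("At least one of the submitted jobs failed: %s"
                       % ", ".join(failed))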
Example #59
def _GroupWatcher(opts):
    """Main function for per-group watcher process.

  """
    group_uuid = opts.nodegroup.lower()

    if not utils.UUID_RE.match(group_uuid):
        raise errors.GenericError(
            "Node group parameter (%s) must be given a UUID,"
            " got '%s'" % (cli.NODEGROUP_OPT_NAME, group_uuid))

    logging.info("Watcher for node group '%s'", group_uuid)

    known_groups = _LoadKnownGroups()

    # Check if node group is known
    if group_uuid not in known_groups:
        raise errors.GenericError("Node group '%s' is not known by ssconf" %
                                  group_uuid)

    # Group UUID has been verified and should not contain any dangerous
    # characters
    state_path = pathutils.WATCHER_GROUP_STATE_FILE % group_uuid
    inst_status_path = pathutils.WATCHER_GROUP_INSTANCE_STATUS_FILE % group_uuid

    logging.debug("Using state file %s", state_path)

    # Global watcher
    statefile = state.OpenStateFile(state_path)  # pylint: disable=E0602
    if not statefile:
        return constants.EXIT_FAILURE

    notepad = state.WatcherState(statefile)  # pylint: disable=E0602
    try:
        # Connect to master daemon
        client = GetLuxiClient(False)

        _CheckMaster(client)

        (nodes, instances, locks) = _GetGroupData(client, group_uuid)

        # Update per-group instance status file
        _UpdateInstanceStatus(inst_status_path, list(instances.values()))

        _MergeInstanceStatus(pathutils.INSTANCE_STATUS_FILE,
                             pathutils.WATCHER_GROUP_INSTANCE_STATUS_FILE,
                             known_groups)

        started = _CheckInstances(client, notepad, instances, locks)
        _CheckDisks(client, notepad, nodes, instances, started)

        # Check if the nodegroup only has ext storage type
        only_ext = compat.all(i.disk_template == constants.DT_EXT
                              for i in instances.values())

        # We skip verification of the current node group if it contains only
        # external storage devices. An interface for disk verification by
        # external storage providers exists, but the current ExtStorageDevice
        # does not implement it yet.
        #
        # This check needs to be revisited if ES_ACTION_VERIFY on ExtStorageDevice
        # is implemented.
        if not opts.no_verify_disks and not only_ext:
            _VerifyDisks(client, group_uuid, nodes, instances)
    except Exception as err:
        logging.info("Not updating status file due to failure: %s", err)
        raise
    else:
        # Save changes for next run
        notepad.Save(state_path)

    return constants.EXIT_SUCCESS
Example #60
    def Exec(self, feedback_fn):
        """Export an instance to an image in the cluster.

    """
        assert self.op.mode in constants.EXPORT_MODES

        src_node_uuid = self.instance.primary_node

        if self.op.shutdown:
            # shutdown the instance, but not the disks
            feedback_fn("Shutting down instance %s" % self.instance.name)
            result = self.rpc.call_instance_shutdown(src_node_uuid,
                                                     self.instance,
                                                     self.op.shutdown_timeout,
                                                     self.op.reason)
            # TODO: Maybe ignore failures if ignore_remove_failures is set
            result.Raise(
                "Could not shutdown instance %s on"
                " node %s" %
                (self.instance.name, self.cfg.GetNodeName(src_node_uuid)))

        if self.op.zero_free_space:
            self.ZeroFreeSpace(feedback_fn)

        activate_disks = not self.instance.disks_active

        if activate_disks:
            # Activate the instance disks if we're exporting a stopped instance
            feedback_fn("Activating disks for %s" % self.instance.name)
            StartInstanceDisks(self, self.instance, None)
            self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

        try:
            helper = masterd.instance.ExportInstanceHelper(
                self, feedback_fn, self.instance)

            helper.CreateSnapshots()
            try:
                if (self.op.shutdown
                        and self.instance.admin_state == constants.ADMINST_UP
                        and not self.op.remove_instance):
                    assert self.instance.disks_active
                    feedback_fn("Starting instance %s" % self.instance.name)
                    result = self.rpc.call_instance_start(
                        src_node_uuid, (self.instance, None, None), False,
                        self.op.reason)
                    msg = result.fail_msg
                    if msg:
                        feedback_fn("Failed to start instance: %s" % msg)
                        ShutdownInstanceDisks(self, self.instance)
                        raise errors.OpExecError(
                            "Could not start instance: %s" % msg)

                if self.op.mode == constants.EXPORT_MODE_LOCAL:
                    (fin_resu,
                     dresults) = helper.LocalExport(self.dst_node,
                                                    self.op.compress)
                elif self.op.mode == constants.EXPORT_MODE_REMOTE:
                    connect_timeout = constants.RIE_CONNECT_TIMEOUT
                    timeouts = masterd.instance.ImportExportTimeouts(
                        connect_timeout)

                    (key_name, _, _) = self.x509_key_name

                    dest_ca_pem = \
                      OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                                      self.dest_x509_ca)

                    (fin_resu, dresults) = helper.RemoteExport(
                        self.dest_disk_info, key_name, dest_ca_pem,
                        self.op.compress, timeouts)
            finally:
                helper.Cleanup()

            # Check for backwards compatibility
            assert len(dresults) == len(self.instance.disks)
            assert compat.all(isinstance(i, bool) for i in dresults), \
                   "Not all results are boolean: %r" % dresults

        finally:
            if activate_disks:
                feedback_fn("Deactivating disks for %s" % self.instance.name)
                ShutdownInstanceDisks(self, self.instance)

        if not (compat.all(dresults) and fin_resu):
            failures = []
            if not fin_resu:
                failures.append("export finalization")
            if not compat.all(dresults):
                fdsk = utils.CommaJoin(idx
                                       for (idx, dsk) in enumerate(dresults)
                                       if not dsk)
                failures.append("disk export: disk(s) %s" % fdsk)

            raise errors.OpExecError("Export failed, errors in %s" %
                                     utils.CommaJoin(failures))

        # At this point, the export was successful, we can cleanup/finish

        # Remove instance if requested
        if self.op.remove_instance:
            feedback_fn("Removing instance %s" % self.instance.name)
            RemoveInstance(self, feedback_fn, self.instance,
                           self.op.ignore_remove_failures)

        if self.op.mode == constants.EXPORT_MODE_LOCAL:
            self._CleanupExports(feedback_fn)

        return fin_resu, dresults
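The error handling near the end aggregates the overall finalization flag and the per-disk booleans into a single readable message. The aggregation in isolation, with made-up values:

fin_resu, dresults = True, [True, False, True, False]
failures = []
if not fin_resu:
    failures.append("export finalization")
if not all(dresults):
    bad = ", ".join(str(idx) for (idx, ok) in enumerate(dresults) if not ok)
    failures.append("disk export: disk(s) %s" % bad)
if failures:
    raise RuntimeError("Export failed, errors in %s" % ", ".join(failures))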