Exemplo n.º 1
0
    def testIPv6(self):
        """Verify socat address-family selection for import and export."""
        # (ipv4 flag, ipv6 flag, expected substring; None means no "pf=")
        family_cases = [
            (False, False, None),
            (True, False, ",pf=ipv4"),
            (False, True, ",pf=ipv6"),
        ]
        for mode in [constants.IEM_IMPORT, constants.IEM_EXPORT]:
            for (use_v4, use_v6, expected) in family_cases:
                opts = CmdBuilderConfig(host="localhost", port=6789,
                                        ipv4=use_v4, ipv6=use_v6)
                builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
                cmd = builder._GetSocatCommand()
                if expected is None:
                    # Neither family forced: no "pf=" option anywhere
                    self.assert_(compat.all("pf=" not in i for i in cmd))
                else:
                    self.assert_(compat.any(expected in i for i in cmd))

            # Requesting both address families at once must be rejected
            opts = CmdBuilderConfig(host="localhost", port=6789,
                                    ipv4=True, ipv6=True)
            builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
            self.assertRaises(AssertionError, builder._GetSocatCommand)
Exemplo n.º 2
0
  def testIPv6(self):
    """Socat must honor the requested address family, or use none."""
    for mode in [constants.IEM_IMPORT, constants.IEM_EXPORT]:
      # Neither family forced: no "pf=" option at all
      cfg = CmdBuilderConfig(host="localhost", port=6789,
                             ipv4=False, ipv6=False)
      socat = impexpd.CommandBuilder(mode, cfg, 1, 2, 3)._GetSocatCommand()
      self.assert_(compat.all("pf=" not in part for part in socat))

      # IPv4
      cfg = CmdBuilderConfig(host="localhost", port=6789,
                             ipv4=True, ipv6=False)
      socat = impexpd.CommandBuilder(mode, cfg, 1, 2, 3)._GetSocatCommand()
      self.assert_(compat.any(",pf=ipv4" in part for part in socat))

      # IPv6
      cfg = CmdBuilderConfig(host="localhost", port=6789,
                             ipv4=False, ipv6=True)
      socat = impexpd.CommandBuilder(mode, cfg, 1, 2, 3)._GetSocatCommand()
      self.assert_(compat.any(",pf=ipv6" in part for part in socat))

      # Asking for both families at once is rejected
      cfg = CmdBuilderConfig(host="localhost", port=6789,
                             ipv4=True, ipv6=True)
      builder = impexpd.CommandBuilder(mode, cfg, 1, 2, 3)
      self.assertRaises(AssertionError, builder._GetSocatCommand)
Exemplo n.º 3
0
  def test(self):
    """Exercise CommandBuilder across modes, compression and socat options."""
    for mode in [constants.IEM_IMPORT, constants.IEM_EXPORT]:
      # Import pipelines decompress, export pipelines compress
      if mode == constants.IEM_IMPORT:
        comprcmd = "gunzip"
      elif mode == constants.IEM_EXPORT:
        comprcmd = "gzip"

      for compress in [constants.IEC_NONE, constants.IEC_GZIP]:
        # Magic strings of various shapes, including None (disabled)
        for magic in [None, 10 * "-", "HelloWorld", "J9plh4nFo2",
                      "24A02A81-2264-4B51-A882-A2AB9D85B420"]:
          opts = CmdBuilderConfig(magic=magic, compress=compress)
          builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)

          magic_cmd = builder._GetMagicCommand()
          dd_cmd = builder._GetDdCommand()

          if magic:
            # The magic must be passed to both commands as "M=<magic>"
            self.assert_(("M=%s" % magic) in magic_cmd)
            self.assert_(("M=%s" % magic) in dd_cmd)
          else:
            # Without a magic string no magic command is generated
            self.assertFalse(magic_cmd)

        for host in ["localhost", "198.51.100.4", "192.0.2.99"]:
          for port in [0, 1, 1234, 7856, 45452]:
            for cmd_prefix in [None, "PrefixCommandGoesHere|",
                               "dd if=/dev/hda bs=1048576 |"]:
              for cmd_suffix in [None, "< /some/file/name",
                                 "| dd of=/dev/null"]:
                opts = CmdBuilderConfig(host=host, port=port, compress=compress,
                                        cmd_prefix=cmd_prefix,
                                        cmd_suffix=cmd_suffix)

                builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)

                # Check complete command
                cmd = builder.GetCommand()
                self.assert_(isinstance(cmd, list))

                if compress == constants.IEC_GZIP:
                  self.assert_(CheckCmdWord(cmd, comprcmd))

                if cmd_prefix is not None:
                  self.assert_(compat.any(cmd_prefix in i for i in cmd))

                if cmd_suffix is not None:
                  self.assert_(compat.any(cmd_suffix in i for i in cmd))

                # Check socat command
                socat_cmd = builder._GetSocatCommand()

                if mode == constants.IEM_IMPORT:
                  # Import listens; the SSL address is the second-to-last arg
                  ssl_addr = socat_cmd[-2].split(",")
                  self.assert_(("OPENSSL-LISTEN:%s" % port) in ssl_addr)
                elif mode == constants.IEM_EXPORT:
                  # Export connects; the SSL address is the last argument
                  ssl_addr = socat_cmd[-1].split(",")
                  self.assert_(("OPENSSL:%s:%s" % (host, port)) in ssl_addr)

                # Certificate verification must always be enabled
                self.assert_("verify=1" in ssl_addr)
Exemplo n.º 4
0
  def testAcquireAndReleaseInstance(self):
    """Acquiring flags an instance as used; releasing clears the flag."""
    # Nothing is in use before the test starts
    self.assertFalse(compat.any(i.used for i in self.config["instances"]))

    acquired = qa_config.AcquireInstance(_cfg=self.config)
    self.assertTrue(acquired.used)
    self.assertTrue(acquired.disk_template is None)

    acquired.Release()

    # Releasing must reset the "used" flag and keep the template unset
    self.assertFalse(acquired.used)
    self.assertTrue(acquired.disk_template is None)
    self.assertFalse(compat.any(i.used for i in self.config["instances"]))
Exemplo n.º 5
0
  def testAcquireAndReleaseInstance(self):
    """Round-trip: acquire an instance, then release it again."""
    def _anything_used():
      # True when any configured instance is currently marked used
      return compat.any(i.used for i in self.config["instances"])

    self.assertFalse(_anything_used())

    instance = qa_config.AcquireInstance(_cfg=self.config)
    self.assertTrue(instance.used)
    self.assertTrue(instance.disk_template is None)

    instance.Release()

    self.assertFalse(instance.used)
    self.assertTrue(instance.disk_template is None)
    self.assertFalse(_anything_used())
Exemplo n.º 6
0
    def testEnv(self):
        """Hook environments for pre, post and config-update phases."""
        # Check pre-phase hook
        self.lu.hook_env = {
            "FOO": "pre-foo-value",
        }
        hm = hooksmaster.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
        hm.RunPhase(constants.HOOKS_PHASE_PRE)

        (node_list, hpath, phase, env) = self._rpcs.pop(0)
        self.assertEqual(node_list, set(["node_a.example.com"]))
        self.assertEqual(hpath, self.lu.HPATH)
        self.assertEqual(phase, constants.HOOKS_PHASE_PRE)
        self.assertEqual(env["GANETI_FOO"], "pre-foo-value")
        # Pre-phase environment must not contain any GANETI_POST variables
        self.assertFalse(
            compat.any(key.startswith("GANETI_POST") for key in env))
        self._CheckEnv(env, constants.HOOKS_PHASE_PRE, self.lu.HPATH)

        # Check post-phase hook
        self.lu.hook_env = {
            "FOO": "post-value",
            "BAR": 123,
        }
        hm.RunPhase(constants.HOOKS_PHASE_POST)

        (node_list, hpath, phase, env) = self._rpcs.pop(0)
        self.assertEqual(node_list, set(["node_a.example.com"]))
        self.assertEqual(hpath, self.lu.HPATH)
        self.assertEqual(phase, constants.HOOKS_PHASE_POST)
        # Pre-phase values are merged into the post-phase environment
        self.assertEqual(env["GANETI_FOO"], "pre-foo-value")
        # Post-phase values get the GANETI_POST_ prefix; ints become strings
        self.assertEqual(env["GANETI_POST_FOO"], "post-value")
        self.assertEqual(env["GANETI_POST_BAR"], "123")
        self.assertFalse("GANETI_BAR" in env)
        self._CheckEnv(env, constants.HOOKS_PHASE_POST, self.lu.HPATH)

        # No further hook RPCs may have been made
        self.assertRaises(IndexError, self._rpcs.pop)

        # Check configuration update hook
        hm.RunConfigUpdate()
        (node_list, hpath, phase, env) = self._rpcs.pop(0)
        # The config update hook runs on the master node only
        self.assertEqual(set(node_list),
                         set([self.lu.cfg.GetMasterNodeName()]))
        self.assertEqual(hpath, constants.HOOKS_NAME_CFGUPDATE)
        self.assertEqual(phase, constants.HOOKS_PHASE_POST)
        self._CheckEnv(env, constants.HOOKS_PHASE_POST,
                       constants.HOOKS_NAME_CFGUPDATE)
        self.assertFalse(
            compat.any(key.startswith("GANETI_POST") for key in env))
        self.assertEqual(env["GANETI_FOO"], "pre-foo-value")
        self.assertRaises(IndexError, self._rpcs.pop)
Exemplo n.º 7
0
  def testAcquireAndReleaseInstance(self):
    """Acquire marks an instance as used; Release reverts it."""
    self.assertFalse(compat.any(inst.used
                                for inst in self.config["instances"]))

    acquired = qa_config.AcquireInstance(_cfg=self.config)
    self.assertTrue(acquired.used)
    self.assertTrue(acquired.disk_template is None)

    acquired.Release()

    self.assertFalse(acquired.used)
    self.assertTrue(acquired.disk_template is None)

    self.assertFalse(compat.any(inst.used
                                for inst in self.config["instances"]))
Exemplo n.º 8
0
    def testNoHooksLU(self):
        """An LU without hooks must only trigger the config update hook."""
        self.lu = FakeNoHooksLU(FakeProc(), self.op, FakeContext(), None)
        # Building a hook env or node list is forbidden for a no-hooks LU
        self.assertRaises(AssertionError, self.lu.BuildHooksEnv)
        self.assertRaises(AssertionError, self.lu.BuildHooksNodes)

        hm = hooksmaster.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
        self.assertEqual(hm.pre_env, {})
        self.assertRaises(IndexError, self._rpcs.pop)

        # Neither phase may issue any hook RPC
        hm.RunPhase(constants.HOOKS_PHASE_PRE)
        self.assertRaises(IndexError, self._rpcs.pop)

        hm.RunPhase(constants.HOOKS_PHASE_POST)
        self.assertRaises(IndexError, self._rpcs.pop)

        hm.RunConfigUpdate()

        # The config update hook runs on the master node only
        (node_list, hpath, phase, env) = self._rpcs.pop(0)
        self.assertEqual(set(node_list),
                         set([self.lu.cfg.GetMasterNodeName()]))
        self.assertEqual(hpath, constants.HOOKS_NAME_CFGUPDATE)
        self.assertEqual(phase, constants.HOOKS_PHASE_POST)
        self.assertFalse(
            compat.any(key.startswith("GANETI_POST") for key in env))
        self._CheckEnv(env, constants.HOOKS_PHASE_POST,
                       constants.HOOKS_NAME_CFGUPDATE)
        self.assertRaises(IndexError, self._rpcs.pop)

        assert isinstance(self.lu, FakeNoHooksLU), "LU was replaced"
Exemplo n.º 9
0
    def testCancelWhileInQueue(self):
        queue = _FakeQueueForProc()

        ops = [opcodes.OpTestDummy(result="Res%s" % i, fail=False) for i in range(5)]

        # Create job
        job_id = 17045
        job = self._CreateJob(queue, job_id, ops)

        self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_QUEUED)

        # Mark as cancelled
        (success, _) = job.Cancel()
        self.assert_(success)

        self.assert_(compat.all(op.status == constants.OP_STATUS_CANCELED for op in job.ops))

        opexec = _FakeExecOpCodeForProc(None, None)
        jqueue._JobProcessor(queue, opexec, job)()

        # Check result
        self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_CANCELED)
        self.assertEqual(job.GetInfo(["status"]), [constants.JOB_STATUS_CANCELED])
        self.assertFalse(job.start_timestamp)
        self.assert_(job.end_timestamp)
        self.assertFalse(compat.any(op.start_timestamp or op.end_timestamp for op in job.ops))
        self.assertEqual(
            job.GetInfo(["opstatus", "opresult"]),
            [[constants.OP_STATUS_CANCELED for _ in job.ops], ["Job canceled by request" for _ in job.ops]],
        )
Exemplo n.º 10
0
  def testResult(self):
    """REQ_RESULT accepts well-formed results and rejects malformed ones."""
    # Each entry is a pair: (successful allocations, failed instance names)
    good_results = [
      # All instances allocated
      [[["foo", ["a", "b"]], ["bar", ["c"]], ["baz", []]], []],
      # Partially allocated, partially failed
      [[["bar", ["c", "b"]], ["baz", ["a"]]], ["foo"]],
      # Everything failed
      [[], ["foo", "bar", "baz"]],
      ]
    bad_results = [
      "foobar",
      1234,
      [],
      [[]],
      [[], [], []],
      ]

    result_fn = iallocator.IAReqMultiInstanceAlloc.REQ_RESULT

    self.assertTrue(compat.all(result_fn(res) for res in good_results))
    self.assertFalse(compat.any(result_fn(res) for res in bad_results))
  def testResult(self):
    """Well-formed results pass REQ_RESULT; malformed ones do not."""
    check_fn = iallocator.IAReqMultiInstanceAlloc.REQ_RESULT

    valid = [
      # All instances allocated
      [[["foo", ["a", "b"]],
        ["bar", ["c"]],
        ["baz", []]],
       []],
      # Partially allocated, partially failed
      [[["bar", ["c", "b"]],
        ["baz", ["a"]]],
       ["foo"]],
      # Everything failed
      [[],
       ["foo", "bar", "baz"]],
      ]
    invalid = ["foobar", 1234, [], [[]], [[], [], []]]

    # Each valid result must pass, each invalid one must fail
    for res in valid:
      self.assertTrue(check_fn(res))
    for res in invalid:
      self.assertFalse(check_fn(res))
    def testGetStdPvSize(self):
        """Test cases for bdev.LogicalVolume._GetStdPvSize()"""
        # Fixed seed keeps the randomized test deterministic
        rnd = random.Random(9517)
        for _ in range(0, 50):
            # Identical volumes
            pvi = self._GenerateRandomPvInfo(rnd, "disk", "myvg")
            onesize = bdev.LogicalVolume._GetStdPvSize([pvi])
            # The standard size may shrink the PV only within the margin
            self.assertTrue(onesize <= pvi.size)
            self.assertTrue(onesize > pvi.size * (1 - self._MARGIN))
            # Repeating the same PV must not change the result
            for length in range(2, 10):
                n_size = bdev.LogicalVolume._GetStdPvSize([pvi] * length)
                self.assertEqual(onesize, n_size)

            # Mixed volumes
            for length in range(1, 10):
                pvlist = [
                    self._GenerateRandomPvInfo(rnd, "disk", "myvg")
                    for _ in range(0, length)
                ]
                std_size = bdev.LogicalVolume._GetStdPvSize(pvlist)
                # The standard size fits every PV ...
                self.assertTrue(
                    compat.all(std_size <= pvi.size for pvi in pvlist))
                # ... and is within the margin of at least one of them
                self.assertTrue(
                    compat.any(std_size > pvi.size * (1 - self._MARGIN)
                               for pvi in pvlist))
                # Appending a duplicate entry must not change the result
                pvlist.append(pvlist[0])
                p1_size = bdev.LogicalVolume._GetStdPvSize(pvlist)
                self.assertEqual(std_size, p1_size)
Exemplo n.º 13
0
  def _upper_owned(self, level):
    """Check that we don't own any lock at a level greater than the given one.

    """
    # Works only because LEVELS[i] == i, which the test cases verify.
    higher_levels = LEVELS[level + 1:]
    return compat.any(self._is_owned(lvl) for lvl in higher_levels)
  def test(self):
    """Handler attribute wiring for every supported HTTP method."""
    for method in baserlib._SUPPORTED_METHODS:
      # Empty handler
      obj = self._MakeClass(method, {})(None, {}, None)
      for attr in itertools.chain(*baserlib.OPCODE_ATTRS):
        self.assertFalse(hasattr(obj, attr))

      # Direct handler function
      obj = self._MakeClass(method, {
        method: lambda _: None,
        })(None, {}, None)
      # A plain function must not create any opcode-related attributes
      self.assertFalse(compat.all(hasattr(obj, attr)
                                  for i in baserlib._SUPPORTED_METHODS
                                  for attr in self._GetMethodAttributes(i)))

      # Let metaclass define handler function
      for opcls in [None, object()]:
        obj = self._MakeClass(method, {
          "%s_OPCODE" % method: opcls,
          })(None, {}, None)
        self.assertTrue(callable(getattr(obj, method)))
        self.assertEqual(getattr(obj, "%s_OPCODE" % method), opcls)
        self.assertFalse(hasattr(obj, "%s_RENAME" % method))
        # Attributes belonging to the other methods must not appear
        self.assertFalse(compat.any(hasattr(obj, attr)
                                    for i in baserlib._SUPPORTED_METHODS
                                      if i != method
                                    for attr in self._GetMethodAttributes(i)))
Exemplo n.º 15
0
    def test(self):
        """Handler attribute wiring for every supported HTTP method."""
        for method in baserlib._SUPPORTED_METHODS:
            # Empty handler
            obj = self._MakeClass(method, {})(None, {}, None)
            for m_attr in baserlib.OPCODE_ATTRS:
                for attr in m_attr.GetAll():
                    self.assertFalse(hasattr(obj, attr))

            # Direct handler function
            obj = self._MakeClass(method, {
                method: lambda _: None,
            })(None, {}, None)
            # A plain function must not create opcode-related attributes
            self.assertFalse(
                compat.all(
                    hasattr(obj, attr) for i in baserlib._SUPPORTED_METHODS
                    for attr in self._GetMethodAttributes(i)))

            # Let metaclass define handler function
            for opcls in [None, object()]:
                obj = self._MakeClass(method, {
                    "%s_OPCODE" % method: opcls,
                })(None, {}, None)
                self.assertTrue(callable(getattr(obj, method)))
                self.assertEqual(getattr(obj, "%s_OPCODE" % method), opcls)
                self.assertFalse(hasattr(obj, "%s_RENAME" % method))
                # Attributes belonging to other methods must not appear
                self.assertFalse(
                    compat.any(
                        hasattr(obj, attr) for i in baserlib._SUPPORTED_METHODS
                        if i != method
                        for attr in self._GetMethodAttributes(i)))
Exemplo n.º 16
0
  def testNoHooksLU(self):
    """An LU without hooks must only trigger the config update hook."""
    self.lu = FakeNoHooksLU(FakeProc(), self.op, FakeContext(), None)
    # Building a hook env or node list is forbidden for a no-hooks LU
    self.assertRaises(AssertionError, self.lu.BuildHooksEnv)
    self.assertRaises(AssertionError, self.lu.BuildHooksNodes)

    hm = hooksmaster.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
    self.assertEqual(hm.pre_env, {})
    self.assertRaises(IndexError, self._rpcs.pop)

    # Neither phase may issue any hook RPC
    hm.RunPhase(constants.HOOKS_PHASE_PRE)
    self.assertRaises(IndexError, self._rpcs.pop)

    hm.RunPhase(constants.HOOKS_PHASE_POST)
    self.assertRaises(IndexError, self._rpcs.pop)

    hm.RunConfigUpdate()

    # The config update hook runs on the master node only
    (node_list, hpath, phase, env) = self._rpcs.pop(0)
    self.assertEqual(set(node_list), set([self.lu.cfg.GetMasterNodeName()]))
    self.assertEqual(hpath, constants.HOOKS_NAME_CFGUPDATE)
    self.assertEqual(phase, constants.HOOKS_PHASE_POST)
    self.assertFalse(compat.any(key.startswith("GANETI_POST") for key in env))
    self._CheckEnv(env, constants.HOOKS_PHASE_POST,
                   constants.HOOKS_NAME_CFGUPDATE)
    self.assertRaises(IndexError, self._rpcs.pop)

    assert isinstance(self.lu, FakeNoHooksLU), "LU was replaced"
Exemplo n.º 17
0
  def testParams(self):
    """Validate the parameter definitions of all registered opcodes."""
    # Base parameters every opcode must support
    supported_by_all = set(["debug_level", "dry_run", "priority"])

    # The abstract base classes must not be registered themselves
    self.assertTrue(opcodes.BaseOpCode not in opcodes.OP_MAPPING.values())
    self.assertTrue(opcodes.OpCode not in opcodes.OP_MAPPING.values())

    for cls in opcodes.OP_MAPPING.values() + [opcodes.OpCode]:
      all_slots = cls.GetAllSlots()

      self.assertEqual(len(set(all_slots) & supported_by_all), 3,
                       msg=("Opcode %s doesn't support all base"
                            " parameters (%r)" % (cls.OP_ID, supported_by_all)))

      # All opcodes must have OP_PARAMS
      self.assert_(hasattr(cls, "OP_PARAMS"),
                   msg="%s doesn't have OP_PARAMS" % cls.OP_ID)

      param_names = [name for (name, _, _, _) in cls.GetAllParams()]

      # Slots are derived from the parameter definitions
      self.assertEqual(all_slots, param_names)

      # Without inheritance
      self.assertEqual(cls.__slots__,
                       [name for (name, _, _, _) in cls.OP_PARAMS])

      # This won't work if parameters are converted to a dictionary
      duplicates = utils.FindDuplicates(param_names)
      self.assertFalse(duplicates,
                       msg=("Found duplicate parameters %r in %s" %
                            (duplicates, cls.OP_ID)))

      # Check parameter definitions
      for attr_name, aval, test, doc in cls.GetAllParams():
        self.assert_(attr_name)
        # The type check must be absent, the NoType marker, or callable
        self.assert_(test is None or test is ht.NoType or callable(test),
                     msg=("Invalid type check for %s.%s" %
                          (cls.OP_ID, attr_name)))
        self.assertTrue(doc is None or isinstance(doc, basestring))

        if callable(aval):
          # Callable defaults are evaluated; they may not return a callable
          default_value = aval()
          self.assertFalse(callable(default_value),
                           msg=("Default value of %s.%s returned by function"
                                " is callable" % (cls.OP_ID, attr_name)))
        else:
          # Mutable defaults would be shared between opcode instances
          self.assertFalse(isinstance(aval, (list, dict, set)),
                           msg=("Default value of %s.%s is mutable (%s)" %
                                (cls.OP_ID, attr_name, repr(aval))))

          default_value = aval

        if aval is not ht.NoDefault and test is not ht.NoType:
          # The default itself must pass the declared type check
          self.assertTrue(test(default_value),
                          msg=("Default value of %s.%s does not verify" %
                               (cls.OP_ID, attr_name)))

      # If any parameter has documentation, all others need to have it as well
      has_doc = [doc is not None for (_, _, _, doc) in cls.OP_PARAMS]
      self.assertTrue(not compat.any(has_doc) or compat.all(has_doc),
                      msg="%s does not document all parameters" % cls)
Exemplo n.º 18
0
    def _GetQueryData(self, lu):
        """Computes the list of nodes and their attributes.

    @param lu: the LogicalUnit whose config and RPC runner are used
    @return: list of L{query.OsInfo} objects in the requested name order

    """
        # Locking is not used
        assert not (compat.any(
            lu.glm.is_owned(level)
            for level in locking.LEVELS if level != locking.LEVEL_CLUSTER)
                    or self.do_locking or self.use_locking)

        # Only online, VM-capable nodes can report OS information
        valid_node_uuids = [
            node.uuid for node in lu.cfg.GetAllNodesInfo().values()
            if not node.offline and node.vm_capable
        ]
        pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_node_uuids))
        cluster = lu.cfg.GetClusterInfo()

        data = {}

        for (os_name, os_data) in pol.items():
            info = query.OsInfo(name=os_name,
                                valid=True,
                                node_status=os_data,
                                hidden=(os_name in cluster.hidden_os),
                                blacklisted=(os_name
                                             in cluster.blacklisted_os))

            variants = set()
            parameters = set()
            api_versions = set()

            for idx, osl in enumerate(os_data.values()):
                # The OS is valid only if it is valid on every node
                info.valid = bool(info.valid and osl and osl[0][1])
                if not info.valid:
                    break

                (node_variants, node_params, node_api) = osl[0][3:6]
                if idx == 0:
                    # First entry
                    variants.update(node_variants)
                    parameters.update(node_params)
                    api_versions.update(node_api)
                else:
                    # Filter out inconsistent values
                    variants.intersection_update(node_variants)
                    parameters.intersection_update(node_params)
                    api_versions.intersection_update(node_api)

            info.variants = list(variants)
            info.parameters = list(parameters)
            info.api_versions = list(api_versions)

            data[os_name] = info

        # Prepare data in requested order
        return [
            data[name] for name in self._GetNames(lu, pol.keys(), None)
            if name in data
        ]
Exemplo n.º 19
0
def _CheckForOfflineNodes(nodes, instance):
  """Tell whether the given instance has an offline secondary node.

  @param nodes: mapping from node name to node object
  @param instance: The instance object
  @return: True if any of the secondary is offline, False otherwise

  """
  return compat.any(nodes[snode].offline for snode in instance.snodes)
    def testAcquireAndReleaseInstance(self):
        """Acquiring and releasing toggles an instance's "used" flag."""
        self.assertFalse(
            compat.any(entry.used for entry in self.config["instances"]))

        inst = qa_config.AcquireInstance(_cfg=self.config)
        self.assertTrue(inst.used)
        self.assertTrue(inst.disk_template is None)

        inst.Release()

        # After release nothing may be left marked as used
        self.assertFalse(inst.used)
        self.assertTrue(inst.disk_template is None)

        self.assertFalse(
            compat.any(entry.used for entry in self.config["instances"]))
Exemplo n.º 21
0
def _CheckForOfflineNodes(nodes, instance):
  """Report whether any secondary node of the instance is offline.

  @param nodes: mapping of node name to node object
  @param instance: The instance object
  @return: True if any of the secondary is offline, False otherwise

  """
  secondary_names = instance.snodes
  return compat.any(nodes[name].offline for name in secondary_names)
Exemplo n.º 22
0
  def testEnv(self):
    """Hook environment handling for pre, post and config-update phases."""
    # Check pre-phase hook
    self.lu.hook_env = {
      "FOO": "pre-foo-value",
      }
    hm = hooksmaster.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
    hm.RunPhase(constants.HOOKS_PHASE_PRE)

    (node_list, hpath, phase, env) = self._rpcs.pop(0)
    self.assertEqual(node_list, set(["node_a.example.com"]))
    self.assertEqual(hpath, self.lu.HPATH)
    self.assertEqual(phase, constants.HOOKS_PHASE_PRE)
    self.assertEqual(env["GANETI_FOO"], "pre-foo-value")
    # Pre-phase env must not contain any GANETI_POST variables
    self.assertFalse(compat.any(key.startswith("GANETI_POST") for key in env))
    self._CheckEnv(env, constants.HOOKS_PHASE_PRE, self.lu.HPATH)

    # Check post-phase hook
    self.lu.hook_env = {
      "FOO": "post-value",
      "BAR": 123,
      }
    hm.RunPhase(constants.HOOKS_PHASE_POST)

    (node_list, hpath, phase, env) = self._rpcs.pop(0)
    self.assertEqual(node_list, set(["node_a.example.com"]))
    self.assertEqual(hpath, self.lu.HPATH)
    self.assertEqual(phase, constants.HOOKS_PHASE_POST)
    # Pre-phase values are merged into the post-phase environment
    self.assertEqual(env["GANETI_FOO"], "pre-foo-value")
    # Post-phase values get the GANETI_POST_ prefix; ints become strings
    self.assertEqual(env["GANETI_POST_FOO"], "post-value")
    self.assertEqual(env["GANETI_POST_BAR"], "123")
    self.assertFalse("GANETI_BAR" in env)
    self._CheckEnv(env, constants.HOOKS_PHASE_POST, self.lu.HPATH)

    # No further hook RPCs may have been made
    self.assertRaises(IndexError, self._rpcs.pop)

    # Check configuration update hook
    hm.RunConfigUpdate()
    (node_list, hpath, phase, env) = self._rpcs.pop(0)
    # The config update hook runs on the master node only
    self.assertEqual(set(node_list), set([self.lu.cfg.GetMasterNodeName()]))
    self.assertEqual(hpath, constants.HOOKS_NAME_CFGUPDATE)
    self.assertEqual(phase, constants.HOOKS_PHASE_POST)
    self._CheckEnv(env, constants.HOOKS_PHASE_POST,
                   constants.HOOKS_NAME_CFGUPDATE)
    self.assertFalse(compat.any(key.startswith("GANETI_POST") for key in env))
    self.assertEqual(env["GANETI_FOO"], "pre-foo-value")
    self.assertRaises(IndexError, self._rpcs.pop)
Exemplo n.º 23
0
  def RegisterLock(self, lock):
    """Registers a new lock.

    @param lock: the lock object to register; both the object itself and
      its C{name} must be unique among all registered locks

    """
    logging.debug("Registering lock %s", lock.name)
    assert lock not in self._locks, "Duplicate lock registration"
    # Iterate the dict directly instead of materializing .keys() first
    assert not compat.any(lock.name == i.name for i in self._locks), \
           "Found duplicate lock name"
    self._locks[lock] = None
Exemplo n.º 24
0
  def testAcquireNodeNoneAdded(self):
    """With no nodes added, only the master node can be acquired."""
    self.assertFalse(compat.any(node.added for node in self.config["nodes"]))

    # First call must return master node
    master = qa_config.AcquireNode(_cfg=self.config)
    self.assertEqual(master, self.config.GetMasterNode())

    # Excluding the master leaves no acquirable node
    self.assertRaises(qa_error.OutOfNodesError, qa_config.AcquireNode,
                      exclude=[master], _cfg=self.config)
Exemplo n.º 25
0
  def testAcquireNodeNoneAdded(self):
    """Only the master is acquirable when no node has been added."""
    added_flags = [n.added for n in self.config["nodes"]]
    self.assertFalse(compat.any(added_flags))

    # The first acquisition always yields the master node
    acquired = qa_config.AcquireNode(_cfg=self.config)
    self.assertEqual(acquired, self.config.GetMasterNode())

    # With the master excluded there is nothing left to acquire
    self.assertRaises(qa_error.OutOfNodesError, qa_config.AcquireNode,
                      exclude=[acquired], _cfg=self.config)
Exemplo n.º 26
0
    def test(self):
        """Sanity checks applied to every registered opcode class."""
        # LoadOpCode rejects anything without a usable OP_ID
        self.assertRaises(ValueError, opcodes.OpCode.LoadOpCode, None)
        self.assertRaises(ValueError, opcodes.OpCode.LoadOpCode, "")
        self.assertRaises(ValueError, opcodes.OpCode.LoadOpCode, {})
        self.assertRaises(ValueError, opcodes.OpCode.LoadOpCode, {"OP_ID": ""})

        for cls in opcodes.OP_MAPPING.values():
            # OP_ID naming conventions
            self.assert_(cls.OP_ID.startswith("OP_"))
            self.assert_(len(cls.OP_ID) > 3)
            self.assertEqual(cls.OP_ID, cls.OP_ID.upper())
            self.assertEqual(cls.OP_ID, opcodes_base._NameToId(cls.__name__))
            # Summary prefixes are reserved and may not start an OP_ID
            self.assertFalse(
                compat.any(
                    cls.OP_ID.startswith(prefix)
                    for prefix in opcodes_base.SUMMARY_PREFIX.keys()))
            self.assertTrue(callable(cls.OP_RESULT),
                            msg=("%s should have a result check" % cls.OP_ID))

            # Unknown keyword arguments must be rejected
            self.assertRaises(TypeError,
                              cls,
                              unsupported_parameter="some value")

            args = [
                # No variables
                {},

                # Variables supported by all opcodes
                {
                    "dry_run": False,
                    "debug_level": 0,
                },

                # All variables
                dict([(name, []) for name in cls.GetAllSlots()])
            ]

            for i in args:
                op = cls(**i)

                self.assertEqual(op.OP_ID, cls.OP_ID)
                self._checkSummary(op)

                # Try a restore
                state = op.__getstate__()
                self.assert_(isinstance(state, dict))

                restored = opcodes.OpCode.LoadOpCode(state)
                self.assert_(isinstance(restored, cls))
                self._checkSummary(restored)

                # Unknown attributes cannot be set on opcode instances
                for name in ["x_y_z", "hello_world"]:
                    assert name not in cls.GetAllSlots()
                    for value in [None, True, False, [], "Hello World"]:
                        self.assertRaises(AttributeError, setattr, op, name,
                                          value)
Exemplo n.º 27
0
    def _BuildEnv(self, phase):
        """Compute the environment and the target nodes.

    Based on the opcode and the current node list, this builds the
    environment for the hooks and the target node list for the run.

    @param phase: either L{constants.HOOKS_PHASE_PRE} or
      L{constants.HOOKS_PHASE_POST}
    @return: dictionary of fully-prefixed environment variables

    """
        # Variable prefix depends on the phase
        if phase == constants.HOOKS_PHASE_PRE:
            prefix = "GANETI_"
        elif phase == constants.HOOKS_PHASE_POST:
            prefix = "GANETI_POST_"
        else:
            raise AssertionError("Unknown phase '%s'" % phase)

        env = {}

        if self.hooks_path is not None:
            phase_env = self.build_env_fn()
            if phase_env:
                # The LU-provided env must not already carry the prefix
                assert not compat.any(key.upper().startswith(prefix)
                                      for key in phase_env)
                env.update(("%s%s" % (prefix, key), value)
                           for (key, value) in phase_env.items())

        if phase == constants.HOOKS_PHASE_PRE:
            assert compat.all((key.startswith("GANETI_")
                               and not key.startswith("GANETI_POST_"))
                              for key in env)

        elif phase == constants.HOOKS_PHASE_POST:
            assert compat.all(key.startswith("GANETI_POST_") for key in env)
            assert isinstance(self.pre_env, dict)

            # Merge with pre-phase environment
            assert not compat.any(
                key.startswith("GANETI_POST_") for key in self.pre_env)
            env.update(self.pre_env)
        else:
            raise AssertionError("Unknown phase '%s'" % phase)

        return env
Exemplo n.º 28
0
  def _BuildEnv(self, phase):
    """Compute the environment and the target nodes.

    Based on the opcode and the current node list, this builds the
    environment for the hooks and the target node list for the run.

    @param phase: either L{constants.HOOKS_PHASE_PRE} or
      L{constants.HOOKS_PHASE_POST}
    @return: dictionary of fully-prefixed environment variables

    """
    # Variable prefix depends on the phase
    if phase == constants.HOOKS_PHASE_PRE:
      prefix = "GANETI_"
    elif phase == constants.HOOKS_PHASE_POST:
      prefix = "GANETI_POST_"
    else:
      raise AssertionError("Unknown phase '%s'" % phase)

    env = {}

    if self.hooks_path is not None:
      phase_env = self.build_env_fn()
      if phase_env:
        # The LU-provided env must not already carry the prefix
        assert not compat.any(key.upper().startswith(prefix)
                              for key in phase_env)
        env.update(("%s%s" % (prefix, key), value)
                   for (key, value) in phase_env.items())

    if phase == constants.HOOKS_PHASE_PRE:
      assert compat.all((key.startswith("GANETI_") and
                         not key.startswith("GANETI_POST_"))
                        for key in env)

    elif phase == constants.HOOKS_PHASE_POST:
      assert compat.all(key.startswith("GANETI_POST_") for key in env)
      assert isinstance(self.pre_env, dict)

      # Merge with pre-phase environment
      assert not compat.any(key.startswith("GANETI_POST_")
                            for key in self.pre_env)
      env.update(self.pre_env)
    else:
      raise AssertionError("Unknown phase '%s'" % phase)

    return env
Exemplo n.º 29
0
  def _ValidateName(cls, name):
    """Validates that a given name is valid as VG or LV name.

    The list of valid characters and restricted names is taken out of
    the lvm(8) manpage, with the simplification that we enforce both
    VG and LV restrictions on the names.

    """
    # Short-circuits: substring scan only runs if the cheaper checks pass
    valid = (bool(cls._VALID_NAME_RE.match(name)) and
             name not in cls._INVALID_NAMES and
             not compat.any(sub in name
                            for sub in cls._INVALID_SUBSTRINGS))
    if not valid:
      base.ThrowError("Invalid LVM name '%s'", name)
Exemplo n.º 30
0
  def _ValidateName(cls, name):
    """Validates that a given name is valid as VG or LV name.

    The list of valid characters and restricted names is taken out of
    the lvm(8) manpage, with the simplification that we enforce both
    VG and LV restrictions on the names.

    """
    if cls._VALID_NAME_RE.match(name):
      if name not in cls._INVALID_NAMES:
        # Reserved substrings are rejected anywhere within the name
        if not compat.any(sub in name
                          for sub in cls._INVALID_SUBSTRINGS):
          return
    base.ThrowError("Invalid LVM name '%s'", name)
Exemplo n.º 31
0
  def _GetQueryData(self, lu):
    """Computes the list of operating systems and their attributes.

    Aggregates per-node OS diagnose results into one info object per OS
    name; an OS counts as valid only if it is valid on every queried
    node, and variants/parameters/API versions are reduced to the
    values common to all nodes.

    """
    # Locking is not used
    assert not (compat.any(lu.glm.is_owned(level)
                           for level in locking.LEVELS
                           if level != locking.LEVEL_CLUSTER) or
                self.do_locking or self.use_locking)

    # Only online, VM-capable nodes can meaningfully report OS data
    valid_node_uuids = [node.uuid
                        for node in lu.cfg.GetAllNodesInfo().values()
                        if not node.offline and node.vm_capable]
    pol = self._DiagnoseByOS(lu.rpc.call_os_diagnose(valid_node_uuids))
    cluster = lu.cfg.GetClusterInfo()

    # Maps OS name to its aggregated query.OsInfo object
    data = {}

    for (os_name, os_data) in pol.items():
      info = query.OsInfo(name=os_name, valid=True, node_status=os_data,
                          hidden=(os_name in cluster.hidden_os),
                          blacklisted=(os_name in cluster.blacklisted_os))

      variants = set()
      parameters = set()
      api_versions = set()

      for idx, osl in enumerate(os_data.values()):
        # osl[0][1] holds the per-node validity flag; one invalid node
        # invalidates the whole OS and makes further aggregation pointless
        info.valid = bool(info.valid and osl and osl[0][1])
        if not info.valid:
          break

        (node_variants, node_params, node_api) = osl[0][3:6]
        if idx == 0:
          # First entry: seed the sets with this node's data
          variants.update(node_variants)
          parameters.update(node_params)
          api_versions.update(node_api)
        else:
          # Filter out inconsistent values by intersecting with later nodes
          variants.intersection_update(node_variants)
          parameters.intersection_update(node_params)
          api_versions.intersection_update(node_api)

      info.variants = list(variants)
      info.parameters = list(parameters)
      info.api_versions = list(api_versions)

      data[os_name] = info

    # Prepare data in requested order
    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
            if name in data]
Exemplo n.º 32
0
  def test(self):
    """Checks basic invariants of all registered opcode classes.

    Verifies OP_ID formatting, summary generation, result-check
    presence, rejection of unknown parameters and serialization
    round-trips for every opcode in C{opcodes.OP_MAPPING}.

    """
    # LoadOpCode must reject anything without a usable OP_ID
    self.assertRaises(ValueError, opcodes.OpCode.LoadOpCode, None)
    self.assertRaises(ValueError, opcodes.OpCode.LoadOpCode, "")
    self.assertRaises(ValueError, opcodes.OpCode.LoadOpCode, {})
    self.assertRaises(ValueError, opcodes.OpCode.LoadOpCode, {"OP_ID": ""})

    for cls in opcodes.OP_MAPPING.values():
      # OP_ID must be uppercase, "OP_"-prefixed and derived from the
      # class name, and must not collide with any summary prefix
      self.assert_(cls.OP_ID.startswith("OP_"))
      self.assert_(len(cls.OP_ID) > 3)
      self.assertEqual(cls.OP_ID, cls.OP_ID.upper())
      self.assertEqual(cls.OP_ID, opcodes._NameToId(cls.__name__))
      self.assertFalse(compat.any(cls.OP_ID.startswith(prefix)
                                  for prefix in opcodes._SUMMARY_PREFIX.keys()))
      if cls in MISSING_RESULT_CHECK:
        self.assertTrue(cls.OP_RESULT is None,
                        msg=("%s is listed to not have a result check" %
                             cls.OP_ID))
      else:
        self.assertTrue(callable(cls.OP_RESULT),
                        msg=("%s should have a result check" % cls.OP_ID))

      # Unknown keyword arguments must be rejected by the constructor
      self.assertRaises(TypeError, cls, unsupported_parameter="some value")

      args = [
        # No variables
        {},

        # Variables supported by all opcodes
        {"dry_run": False, "debug_level": 0, },

        # All variables
        dict([(name, []) for name in cls.GetAllSlots()])
        ]

      for i in args:
        op = cls(**i)

        self.assertEqual(op.OP_ID, cls.OP_ID)
        self._checkSummary(op)

        # Try a restore
        state = op.__getstate__()
        self.assert_(isinstance(state, dict))

        restored = opcodes.OpCode.LoadOpCode(state)
        self.assert_(isinstance(restored, cls))
        self._checkSummary(restored)

        # Setting attributes outside the declared slots must fail,
        # regardless of the assigned value
        for name in ["x_y_z", "hello_world"]:
          assert name not in cls.GetAllSlots()
          for value in [None, True, False, [], "Hello World"]:
            self.assertRaises(AttributeError, setattr, op, name, value)
Exemplo n.º 33
0
    def testRunConfigUpdateNoPre(self):
        """RunConfigUpdate must fire one post-phase hook on the master."""
        self.lu.hook_env = {"FOO": "value"}

        master = hooksmaster.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
        master.RunConfigUpdate()

        (nodes, path, run_phase, run_env) = self._rpcs.pop(0)
        self.assertEqual(set(nodes), set([self.lu.cfg.GetMasterNodeName()]))
        self.assertEqual(path, constants.HOOKS_NAME_CFGUPDATE)
        self.assertEqual(run_phase, constants.HOOKS_PHASE_POST)
        self.assertEqual(run_env["GANETI_FOO"], "value")
        self.assertFalse(compat.any(k.startswith("GANETI_POST")
                                    for k in run_env))
        self._CheckEnv(run_env, constants.HOOKS_PHASE_POST,
                       constants.HOOKS_NAME_CFGUPDATE)

        # Exactly one RPC may have been made
        self.assertRaises(IndexError, self._rpcs.pop)
Exemplo n.º 34
0
  def testRunConfigUpdateNoPre(self):
    """Config-update hooks run as post phase only, without pre-phase keys."""
    self.lu.hook_env = {"FOO": "value"}

    hooks_master = hooksmaster.HooksMaster.BuildFromLu(self._HooksRpc, self.lu)
    hooks_master.RunConfigUpdate()

    (node_list, hooks_path, run_phase, run_env) = self._rpcs.pop(0)
    self.assertEqual(set(node_list), set([self.lu.cfg.GetMasterNodeName()]))
    self.assertEqual(hooks_path, constants.HOOKS_NAME_CFGUPDATE)
    self.assertEqual(run_phase, constants.HOOKS_PHASE_POST)
    self.assertEqual(run_env["GANETI_FOO"], "value")
    self.assertFalse(compat.any(key.startswith("GANETI_POST")
                                for key in run_env))
    self._CheckEnv(run_env, constants.HOOKS_PHASE_POST,
                   constants.HOOKS_NAME_CFGUPDATE)

    # No further RPC may have been made
    self.assertRaises(IndexError, self._rpcs.pop)
Exemplo n.º 35
0
  def testCancelWhileInQueue(self):
    """Cancelling a queued job must cancel all opcodes without running any."""
    queue = _FakeQueueForProc()

    ops = [opcodes.OpTestDummy(result="Res%s" % i, fail=False)
           for i in range(5)]

    # Create job
    job_id = 17045
    job = self._CreateJob(queue, job_id, ops)

    self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_QUEUED)

    # Mark as cancelled
    (success, _) = job.Cancel()
    self.assert_(success)

    # Cancelling a queued job must not trigger any queue update
    self.assertRaises(IndexError, queue.GetNextUpdate)

    self.assertFalse(job.start_timestamp)
    self.assertTrue(job.end_timestamp)
    self.assert_(compat.all(op.status == constants.OP_STATUS_CANCELED
                            for op in job.ops))

    # Serialize to check for differences
    before_proc = job.Serialize()

    # Simulate processor called in workerpool
    opexec = _FakeExecOpCodeForProc(queue, None, None)
    self.assert_(jqueue._JobProcessor(queue, opexec, job)())

    # Check result
    self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_CANCELED)
    self.assertEqual(job.GetInfo(["status"]), [constants.JOB_STATUS_CANCELED])
    self.assertFalse(job.start_timestamp)
    self.assertTrue(job.end_timestamp)
    # None of the opcodes may have been started
    self.assertFalse(compat.any(op.start_timestamp or op.end_timestamp
                                for op in job.ops))
    self.assertEqual(job.GetInfo(["opstatus", "opresult"]),
                     [[constants.OP_STATUS_CANCELED for _ in job.ops],
                      ["Job canceled by request" for _ in job.ops]])

    # Must not have changed or written
    self.assertEqual(before_proc, job.Serialize())
    self.assertRaises(IndexError, queue.GetNextUpdate)
Exemplo n.º 36
0
    def CheckArguments(self):
        """Check opcode arguments.

        Instances must either all provide node placement or none of
        them; without node placement an iallocator (explicit or the
        cluster default) is required. Duplicate instance names are
        rejected.

        """
        # One boolean per required node slot: pnode always, snode only
        # for internally mirrored disk templates
        node_flags = []
        for instance in self.op.instances:
            if instance.iallocator is not None:
                raise errors.OpPrereqError(
                    "iallocator are not allowed to be set on"
                    " instance objects", errors.ECODE_INVAL)
            node_flags.append(bool(instance.pnode))
            if instance.disk_template in constants.DTS_INT_MIRROR:
                node_flags.append(bool(instance.snode))

        has_nodes = compat.any(node_flags)
        all_nodes = compat.all(node_flags)
        # Mixed case: some instances specify nodes while others do not
        if all_nodes != has_nodes:
            raise errors.OpPrereqError(
                "There are instance objects providing"
                " pnode/snode while others do not", errors.ECODE_INVAL)

        if not has_nodes and self.op.iallocator is None:
            default_iallocator = self.cfg.GetDefaultIAllocator()
            if not default_iallocator:
                raise errors.OpPrereqError(
                    "No iallocator or nodes on the instances"
                    " given and no cluster-wide default"
                    " iallocator found; please specify either"
                    " an iallocator or nodes on the instances"
                    " or set a cluster-wide default iallocator",
                    errors.ECODE_INVAL)
            self.op.iallocator = default_iallocator

        CheckOpportunisticLocking(self.op)

        dup_names = utils.FindDuplicates(
            [op.instance_name for op in self.op.instances])
        if dup_names:
            raise errors.OpPrereqError(
                "There are duplicate instance names: %s" %
                utils.CommaJoin(dup_names), errors.ECODE_INVAL)
Exemplo n.º 37
0
def _StartGroupChildren(cl, wait):
    """Starts a new instance of the watcher for every node group.

    Forks one child per node group returned by the master; each child
    runs L{_GroupWatcher} for its group. Optionally waits for all
    children to terminate.

    @param cl: client used to query the node groups
    @param wait: whether to wait for all forked children to exit

    """
    # Must only be called from the global watcher, i.e. without the
    # node group option already on the command line
    assert not compat.any(
        arg.startswith(cli.NODEGROUP_OPT_NAME) for arg in sys.argv)

    result = cl.QueryGroups([], ["name", "uuid"], False)

    children = []

    for (idx, (name, uuid)) in enumerate(result):
        if idx > 0:
            # Let's not kill the system
            time.sleep(CHILD_PROCESS_DELAY)

        logging.debug("Spawning child for group %r (%s).", name, uuid)

        signal.signal(signal.SIGCHLD, signal.SIG_IGN)
        try:
            pid = os.fork()
        except OSError:
            logging.exception("Failed to fork for group %r (%s)", name, uuid)
            # FIX: without this "continue", "pid" would be undefined (or
            # stale from a previous iteration) and the "if pid == 0" test
            # below would misbehave or raise NameError
            continue

        if pid == 0:
            # Child process: run the group watcher and leave this function
            (options, _) = ParseOptions()
            options.nodegroup = uuid
            _GroupWatcher(options)
            return
        else:
            logging.debug("Started with PID %s", pid)
            children.append(pid)

    if wait:
        for child in children:
            logging.debug("Waiting for child PID %s", child)
            try:
                result = utils.RetryOnSignal(os.waitpid, child, 0)
            except EnvironmentError as err:
                result = str(err)
            logging.debug("Child PID %s exited with status %s", child, result)
Exemplo n.º 38
0
def _StartGroupChildren(cl, wait):
  """Starts a new instance of the watcher for every node group.

  Forks one child per node group returned by the master; each child
  runs L{_GroupWatcher} for its group. Optionally waits for all
  children to terminate.

  @param cl: client used to query the node groups
  @param wait: whether to wait for all forked children to exit

  """
  # Must only be called from the global watcher, i.e. without the node
  # group option already on the command line
  assert not compat.any(arg.startswith(cli.NODEGROUP_OPT_NAME)
                        for arg in sys.argv)

  result = cl.QueryGroups([], ["name", "uuid"], False)

  children = []

  for (idx, (name, uuid)) in enumerate(result):
    if idx > 0:
      # Let's not kill the system
      time.sleep(CHILD_PROCESS_DELAY)

    logging.debug("Spawning child for group %r (%s).", name, uuid)

    signal.signal(signal.SIGCHLD, signal.SIG_IGN)
    try:
      pid = os.fork()
    except OSError:
      logging.exception("Failed to fork for group %r (%s)", name, uuid)
      # FIX: without this "continue", "pid" would be undefined (or stale
      # from a previous iteration) and the "if pid == 0" test below would
      # misbehave or raise NameError
      continue

    if pid == 0:
      # Child process: run the group watcher and leave this function
      (options, _) = ParseOptions()
      options.nodegroup = uuid
      _GroupWatcher(options)
      return
    else:
      logging.debug("Started with PID %s", pid)
      children.append(pid)

  if wait:
    for child in children:
      logging.debug("Waiting for child PID %s", child)
      try:
        result = utils.RetryOnSignal(os.waitpid, child, 0)
      except EnvironmentError as err:
        result = str(err)
      logging.debug("Child PID %s exited with status %s", child, result)
Exemplo n.º 39
0
  def testCancelWhileWaitlockInQueue(self):
    """Cancelling a job that waits for locks must cancel all its opcodes."""
    queue = _FakeQueueForProc()

    ops = [opcodes.OpTestDummy(result="Res%s" % i, fail=False)
           for i in range(5)]

    # Create job
    job_id = 8645
    job = self._CreateJob(queue, job_id, ops)

    self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_QUEUED)

    # Simulate the first opcode waiting for locks
    job.ops[0].status = constants.OP_STATUS_WAITLOCK

    assert len(job.ops) == 5

    self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_WAITLOCK)

    # Mark as cancelling
    (success, _) = job.Cancel()
    self.assert_(success)

    # Cancelling must not trigger a queue update by itself
    self.assertRaises(IndexError, queue.GetNextUpdate)

    self.assert_(compat.all(op.status == constants.OP_STATUS_CANCELING
                            for op in job.ops))

    opexec = _FakeExecOpCodeForProc(queue, None, None)
    self.assert_(jqueue._JobProcessor(queue, opexec, job)())

    # Check result
    self.assertEqual(job.CalcStatus(), constants.JOB_STATUS_CANCELED)
    self.assertEqual(job.GetInfo(["status"]), [constants.JOB_STATUS_CANCELED])
    self.assertFalse(job.start_timestamp)
    self.assert_(job.end_timestamp)
    # None of the opcodes may have been started
    self.assertFalse(compat.any(op.start_timestamp or op.end_timestamp
                                for op in job.ops))
    self.assertEqual(job.GetInfo(["opstatus", "opresult"]),
                     [[constants.OP_STATUS_CANCELED for _ in job.ops],
                      ["Job canceled by request" for _ in job.ops]])
Exemplo n.º 40
0
def _StartGroupChildren(cl, wait):
  """Starts a new instance of the watcher for every node group.

  Re-executes the current program once per node group, appending the
  node group option with the group's UUID, and optionally waits for all
  children to exit.

  @param cl: client used to query the node groups
  @param wait: whether to wait for all spawned children to exit

  """
  # Must only be called from the global watcher, i.e. without the node
  # group option already on the command line
  assert not compat.any(arg.startswith(cli.NODEGROUP_OPT_NAME)
                        for arg in sys.argv)

  result = cl.QueryGroups([], ["name", "uuid"], False)

  children = []

  for (idx, (name, uuid)) in enumerate(result):
    args = sys.argv + [cli.NODEGROUP_OPT_NAME, uuid]

    if idx > 0:
      # Let's not kill the system
      time.sleep(CHILD_PROCESS_DELAY)

    logging.debug("Spawning child for group '%s' (%s), arguments %s",
                  name, uuid, args)

    try:
      # TODO: Should utils.StartDaemon be used instead?
      pid = os.spawnv(os.P_NOWAIT, args[0], args)
    except Exception: # pylint: disable=W0703
      logging.exception("Failed to start child for group '%s' (%s)",
                        name, uuid)
    else:
      logging.debug("Started with PID %s", pid)
      children.append(pid)

  if wait:
    for pid in children:
      logging.debug("Waiting for child PID %s", pid)
      try:
        result = utils.RetryOnSignal(os.waitpid, pid, 0)
      # FIX: "except X, err" is Python 2-only syntax and a SyntaxError
      # under Python 3; use the "as" form
      except EnvironmentError as err:
        result = str(err)

      logging.debug("Child PID %s exited with status %s", pid, result)
Exemplo n.º 41
0
def _StartGroupChildren(cl, wait):
    """Starts a new instance of the watcher for every node group.

    Re-executes the current program once per node group, appending the
    node group option with the group's UUID, and optionally waits for
    all children to exit.

    @param cl: client used to query the node groups
    @param wait: whether to wait for all spawned children to exit

    """
    # Must only be called from the global watcher, i.e. without the
    # node group option already on the command line
    assert not compat.any(
        arg.startswith(cli.NODEGROUP_OPT_NAME) for arg in sys.argv)

    result = cl.QueryGroups([], ["name", "uuid"], False)

    children = []

    for (idx, (name, uuid)) in enumerate(result):
        args = sys.argv + [cli.NODEGROUP_OPT_NAME, uuid]

        if idx > 0:
            # Let's not kill the system
            time.sleep(CHILD_PROCESS_DELAY)

        logging.debug("Spawning child for group '%s' (%s), arguments %s", name,
                      uuid, args)

        try:
            # TODO: Should utils.StartDaemon be used instead?
            pid = os.spawnv(os.P_NOWAIT, args[0], args)
        except Exception:  # pylint: disable=W0703
            logging.exception("Failed to start child for group '%s' (%s)",
                              name, uuid)
        else:
            logging.debug("Started with PID %s", pid)
            children.append(pid)

    if wait:
        for pid in children:
            logging.debug("Waiting for child PID %s", pid)
            try:
                result = utils.RetryOnSignal(os.waitpid, pid, 0)
            # FIX: "except X, err" is Python 2-only syntax and a
            # SyntaxError under Python 3; use the "as" form
            except EnvironmentError as err:
                result = str(err)

            logging.debug("Child PID %s exited with status %s", pid, result)
Exemplo n.º 42
0
  def _GetQueryData(self, lu):
    """Collects (node name, export name) pairs from all nodes.

    Nodes that fail to answer are reported with a C{None} export entry.

    """
    # Locking is not used
    # TODO
    other_levels = [lvl for lvl in locking.LEVELS
                    if lvl != locking.LEVEL_CLUSTER]
    assert not (compat.any(lu.glm.is_owned(lvl) for lvl in other_levels) or
                self.do_locking or self.use_locking)

    node_uuids = self._GetNames(lu, lu.cfg.GetNodeList(), locking.LEVEL_NODE)

    exports = []
    for (node_uuid, nres) in lu.rpc.call_export_list(node_uuids).items():
      node = lu.cfg.GetNodeInfo(node_uuid)
      if nres.fail_msg:
        # RPC failed for this node; record it without an export name
        exports.append((node.name, None))
      else:
        for expname in nres.payload:
          exports.append((node.name, expname))

    return exports
Exemplo n.º 43
0
  def CheckArguments(self):
    """Check opcode arguments.

    Instances must either all provide node placement or none of them;
    without node placement an iallocator (explicit or cluster default)
    is required. Duplicate instance names are rejected.

    """
    # One boolean per required node slot: pnode always, snode only for
    # internally mirrored disk templates
    node_given = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator are not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      node_given.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        node_given.append(bool(inst.snode))

    has_nodes = compat.any(node_given)
    # Mixed case: some instances specify nodes while others do not
    if compat.all(node_given) != has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if not has_nodes and self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if not default_iallocator:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)
      self.op.iallocator = default_iallocator

    CheckOpportunisticLocking(self.op)

    dup_names = utils.FindDuplicates([op.instance_name
                                      for op in self.op.instances])
    if dup_names:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dup_names),
                                 errors.ECODE_INVAL)
  def testGetStdPvSize(self):
    """Test cases for bdev.LogicalVolume._GetStdPvSize()"""
    # Deterministic random generator so the test is reproducible
    rnd = random.Random(9517)
    for _ in range(50):
      # Identical volumes: the standard size must be within the margin
      # of the single PV's size and independent of repetition count
      pv = self._GenerateRandomPvInfo(rnd, "disk", "myvg")
      single = bdev.LogicalVolume._GetStdPvSize([pv])
      self.assertTrue(single <= pv.size)
      self.assertTrue(single > pv.size * (1 - self._MARGIN))
      for count in range(2, 10):
        self.assertEqual(single,
                         bdev.LogicalVolume._GetStdPvSize([pv] * count))

      # Mixed volumes
      for count in range(1, 10):
        pvs = [self._GenerateRandomPvInfo(rnd, "disk", "myvg")
               for _ in range(count)]
        std_size = bdev.LogicalVolume._GetStdPvSize(pvs)
        self.assertTrue(compat.all(std_size <= p.size for p in pvs))
        self.assertTrue(compat.any(std_size > p.size * (1 - self._MARGIN)
                                   for p in pvs))
        # Duplicating an entry must not change the result
        pvs.append(pvs[0])
        self.assertEqual(std_size, bdev.LogicalVolume._GetStdPvSize(pvs))
Exemplo n.º 45
0
    def test(self):
        """Checks the commands generated by the import/export CommandBuilder.

        Covers compression command selection, magic markers, socat
        addresses and certificate verification for both import and
        export mode.

        """

        # The commands various compressions should use
        compress_import = {
            constants.IEC_GZIP: "gzip -d",
            constants.IEC_GZIP_FAST: "gzip -d",
            constants.IEC_GZIP_SLOW: "gzip -d",
            constants.IEC_LZOP: "lzop -d",
        }
        compress_export = {
            constants.IEC_GZIP: "gzip -1",
            constants.IEC_GZIP_FAST: "gzip -1",
            constants.IEC_GZIP_SLOW: "gzip",
            constants.IEC_LZOP: "lzop",
        }

        for mode in [constants.IEM_IMPORT, constants.IEM_EXPORT]:
            if mode == constants.IEM_IMPORT:
                compress_dict = compress_import
            elif mode == constants.IEM_EXPORT:
                compress_dict = compress_export

            for compress in constants.IEC_ALL:
                # When a magic marker is set it must show up in both the
                # magic and the dd command; otherwise no magic command is
                # generated at all
                for magic in [
                        None, 10 * "-", "HelloWorld", "J9plh4nFo2",
                        "24A02A81-2264-4B51-A882-A2AB9D85B420"
                ]:
                    opts = CmdBuilderConfig(magic=magic, compress=compress)
                    builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)

                    magic_cmd = builder._GetMagicCommand()
                    dd_cmd = builder._GetDdCommand()

                    if magic:
                        self.assert_(("M=%s" % magic) in magic_cmd)
                        self.assert_(("M=%s" % magic) in dd_cmd)
                    else:
                        self.assertFalse(magic_cmd)

                for host in ["localhost", "198.51.100.4", "192.0.2.99"]:
                    for port in [0, 1, 1234, 7856, 45452]:
                        for cmd_prefix in [
                                None, "PrefixCommandGoesHere|",
                                "dd if=/dev/hda bs=1048576 |"
                        ]:
                            for cmd_suffix in [
                                    None, "< /some/file/name",
                                    "| dd of=/dev/null"
                            ]:
                                opts = CmdBuilderConfig(host=host,
                                                        port=port,
                                                        compress=compress,
                                                        cmd_prefix=cmd_prefix,
                                                        cmd_suffix=cmd_suffix)

                                builder = impexpd.CommandBuilder(
                                    mode, opts, 1, 2, 3)

                                # Check complete command
                                cmd = builder.GetCommand()
                                self.assert_(isinstance(cmd, list))

                                if compress != constants.IEC_NONE:
                                    self.assert_(
                                        CheckCmdWord(cmd,
                                                     compress_dict[compress]))

                                if cmd_prefix is not None:
                                    self.assert_(
                                        compat.any(cmd_prefix in i
                                                   for i in cmd))

                                if cmd_suffix is not None:
                                    self.assert_(
                                        compat.any(cmd_suffix in i
                                                   for i in cmd))

                                # Check socat command
                                socat_cmd = builder._GetSocatCommand()

                                if mode == constants.IEM_IMPORT:
                                    ssl_addr = socat_cmd[-2].split(",")
                                    self.assert_(("OPENSSL-LISTEN:%s" %
                                                  port) in ssl_addr)
                                elif mode == constants.IEM_EXPORT:
                                    ssl_addr = socat_cmd[-1].split(",")
                                    self.assert_(("OPENSSL:%s:%s" %
                                                  (host, port)) in ssl_addr)
                                    # Certificate common-name checking is
                                    # only available with socat >= 1.7.3
                                    if impexpd.CommandBuilder._GetSocatVersion(
                                    ) >= (1, 7, 3):
                                        self.assert_(
                                            "openssl-commonname=%s" %
                                            constants.X509_CERT_CN in ssl_addr)
                                    else:
                                        self.assert_("openssl-commonname=%s" %
                                                     constants.X509_CERT_CN
                                                     not in ssl_addr)

                                self.assert_("verify=1" in ssl_addr)
Exemplo n.º 46
0
    def testPriority(self):
        """Checks that pending acquires are granted in priority order.

        Queues a randomized mix of shared and exclusive acquires at
        various priorities behind an exclusive holder and verifies both
        the reported lock information and the final acquisition order.

        """
        # Acquire in exclusive mode
        self.assert_(self.sl.acquire(shared=0))

        # Queue acquires
        def _Acquire(prev, next, shared, priority, result):
            # Wait for the previous thread before queueing, so threads
            # line up in a deterministic order
            prev.wait()
            self.sl.acquire(shared=shared,
                            priority=priority,
                            test_notify=next.set)
            try:
                self.done.put(result)
            finally:
                self.sl.release()

        counter = itertools.count(0)
        priorities = range(-20, 30)
        first = threading.Event()
        prev = first

        # Data structure:
        # {
        #   priority:
        #     [(shared/exclusive, set(acquire names), set(pending threads)),
        #      (shared/exclusive, ...),
        #      ...,
        #     ],
        # }
        perprio = {}

        # References shared acquire per priority in L{perprio}. Data structure:
        # {
        #   priority: (shared=1, set(acquire names), set(pending threads)),
        # }
        prioshared = {}

        for seed in [4979, 9523, 14902, 32440]:
            # Use a deterministic random generator
            rnd = random.Random(seed)
            for priority in [rnd.choice(priorities) for _ in range(30)]:
                modes = [0, 1]
                rnd.shuffle(modes)
                for shared in modes:
                    # Unique name
                    acqname = "%s/shr=%s/prio=%s" % (next(counter), shared,
                                                     priority)

                    ev = threading.Event()
                    thread = self._addThread(target=_Acquire,
                                             args=(prev, ev, shared, priority,
                                                   acqname))
                    prev = ev

                    # Record expected aqcuire, see above for structure
                    data = (shared, set([acqname]), set([thread]))
                    priolist = perprio.setdefault(priority, [])
                    if shared:
                        priosh = prioshared.get(priority, None)
                        if priosh:
                            # Shared acquires are merged
                            for i, j in zip(priosh[1:], data[1:]):
                                i.update(j)
                            assert data[0] == priosh[0]
                        else:
                            prioshared[priority] = data
                            priolist.append(data)
                    else:
                        priolist.append(data)

        # Start all acquires and wait for them
        first.set()
        prev.wait()

        # Check lock information
        self.assertEqual(self.sl.GetLockInfo(set()),
                         [(self.sl.name, None, None, None)])
        self.assertEqual(
            self.sl.GetLockInfo(set([query.LQ_MODE, query.LQ_OWNER])),
            [(self.sl.name, "exclusive", [threading.currentThread().getName()
                                          ], None)])

        self._VerifyPrioPending(self.sl.GetLockInfo(set([query.LQ_PENDING])),
                                perprio)

        # Let threads acquire the lock
        self.sl.release()

        # Wait for everything to finish
        self._waitThreads()

        self.assert_(self.sl._check_empty())

        # Check acquires by priority
        for acquires in [perprio[i] for i in sorted(perprio.keys())]:
            for (_, names, _) in acquires:
                # For shared acquires, the set will contain 1..n entries. For exclusive
                # acquires only one.
                while names:
                    names.remove(self.done.get_nowait())
            self.assertFalse(compat.any(names for (_, names, _) in acquires))

        # All recorded acquires must have been consumed
        self.assertRaises(Queue.Empty, self.done.get_nowait)
Exemplo n.º 47
0
def CheckCmdWord(cmd, word):
    """Return whether C{word} occurs as a whole word in any element of C{cmd}.

    """
    matcher = re.compile(r"\b%s\b" % re.escape(word))
    for part in cmd:
        if matcher.search(part):
            return True
    return False
Exemplo n.º 48
0
  def Run(self):
    """Utility main loop.

    Repeatedly polls the import/export daemons of all queued disk transfers,
    feeds their status back into the per-disk objects and finalizes transfers
    that finished or failed.  Returns once no active transfer remains.

    """
    while True:
      self._AddPendingToQueue()

      # Collect all active daemon names
      daemons = self._GetActiveDaemonNames(self._queue)
      if not daemons:
        # Nothing left to monitor
        break

      # Collect daemon status data from the involved nodes
      data = self._CollectDaemonStatus(self._lu, daemons)

      # Use data; start from the longest delay and shorten it whenever some
      # transfer needs to be re-checked sooner
      delay = self.MAX_DELAY
      for diskie in self._queue:
        if not diskie.active:
          continue

        try:
          try:
            all_daemon_data = data[diskie.node_name]
          except KeyError:
            # No status was collected for this node at all
            result = diskie.SetDaemonData(False, None)
          else:
            result = \
              diskie.SetDaemonData(True,
                                   all_daemon_data[diskie.GetDaemonName()])

          if not result:
            # Daemon not yet ready, retry soon
            delay = min(3.0, delay)
            continue

          if diskie.CheckFinished():
            # Transfer finished
            diskie.Finalize()
            continue

          # Normal case: check again in 5 seconds
          delay = min(5.0, delay)

          if not diskie.CheckListening():
            # Not yet listening, retry soon
            delay = min(1.0, delay)
            continue

          if not diskie.CheckConnected():
            # Not yet connected, retry soon
            delay = min(1.0, delay)
            continue

        except _ImportExportError, err:
          # A failed transfer is finalized with its error message; the loop
          # keeps going so other transfers are not affected
          logging.exception("%s failed", diskie.MODE_TEXT)
          diskie.Finalize(error=str(err))

      if not compat.any(diskie.active for diskie in self._queue):
        break

      # Wait a bit before polling again
      delay = min(self.MAX_DELAY, max(self.MIN_DELAY, delay))
      logging.debug("Waiting for %ss", delay)
      time.sleep(delay)
Exemplo n.º 49
0
    def testParams(self):
        """Verify the parameter definitions of every opcode class."""
        base_params = set(["debug_level", "dry_run", "priority"])

        # The abstract base classes must not be registered themselves
        self.assertTrue(
            opcodes_base.BaseOpCode not in opcodes.OP_MAPPING.values())
        self.assertTrue(opcodes.OpCode not in opcodes.OP_MAPPING.values())

        for op_cls in opcodes.OP_MAPPING.values() + [opcodes.OpCode]:
            slots = op_cls.GetAllSlots()

            self.assertEqual(len(set(slots) & base_params),
                             3,
                             msg=("Opcode %s doesn't support all base"
                                  " parameters (%r)" %
                                  (op_cls.OP_ID, base_params)))

            # All opcodes must have OP_PARAMS
            self.assert_(hasattr(op_cls, "OP_PARAMS"),
                         msg="%s doesn't have OP_PARAMS" % op_cls.OP_ID)

            param_names = [entry[0] for entry in op_cls.GetAllParams()]
            self.assertEqual(slots, param_names)

            # Without inheritance
            self.assertEqual(op_cls.__slots__,
                             [entry[0] for entry in op_cls.OP_PARAMS])

            # This won't work if parameters are converted to a dictionary
            duplicates = utils.FindDuplicates(param_names)
            self.assertFalse(duplicates,
                             msg=("Found duplicate parameters %r in %s" %
                                  (duplicates, op_cls.OP_ID)))

            # Check every individual parameter definition
            for (name, default, check_fn, doc) in op_cls.GetAllParams():
                self.assert_(name)
                self.assertTrue(callable(check_fn),
                                msg=("Invalid type check for %s.%s" %
                                     (op_cls.OP_ID, name)))
                self.assertTrue(doc is None or isinstance(doc, basestring))

                if callable(default):
                    default_value = default()
                    self.assertFalse(
                        callable(default_value),
                        msg=("Default value of %s.%s returned by function"
                             " is callable" % (op_cls.OP_ID, name)))
                else:
                    default_value = default

                if default is not ht.NoDefault and default is not None:
                    self.assertTrue(
                        check_fn(default_value),
                        msg=("Default value of %s.%s does not verify" %
                             (op_cls.OP_ID, name)))

            # Documentation is all-or-nothing: if one parameter is
            # documented, every parameter must be
            docs = [doc is not None for (_, _, _, doc) in op_cls.OP_PARAMS]
            self.assertTrue(not compat.any(docs) or compat.all(docs),
                            msg="%s does not document all parameters" % op_cls)
Exemplo n.º 50
0
  def Create(cls, unique_id, children, size, spindles, params, excl_stor,
             dyn_params, **kwargs):
    """Create a new logical volume.

    @type unique_id: tuple of (string, string)
    @param unique_id: (vg_name, lv_name) pair naming the new volume
    @type size: int
    @param size: size of the volume, in MiB
    @type spindles: int or None
    @param spindles: number of PVs to use; mandatory with exclusive storage
    @type params: dict
    @param params: disk parameters; L{constants.LDP_STRIPES} is used here
    @type excl_stor: bool
    @param excl_stor: whether exclusive storage is enabled
    @raise errors.ProgrammerError: if C{unique_id} is not a 2-element
      tuple/list
    @rtype: L{LogicalVolume}
    @return: object representing the created volume

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    vg_name, lv_name = unique_id
    cls._ValidateName(vg_name)
    cls._ValidateName(lv_name)
    pvs_info = cls.GetPVInfo([vg_name])
    if not pvs_info:
      if excl_stor:
        msg = "No (empty) PVs found"
      else:
        msg = "Can't compute PV info for vg %s" % vg_name
      base.ThrowError(msg)
    # Prefer the PVs with the most free space
    pvs_info.sort(key=(lambda pv: pv.free), reverse=True)

    pvlist = [pv.name for pv in pvs_info]
    if compat.any(":" in v for v in pvlist):
      base.ThrowError("Some of your PVs have the invalid character ':' in their"
                      " name, this is not supported - please filter them out"
                      " in lvm.conf using either 'filter' or 'preferred_names'")

    current_pvs = len(pvlist)
    desired_stripes = params[constants.LDP_STRIPES]
    stripes = min(current_pvs, desired_stripes)

    if excl_stor:
      if spindles is None:
        # Note: the original message lacked spaces between the adjacent
        # string literals ("...requiredwhen..."); fixed here
        base.ThrowError("Unspecified number of spindles: this is required"
                        " when exclusive storage is enabled, try running"
                        " gnt-cluster repair-disk-sizes")
      (err_msgs, _) = utils.LvmExclusiveCheckNodePvs(pvs_info)
      if err_msgs:
        for m in err_msgs:
          logging.warning(m)
      req_pvs = cls._ComputeNumPvs(size, pvs_info)
      if spindles < req_pvs:
        base.ThrowError("Requested number of spindles (%s) is not enough for"
                        " a disk of %d MB (at least %d spindles needed)",
                        spindles, size, req_pvs)
      else:
        req_pvs = spindles
      pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs)
      current_pvs = len(pvlist)
      if current_pvs < req_pvs:
        base.ThrowError("Not enough empty PVs (spindles) to create a disk of %d"
                        " MB: %d available, %d needed",
                        size, current_pvs, req_pvs)
      assert current_pvs == len(pvlist)
      # We must update stripes to be sure to use all the desired spindles
      stripes = current_pvs
      if stripes > desired_stripes:
        # Don't warn when lowering stripes, as it's no surprise
        logging.warning("Using %s stripes instead of %s, to be able to use"
                        " %s spindles", stripes, desired_stripes, current_pvs)

    else:
      if stripes < desired_stripes:
        logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
                        " available.", desired_stripes, vg_name, current_pvs)
      free_size = sum([pv.free for pv in pvs_info])
      # The size constraint should have been checked from the master before
      # calling the create function.
      if free_size < size:
        base.ThrowError("Not enough free space: required %s,"
                        " available %s", size, free_size)

    # If the free space is not well distributed, we won't be able to
    # create an optimally-striped volume; in that case, we want to try
    # with N, N-1, ..., 2, and finally 1 (non-stripped) number of
    # stripes
    cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
    for stripes_arg in range(stripes, 0, -1):
      result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
      if not result.failed:
        break
    if result.failed:
      base.ThrowError("LV create failed (%s): %s",
                      result.fail_reason, result.output)
    return LogicalVolume(unique_id, children, size, params,
                         dyn_params, **kwargs)
Exemplo n.º 51
0
  def Run(self):
    """Utility main loop.

    Polls all queued import/export daemons until every disk transfer has
    either completed or failed, updating each transfer object with the
    collected daemon status on every iteration.

    """
    while True:
      self._AddPendingToQueue()

      # Collect all active daemon names
      daemons = self._GetActiveDaemonNames(self._queue)
      if not daemons:
        # Queue is empty, nothing to do
        break

      # Collect daemon status data for all involved nodes
      data = self._CollectDaemonStatus(self._lu, daemons)

      # Use data; "delay" is shortened below whenever a transfer needs a
      # quicker re-check
      delay = self.MAX_DELAY
      for diskie in self._queue:
        if not diskie.active:
          continue

        try:
          try:
            all_daemon_data = data[diskie.node_name]
          except KeyError:
            # This node returned no status information
            result = diskie.SetDaemonData(False, None)
          else:
            result = \
              diskie.SetDaemonData(True,
                                   all_daemon_data[diskie.GetDaemonName()])

          if not result:
            # Daemon not yet ready, retry soon
            delay = min(3.0, delay)
            continue

          if diskie.CheckFinished():
            # Transfer finished
            diskie.Finalize()
            continue

          # Normal case: check again in 5 seconds
          delay = min(5.0, delay)

          if not diskie.CheckListening():
            # Not yet listening, retry soon
            delay = min(1.0, delay)
            continue

          if not diskie.CheckConnected():
            # Not yet connected, retry soon
            delay = min(1.0, delay)
            continue

        except _ImportExportError, err:
          # Finalize this transfer with the error, but keep processing the
          # remaining ones
          logging.exception("%s failed", diskie.MODE_TEXT)
          diskie.Finalize(error=str(err))

      if not compat.any(diskie.active for diskie in self._queue):
        break

      # Wait a bit before the next polling round
      delay = min(self.MAX_DELAY, max(self.MIN_DELAY, delay))
      logging.debug("Waiting for %ss", delay)
      time.sleep(delay)
Exemplo n.º 52
0
    def test(self):
        """Exercises CommandBuilder over modes, compression, magic and hosts.

        Checks the generated magic/dd commands for every magic value and the
        full command line (including the socat invocation) for combinations
        of host, port and command prefix/suffix.

        """
        for mode in [constants.IEM_IMPORT, constants.IEM_EXPORT]:
            # Import decompresses, export compresses
            if mode == constants.IEM_IMPORT:
                comprcmd = "gunzip"
            elif mode == constants.IEM_EXPORT:
                comprcmd = "gzip"

            for compress in [constants.IEC_NONE, constants.IEC_GZIP]:
                for magic in [
                        None, 10 * "-", "HelloWorld", "J9plh4nFo2",
                        "24A02A81-2264-4B51-A882-A2AB9D85B420"
                ]:
                    opts = CmdBuilderConfig(magic=magic, compress=compress)
                    builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)

                    magic_cmd = builder._GetMagicCommand()
                    dd_cmd = builder._GetDdCommand()

                    if magic:
                        # The magic value must show up in both commands
                        self.assert_(("M=%s" % magic) in magic_cmd)
                        self.assert_(("M=%s" % magic) in dd_cmd)
                    else:
                        # Without a magic value no magic command is built
                        self.assertFalse(magic_cmd)

                for host in ["localhost", "198.51.100.4", "192.0.2.99"]:
                    for port in [0, 1, 1234, 7856, 45452]:
                        for cmd_prefix in [
                                None, "PrefixCommandGoesHere|",
                                "dd if=/dev/hda bs=1048576 |"
                        ]:
                            for cmd_suffix in [
                                    None, "< /some/file/name",
                                    "| dd of=/dev/null"
                            ]:
                                opts = CmdBuilderConfig(host=host,
                                                        port=port,
                                                        compress=compress,
                                                        cmd_prefix=cmd_prefix,
                                                        cmd_suffix=cmd_suffix)

                                builder = impexpd.CommandBuilder(
                                    mode, opts, 1, 2, 3)

                                # Check complete command
                                cmd = builder.GetCommand()
                                self.assert_(isinstance(cmd, list))

                                if compress == constants.IEC_GZIP:
                                    self.assert_(CheckCmdWord(cmd, comprcmd))

                                if cmd_prefix is not None:
                                    self.assert_(
                                        compat.any(cmd_prefix in i
                                                   for i in cmd))

                                if cmd_suffix is not None:
                                    self.assert_(
                                        compat.any(cmd_suffix in i
                                                   for i in cmd))

                                # Check socat command: imports listen, exports
                                # connect
                                socat_cmd = builder._GetSocatCommand()

                                if mode == constants.IEM_IMPORT:
                                    ssl_addr = socat_cmd[-2].split(",")
                                    self.assert_(("OPENSSL-LISTEN:%s" %
                                                  port) in ssl_addr)
                                elif mode == constants.IEM_EXPORT:
                                    ssl_addr = socat_cmd[-1].split(",")
                                    self.assert_(("OPENSSL:%s:%s" %
                                                  (host, port)) in ssl_addr)

                                # Certificate verification must be enabled
                                self.assert_("verify=1" in ssl_addr)
Exemplo n.º 53
0
  def Create(cls, unique_id, children, size, spindles, params, excl_stor,
             dyn_params, *args):
    """Create a new logical volume.

    @type unique_id: tuple of (string, string)
    @param unique_id: (vg_name, lv_name) pair naming the new volume
    @type size: int
    @param size: size of the volume, in MiB
    @type spindles: int or None
    @param spindles: number of PVs to use; mandatory with exclusive storage
    @type params: dict
    @param params: disk parameters; L{constants.LDP_STRIPES} is used here
    @type excl_stor: bool
    @param excl_stor: whether exclusive storage is enabled
    @raise errors.ProgrammerError: if C{unique_id} is not a 2-element
      tuple/list
    @rtype: L{LogicalVolume}
    @return: object representing the created volume

    """
    if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
      raise errors.ProgrammerError("Invalid configuration data %s" %
                                   str(unique_id))
    vg_name, lv_name = unique_id
    cls._ValidateName(vg_name)
    cls._ValidateName(lv_name)
    pvs_info = cls.GetPVInfo([vg_name])
    if not pvs_info:
      if excl_stor:
        msg = "No (empty) PVs found"
      else:
        msg = "Can't compute PV info for vg %s" % vg_name
      base.ThrowError(msg)
    # Prefer the PVs with the most free space
    pvs_info.sort(key=(lambda pv: pv.free), reverse=True)

    pvlist = [pv.name for pv in pvs_info]
    if compat.any(":" in v for v in pvlist):
      base.ThrowError("Some of your PVs have the invalid character ':' in their"
                      " name, this is not supported - please filter them out"
                      " in lvm.conf using either 'filter' or 'preferred_names'")

    current_pvs = len(pvlist)
    desired_stripes = params[constants.LDP_STRIPES]
    stripes = min(current_pvs, desired_stripes)

    if excl_stor:
      if spindles is None:
        # Note: the original message lacked spaces between the adjacent
        # string literals ("...requiredwhen..."); fixed here
        base.ThrowError("Unspecified number of spindles: this is required"
                        " when exclusive storage is enabled, try running"
                        " gnt-cluster repair-disk-sizes")
      (err_msgs, _) = utils.LvmExclusiveCheckNodePvs(pvs_info)
      if err_msgs:
        for m in err_msgs:
          logging.warning(m)
      req_pvs = cls._ComputeNumPvs(size, pvs_info)
      if spindles < req_pvs:
        base.ThrowError("Requested number of spindles (%s) is not enough for"
                        " a disk of %d MB (at least %d spindles needed)",
                        spindles, size, req_pvs)
      else:
        req_pvs = spindles
      pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs)
      current_pvs = len(pvlist)
      if current_pvs < req_pvs:
        base.ThrowError("Not enough empty PVs (spindles) to create a disk of %d"
                        " MB: %d available, %d needed",
                        size, current_pvs, req_pvs)
      assert current_pvs == len(pvlist)
      # We must update stripes to be sure to use all the desired spindles
      stripes = current_pvs
      if stripes > desired_stripes:
        # Don't warn when lowering stripes, as it's no surprise
        logging.warning("Using %s stripes instead of %s, to be able to use"
                        " %s spindles", stripes, desired_stripes, current_pvs)

    else:
      if stripes < desired_stripes:
        logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
                        " available.", desired_stripes, vg_name, current_pvs)
      free_size = sum([pv.free for pv in pvs_info])
      # The size constraint should have been checked from the master before
      # calling the create function.
      if free_size < size:
        base.ThrowError("Not enough free space: required %s,"
                        " available %s", size, free_size)

    # If the free space is not well distributed, we won't be able to
    # create an optimally-striped volume; in that case, we want to try
    # with N, N-1, ..., 2, and finally 1 (non-stripped) number of
    # stripes
    cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
    for stripes_arg in range(stripes, 0, -1):
      result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
      if not result.failed:
        break
    if result.failed:
      base.ThrowError("LV create failed (%s): %s",
                      result.fail_reason, result.output)
    return LogicalVolume(unique_id, children, size, params, dyn_params, *args)
Exemplo n.º 54
0
    def BurninCluster(self):
        """Test a cluster intensively.

        This will create instances and then exercise start/stop, failover,
        migration, disk and NIC operations on them, depending on the
        selected options.  It is safe for existing instances but could
        impact performance.

        """

        Log("Testing global parameters")

        # A single node restricts the usable disk templates
        if (len(self.nodes) == 1 and self.opts.disk_template
                not in _SINGLE_NODE_DISK_TEMPLATES):
            Err("When one node is available/selected the disk template must"
                " be one of %s" % utils.CommaJoin(_SINGLE_NODE_DISK_TEMPLATES))

        if self.opts.do_confd_tests and not constants.ENABLE_CONFD:
            Err("You selected confd tests but confd was disabled at configure time"
                )

        # Assume failure until the whole sequence completes; used in the
        # "finally" clause below to decide whether to dump the opcode buffer
        has_err = True
        try:
            self.BurnCreateInstances()

            if self.bep[constants.BE_MINMEM] < self.bep[constants.BE_MAXMEM]:
                self.BurnModifyRuntimeMemory()

            # Disk replacement requires internally mirrored templates
            if self.opts.do_replace1 and \
                 self.opts.disk_template in constants.DTS_INT_MIRROR:
                self.BurnReplaceDisks1D8()
            if (self.opts.do_replace2 and len(self.nodes) > 2
                    and self.opts.disk_template in constants.DTS_INT_MIRROR):
                self.BurnReplaceDisks2()

            if (self.opts.disk_template in constants.DTS_GROWABLE
                    and compat.any(n > 0 for n in self.disk_growth)):
                self.BurnGrowDisks()

            if self.opts.do_failover and \
                 self.opts.disk_template in constants.DTS_MIRRORED:
                self.BurnFailover()

            if self.opts.do_migrate:
                if self.opts.disk_template not in constants.DTS_MIRRORED:
                    Log(
                        "Skipping migration (disk template %s does not support it)",
                        self.opts.disk_template)
                elif not self.hv_can_migrate:
                    Log(
                        "Skipping migration (hypervisor %s does not support it)",
                        self.hypervisor)
                else:
                    self.BurnMigrate()

            if (self.opts.do_move and len(self.nodes) > 1
                    and self.opts.disk_template
                    in [constants.DT_PLAIN, constants.DT_FILE]):
                self.BurnMove()

            if (self.opts.do_importexport
                    and self.opts.disk_template in _IMPEXP_DISK_TEMPLATES):
                self.BurnImportExport()

            if self.opts.do_reinstall:
                self.BurnReinstall()

            if self.opts.do_reboot:
                self.BurnReboot()

            if self.opts.do_renamesame:
                self.BurnRenameSame()

            if self.opts.do_addremove_disks:
                self.BurnAddRemoveDisks()

            default_nic_mode = self.cluster_default_nicparams[
                constants.NIC_MODE]
            # Don't add/remove nics in routed mode, as we would need an ip to add
            # them with
            if self.opts.do_addremove_nics:
                if default_nic_mode == constants.NIC_MODE_BRIDGED:
                    self.BurnAddRemoveNICs()
                else:
                    Log("Skipping nic add/remove as the cluster is not in bridged mode"
                        )

            if self.opts.do_activate_disks:
                self.BurnActivateDisks()

            if self.opts.rename:
                self.BurnRename()

            if self.opts.do_confd_tests:
                self.BurnConfd()

            if self.opts.do_startstop:
                self.BurnStopStart()

            has_err = False
        finally:
            if has_err:
                Log("Error detected: opcode buffer follows:\n\n")
                Log(self.GetFeedbackBuf())
                Log("\n\n")
            # Instances are removed unless explicitly kept
            if not self.opts.keep_instances:
                try:
                    self.BurnRemove()
                except Exception, err:  # pylint: disable=W0703
                    if has_err:  # already detected errors, so errors in removal
                        # are quite expected
                        Log("Note: error detected during instance remove: %s",
                            err)
                    else:  # non-expected error
                        raise
Exemplo n.º 55
0
 def fn(val):
     """Return whether any of the check functions in C{args} accepts C{val}."""
     for check in args:
         if check(val):
             return True
     return False
Exemplo n.º 56
0
 def fn(val):
   """Combined check: C{val} passes if at least one predicate in C{args} does."""
   results = (check(val) for check in args)
   return compat.any(results)
Exemplo n.º 57
0
  def BurninCluster(self):
    """Test a cluster intensively.

    This will create instances and then exercise start/stop, failover,
    migration, disk and NIC operations on them, depending on the selected
    options.  It is safe for existing instances but could impact
    performance.

    """

    Log("Testing global parameters")

    # A single node restricts the usable disk templates
    if (len(self.nodes) == 1 and
        self.opts.disk_template not in _SINGLE_NODE_DISK_TEMPLATES):
      Err("When one node is available/selected the disk template must"
          " be one of %s" % utils.CommaJoin(_SINGLE_NODE_DISK_TEMPLATES))

    # Assume failure until the whole sequence completes; checked in the
    # "finally" clause to decide whether to dump the opcode buffer
    has_err = True
    try:
      self.BurnCreateInstances()

      if self.opts.do_startstop:
        self.BurnStopStart()

      if self.bep[constants.BE_MINMEM] < self.bep[constants.BE_MAXMEM]:
        self.BurnModifyRuntimeMemory()

      # Disk replacement requires internally mirrored templates
      if self.opts.do_replace1 and \
           self.opts.disk_template in constants.DTS_INT_MIRROR:
        self.BurnReplaceDisks1D8()
      if (self.opts.do_replace2 and len(self.nodes) > 2 and
          self.opts.disk_template in constants.DTS_INT_MIRROR):
        self.BurnReplaceDisks2()

      if (self.opts.disk_template in constants.DTS_GROWABLE and
          compat.any(n > 0 for n in self.disk_growth)):
        self.BurnGrowDisks()

      if self.opts.do_failover and \
           self.opts.disk_template in constants.DTS_MIRRORED:
        self.BurnFailover()

      if self.opts.do_migrate:
        if self.opts.disk_template not in constants.DTS_MIRRORED:
          Log("Skipping migration (disk template %s does not support it)",
              self.opts.disk_template)
        elif not self.hv_can_migrate:
          Log("Skipping migration (hypervisor %s does not support it)",
              self.hypervisor)
        else:
          self.BurnMigrate()

      if (self.opts.do_move and len(self.nodes) > 1 and
          self.opts.disk_template in [constants.DT_PLAIN, constants.DT_FILE]):
        self.BurnMove()

      if (self.opts.do_importexport and
          self.opts.disk_template in _IMPEXP_DISK_TEMPLATES):
        self.BurnImportExport()

      if self.opts.do_reinstall:
        self.BurnReinstall()

      if self.opts.do_reboot:
        self.BurnReboot()

      if self.opts.do_renamesame:
        self.BurnRenameSame(self.opts.name_check, self.opts.ip_check)

      if self.opts.do_confd_tests:
        self.BurnConfd()

      default_nic_mode = self.cluster_default_nicparams[constants.NIC_MODE]
      # Don't add/remove nics in routed mode, as we would need an ip to add
      # them with
      if self.opts.do_addremove_nics:
        if default_nic_mode == constants.NIC_MODE_BRIDGED:
          self.BurnAddRemoveNICs()
        else:
          Log("Skipping nic add/remove as the cluster is not in bridged mode")

      if self.opts.do_activate_disks:
        self.BurnActivateDisks()

      if self.opts.do_addremove_disks:
        self.BurnAddDisks()
        self.BurnRemoveDisks()

      if self.opts.rename:
        self.BurnRename(self.opts.name_check, self.opts.ip_check)

      has_err = False
    finally:
      if has_err:
        Log("Error detected: opcode buffer follows:\n\n")
        Log(self.GetFeedbackBuf())
        Log("\n\n")
      # Instances are removed unless explicitly kept
      if not self.opts.keep_instances:
        try:
          self.BurnRemove()
        except Exception, err:  # pylint: disable=W0703
          if has_err: # already detected errors, so errors in removal
                      # are quite expected
            Log("Note: error detected during instance remove: %s", err)
          else: # non-expected error
            raise
Exemplo n.º 58
0
    if os.path.basename(rootdir) != hostname:
      raise RuntimeError("Last component of root directory ('%s') must match"
                         " hostname ('%s')" % (rootdir, hostname))

    return (os.path.dirname(rootdir), rootdir, hostname)
  else:
    return ("", "", None)


# Resolved once at import time from the environment variables (see
# C{_PreparePaths}): virtual base directory, per-node root directory and
# virtual hostname
(_VIRT_BASEDIR, _VIRT_NODEROOT, _VIRT_HOSTNAME) = \
  _PreparePaths(_GetRootDirectory(_ROOTDIR_ENVNAME),
                _GetHostname(_HOSTNAME_ENVNAME))


# Consistency check: either all three values are set or none of them is
assert (compat.all([_VIRT_BASEDIR, _VIRT_NODEROOT, _VIRT_HOSTNAME]) or
        not compat.any([_VIRT_BASEDIR, _VIRT_NODEROOT, _VIRT_HOSTNAME]))


def GetVirtualHostname():
  """Returns the virtual hostname.

  The value is determined once at module load time (see C{_PreparePaths}
  above).

  @rtype: string or L{None}
  @return: the virtual hostname, or C{None} when no virtual environment is
    configured

  """
  return _VIRT_HOSTNAME


def MakeNodeRoot(base, node_name):
  """Appends a node name to the base directory.

  """
Exemplo n.º 59
0
Arquivo: misc.py Projeto: badp/ganeti
  def _GetQueryData(self, lu):
    """Computes the list of nodes and their attributes.

    Diagnoses all ExtStorage providers on the online, vm_capable nodes and
    builds one L{query.ExtStorageInfo} object per provider, containing the
    per-node status, the per-nodegroup validity and the set of parameters
    common to all nodes.

    """
    # Locking is not used
    assert not (compat.any(lu.glm.is_owned(level)
                           for level in locking.LEVELS
                           if level != locking.LEVEL_CLUSTER) or
                self.do_locking or self.use_locking)

    # Only nodes that can actually answer the diagnose RPC
    valid_nodes = [node.uuid
                   for node in lu.cfg.GetAllNodesInfo().values()
                   if not node.offline and node.vm_capable]
    pol = self._DiagnoseByProvider(lu.rpc.call_extstorage_diagnose(valid_nodes))

    data = {}

    nodegroup_list = lu.cfg.GetNodeGroupList()

    for (es_name, es_data) in pol.items():
      # For every provider compute the nodegroup validity.
      # To do this we need to check the validity of each node in es_data
      # and then construct the corresponding nodegroup dict:
      #      { nodegroup1: status
      #        nodegroup2: status
      #      }
      ndgrp_data = {}
      for nodegroup in nodegroup_list:
        ndgrp = lu.cfg.GetNodeGroup(nodegroup)

        nodegroup_nodes = ndgrp.members
        nodegroup_name = ndgrp.name
        node_statuses = []

        for node in nodegroup_nodes:
          if node in valid_nodes:
            if es_data[node] != []:
              # Status flag of the first diagnose entry for this node
              node_status = es_data[node][0][1]
              node_statuses.append(node_status)
            else:
              node_statuses.append(False)

        # A nodegroup is valid only if no member node reported a failure
        if False in node_statuses:
          ndgrp_data[nodegroup_name] = False
        else:
          ndgrp_data[nodegroup_name] = True

      # Compute the provider's parameters
      parameters = set()
      for idx, esl in enumerate(es_data.values()):
        valid = bool(esl and esl[0][1])
        if not valid:
          # A single invalid node invalidates the parameter list altogether
          break

        node_params = esl[0][3]
        if idx == 0:
          # First entry
          parameters.update(node_params)
        else:
          # Filter out inconsistent values
          parameters.intersection_update(node_params)

      params = list(parameters)

      # Now fill all the info for this provider
      info = query.ExtStorageInfo(name=es_name, node_status=es_data,
                                  nodegroup_status=ndgrp_data,
                                  parameters=params)

      data[es_name] = info

    # Prepare data in requested order
    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
            if name in data]