Example #1
    def __init__(self,
                 _context,
                 address_list,
                 _req_process_fn=None,
                 _getents=None):
        """Initializes this class.

        """
        lock_monitor_cb = None

        if address_list is None:
            resolver = compat.partial(_SsconfResolver, True)
        else:
            # Caller provided an address list
            resolver = _StaticResolver(address_list)

        encoders = _ENCODERS.copy()

        encoders.update({
            rpc_defs.ED_FILE_DETAILS:
            compat.partial(_PrepareFileUpload, _getents),
        })

        _RpcClientBase.__init__(self,
                                resolver,
                                encoders.get,
                                lock_monitor_cb=lock_monitor_cb,
                                _req_process_fn=_req_process_fn)
        _generated_rpc.RpcClientConfig.__init__(self)
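Every example in this listing pre-binds arguments with compat.partial before handing the resulting callable to other code, as in "resolver = compat.partial(_SsconfResolver, True)" above. As a brief aside (not part of the listing), the sketch below assumes compat.partial behaves like the standard functools.partial; _SsconfResolver here is a dummy with an invented signature, used only to show how the pre-bound flag combines with the arguments supplied later.

import functools

# Assumption: Ganeti's compat.partial is equivalent to functools.partial.
partial = functools.partial

def _SsconfResolver(use_ssconf, hostnames):
  # Dummy resolver with an invented signature; the real Ganeti function
  # takes different arguments.
  return [(name, "192.0.2.1") for name in hostnames]

# Pre-bind the leading boolean, mirroring the constructor above.
resolver = partial(_SsconfResolver, True)

# Callers later supply only the remaining argument:
# resolver(["node1.example.com"]) calls _SsconfResolver(True, ["node1.example.com"]).
print(resolver(["node1.example.com"]))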
Example #2
  def __init__(self, context, address_list, _req_process_fn=None,
               _getents=None):
    """Initializes this class.

    """
    if context:
      lock_monitor_cb = context.glm.AddToLockMonitor
    else:
      lock_monitor_cb = None

    if address_list is None:
      resolver = compat.partial(_SsconfResolver, True)
    else:
      # Caller provided an address list
      resolver = _StaticResolver(address_list)

    encoders = _ENCODERS.copy()

    encoders.update({
      rpc_defs.ED_FILE_DETAILS: compat.partial(_PrepareFileUpload, _getents),
      })

    _RpcClientBase.__init__(self, resolver, encoders.get,
                            lock_monitor_cb=lock_monitor_cb,
                            _req_process_fn=_req_process_fn)
    _generated_rpc.RpcClientConfig.__init__(self)
Example #3
  def __init__(self, resolver, encoder_fn, lock_monitor_cb=None,
               _req_process_fn=None):
    """Initializes this class.

    """
    proc = _RpcProcessor(resolver,
                         netutils.GetDaemonPort(constants.NODED),
                         lock_monitor_cb=lock_monitor_cb)
    self._proc = compat.partial(proc, _req_process_fn=_req_process_fn)
    self._encoder = compat.partial(self._EncodeArg, encoder_fn)
Example #5
    def __call__(mcs, *args, **kwargs):
        """Instantiates class and patches it for use by the RAPI daemon.

        """
        # Access to private attributes of a client class, pylint: disable=W0212
        obj = type.__call__(mcs, *args, **kwargs)

        for m_attrs in OPCODE_ATTRS:
            method, op_attr, rename_attr, aliases_attr, _, fn_attr = m_attrs.GetAll(
            )
            if hasattr(obj, method):
                # If the method handler is already defined, "*_RENAME" or
                # "Get*OpInput" shouldn't be (they're only used by the automatically
                # generated handler)
                assert not hasattr(obj, rename_attr)
                assert not hasattr(obj, fn_attr)

                # The aliases are allowed only on GET calls
                assert not hasattr(obj,
                                   aliases_attr) or method == http.HTTP_GET

                # GET methods can add aliases of values they return under a different
                # name
                if method == http.HTTP_GET and hasattr(obj, aliases_attr):
                    setattr(
                        obj, method,
                        compat.partial(GetHandler, getattr(obj, method),
                                       getattr(obj, aliases_attr)))
            else:
                # Try to generate handler method on handler instance
                try:
                    opcode = getattr(obj, op_attr)
                except AttributeError:
                    pass
                else:
                    setattr(
                        obj, method,
                        compat.partial(
                            obj._GenericHandler, opcode,
                            getattr(obj, rename_attr, None),
                            getattr(obj, fn_attr, obj._GetDefaultData)))

            # Finally, the method (generated or not) should be wrapped to handle
            # forbidden values
            if hasattr(obj, m_attrs.forbidden):
                forbidden_dict = ProduceForbiddenParamDict(
                    obj.__class__.__name__, method,
                    getattr(obj, m_attrs.forbidden))
                setattr(
                    obj, method,
                    compat.partial(obj._ForbiddenHandler, getattr(obj, method),
                                   forbidden_dict,
                                   getattr(obj, m_attrs.rename, None)))

        return obj
Example #6
  def __call__(mcs, *args, **kwargs):
    """Instantiates class and patches it for use by the RAPI daemon.

    """
    # Access to private attributes of a client class, pylint: disable=W0212
    obj = type.__call__(mcs, *args, **kwargs)

    for m_attrs in OPCODE_ATTRS:
      method, op_attr, rename_attr, aliases_attr, _, fn_attr = m_attrs.GetAll()
      if hasattr(obj, method):
        # If the method handler is already defined, "*_RENAME" or
        # "Get*OpInput" shouldn't be (they're only used by the automatically
        # generated handler)
        assert not hasattr(obj, rename_attr)
        assert not hasattr(obj, fn_attr)

        # The aliases are allowed only on GET calls
        assert not hasattr(obj, aliases_attr) or method == http.HTTP_GET

        # GET methods can add aliases of values they return under a different
        # name
        if method == http.HTTP_GET and hasattr(obj, aliases_attr):
          setattr(obj, method,
                  compat.partial(GetHandler, getattr(obj, method),
                                 getattr(obj, aliases_attr)))
      else:
        # Try to generate handler method on handler instance
        try:
          opcode = getattr(obj, op_attr)
        except AttributeError:
          pass
        else:
          setattr(obj, method,
                  compat.partial(obj._GenericHandler, opcode,
                                 getattr(obj, rename_attr, None),
                                 getattr(obj, fn_attr, obj._GetDefaultData)))

      # Finally, the method (generated or not) should be wrapped to handle
      # forbidden values
      if hasattr(obj, m_attrs.forbidden):
        forbidden_dict = ProduceForbiddenParamDict(
          obj.__class__.__name__, method, getattr(obj, m_attrs.forbidden)
        )
        setattr(
          obj, method, compat.partial(obj._ForbiddenHandler,
                                      getattr(obj, method),
                                      forbidden_dict,
                                      getattr(obj, m_attrs.rename, None))
        )

    return obj
Example #7
    def testMigrateInstance(self):
        clustername = "cluster.example.com"
        instname = "server01.example.com"
        target = constants.IP4_ADDRESS_LOCALHOST
        port = 22364

        hvparams = {constants.HV_XEN_CMD: self.CMD}

        for live in [False, True]:
            for fail in [False, True]:
                ping_fn = \
                  testutils.CallCounter(compat.partial(self._FakeTcpPing,
                                                       (target, port), True))

                run_cmd = \
                  compat.partial(self._MigrateInstanceCmd,
                                 clustername, instname, target, port, live,
                                 fail)

                hv = self._GetHv(run_cmd=run_cmd)

                if fail:
                    try:
                        hv._MigrateInstance(clustername,
                                            instname,
                                            target,
                                            port,
                                            live,
                                            hvparams,
                                            _ping_fn=ping_fn)
                    except errors.HypervisorError, err:
                        self.assertTrue(
                            str(err).startswith("Failed to migrate instance"))
                    else:
                        self.fail("Exception was not raised")
                else:
                    hv._MigrateInstance(clustername,
                                        instname,
                                        target,
                                        port,
                                        live,
                                        hvparams,
                                        _ping_fn=ping_fn)

                if self.CMD == constants.XEN_CMD_XM:
                    expected_pings = 1
                else:
                    expected_pings = 0

                self.assertEqual(ping_fn.Count(), expected_pings)
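The test above wraps a pre-bound fake ping function in testutils.CallCounter so that the number of ping attempts can be asserted afterwards. The short sketch below uses a minimal, hypothetical stand-in for that wrapper (it is not Ganeti's testutils implementation) to show how such a counted callable combines with functools.partial:

import functools

class CallCounter(object):
  # Minimal stand-in for a call-counting wrapper; interface assumed from the
  # test above, not taken from Ganeti's testutils.
  def __init__(self, fn):
    self._fn = fn
    self._count = 0

  def __call__(self, *args, **kwargs):
    self._count += 1
    return self._fn(*args, **kwargs)

  def Count(self):
    return self._count

def _FakeTcpPing(expected, result, target, port, **kwargs):
  # Hypothetical fake ping: check the pre-bound target and return a canned answer.
  assert (target, port) == expected
  return result

ping_fn = CallCounter(functools.partial(_FakeTcpPing,
                                        ("127.0.0.1", 22364), True))
assert ping_fn("127.0.0.1", 22364)
assert ping_fn.Count() == 1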
Example #8
  def __init__(self, cfg, lock_monitor_cb, _req_process_fn=None, _getents=None):
    """Initializes the RPC runner.

    @type cfg: L{config.ConfigWriter}
    @param cfg: Configuration
    @type lock_monitor_cb: callable
    @param lock_monitor_cb: Lock monitor callback

    """
    self._cfg = cfg

    encoders = _ENCODERS.copy()

    encoders.update({
      # Encoders requiring configuration object
      rpc_defs.ED_INST_DICT: self._InstDict,
      rpc_defs.ED_INST_DICT_HVP_BEP_DP: self._InstDictHvpBepDp,
      rpc_defs.ED_INST_DICT_OSP_DP: self._InstDictOspDp,
      rpc_defs.ED_NIC_DICT: self._NicDict,
      rpc_defs.ED_DEVICE_DICT: self._DeviceDict,

      # Encoders annotating disk parameters
      rpc_defs.ED_DISKS_DICT_DP: self._DisksDictDP,
      rpc_defs.ED_MULTI_DISKS_DICT_DP: self._MultiDiskDictDP,
      rpc_defs.ED_SINGLE_DISK_DICT_DP: self._SingleDiskDictDP,
      rpc_defs.ED_NODE_TO_DISK_DICT_DP: self._EncodeNodeToDiskDictDP,

      # Encoders with special requirements
      rpc_defs.ED_FILE_DETAILS: compat.partial(_PrepareFileUpload, _getents),

      rpc_defs.ED_IMPEXP_IO: self._EncodeImportExportIO,
      })

    # Resolver using configuration
    resolver = compat.partial(_NodeConfigResolver, cfg.GetNodeInfo,
                              cfg.GetAllNodesInfo)

    # Pylint doesn't recognize multiple inheritance properly, see
    # <http://www.logilab.org/ticket/36586> and
    # <http://www.logilab.org/ticket/35642>
    # pylint: disable=W0233
    _RpcClientBase.__init__(self, resolver, encoders.get,
                            lock_monitor_cb=lock_monitor_cb,
                            _req_process_fn=_req_process_fn)
    _generated_rpc.RpcClientConfig.__init__(self)
    _generated_rpc.RpcClientBootstrap.__init__(self)
    _generated_rpc.RpcClientDnsOnly.__init__(self)
    _generated_rpc.RpcClientDefault.__init__(self)
Example #10
 def _TestUpdate(self, failcmd):
   data = {
     constants.SSHS_SSH_HOST_KEY: [
       (constants.SSHK_DSA, "dsapriv", "dsapub"),
       (constants.SSHK_ECDSA, "ecdsapriv", "ecdsapub"),
       (constants.SSHK_RSA, "rsapriv", "rsapub"),
       ],
     constants.SSHS_SSH_KEY_TYPE: "dsa",
     constants.SSHS_SSH_KEY_BITS: 1024,
     }
   runcmd_fn = compat.partial(self._RunCmd, failcmd)
   if failcmd:
     self.assertRaises(_JoinError, prepare_node_join.UpdateSshDaemon,
                       data, False, _runcmd_fn=runcmd_fn,
                       _keyfiles=self.keyfiles)
   else:
     prepare_node_join.UpdateSshDaemon(data, False, _runcmd_fn=runcmd_fn,
                                       _keyfiles=self.keyfiles)
   self.assertEqual(sorted(os.listdir(self.tmpdir)), sorted([
     "rsa.public", "rsa.private",
     "dsa.public", "dsa.private",
     "ecdsa.public", "ecdsa.private",
     ]))
   self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "rsa.public")),
                    "rsapub")
   self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "rsa.private")),
                    "rsapriv")
   self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "dsa.public")),
                    "dsapub")
   self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "dsa.private")),
                    "dsapriv")
   self.assertEqual(utils.ReadFile(utils.PathJoin(
       self.tmpdir, "ecdsa.public")), "ecdsapub")
   self.assertEqual(utils.ReadFile(utils.PathJoin(
       self.tmpdir, "ecdsa.private")), "ecdsapriv")
Example #11
  def __call__(mcs, *args, **kwargs):
    """Instantiates class and patches it for use by the RAPI daemon.

    """
    # Access to private attributes of a client class, pylint: disable=W0212
    obj = type.__call__(mcs, *args, **kwargs)

    for (method, op_attr, rename_attr, fn_attr) in OPCODE_ATTRS:
      if hasattr(obj, method):
        # If the method handler is already defined, "*_RENAME" or "Get*OpInput"
        # shouldn't be (they're only used by the automatically generated
        # handler)
        assert not hasattr(obj, rename_attr)
        assert not hasattr(obj, fn_attr)
      else:
        # Try to generate handler method on handler instance
        try:
          opcode = getattr(obj, op_attr)
        except AttributeError:
          pass
        else:
          setattr(obj, method,
                  compat.partial(obj._GenericHandler, opcode,
                                 getattr(obj, rename_attr, None),
                                 getattr(obj, fn_attr, obj._GetDefaultData)))

    return obj
Example #12
def TStrictDict(require_all, exclusive, items):
    """Strict dictionary check with specific keys.

    @type require_all: boolean
    @param require_all: Whether all keys in L{items} are required
    @type exclusive: boolean
    @param exclusive: Whether only keys listed in L{items} should be accepted
    @type items: dictionary
    @param items: Mapping from key (string) to verification function

    """
    descparts = ["Dictionary containing"]

    if exclusive:
        descparts.append(" none but the")

    if require_all:
        descparts.append(" required")

    if len(items) == 1:
        descparts.append(" key ")
    else:
        descparts.append(" keys ")

    descparts.append(
        utils.CommaJoin("\"%s\" (value %s)" % (key, value)
                        for (key, value) in items.items()))

    desc = WithDesc("".join(descparts))

    return desc(
        TAnd(TDict,
             compat.partial(_TStrictDictCheck, require_all, exclusive, items)))
Example #13
def TStrictDict(require_all, exclusive, items):
  """Strict dictionary check with specific keys.

  @type require_all: boolean
  @param require_all: Whether all keys in L{items} are required
  @type exclusive: boolean
  @param exclusive: Whether only keys listed in L{items} should be accepted
  @type items: dictionary
  @param items: Mapping from key (string) to verification function

  """
  descparts = ["Dictionary containing"]

  if exclusive:
    descparts.append(" none but the")

  if require_all:
    descparts.append(" required")

  if len(items) == 1:
    descparts.append(" key ")
  else:
    descparts.append(" keys ")

  descparts.append(utils.CommaJoin("\"%s\" (value %s)" % (key, value)
                                   for (key, value) in items.items()))

  desc = WithDesc("".join(descparts))

  return desc(TAnd(TDict,
                   compat.partial(_TStrictDictCheck, require_all, exclusive,
                                  items)))
Example #14
 def testResponseBody(self):
     test_data = {
         "Hello": "World",
         "xyz": range(10),
     }
     resolver = rpc._StaticResolver(["192.0.2.84"])
     http_proc = _FakeRequestProcessor(
         compat.partial(self._GetBodyTestResponse, test_data))
     proc = rpc._RpcProcessor(resolver, 18700)
     host = "node19759"
     body = {host: serializer.DumpJson(test_data)}
     result = proc([host],
                   "upload_file",
                   body,
                   30,
                   NotImplemented,
                   _req_process_fn=http_proc)
     self.assertEqual(result.keys(), [host])
     lhresp = result[host]
     self.assertFalse(lhresp.offline)
     self.assertEqual(lhresp.node, host)
     self.assertFalse(lhresp.fail_msg)
     self.assertEqual(lhresp.payload, None)
     self.assertEqual(lhresp.call, "upload_file")
     lhresp.Raise("should not raise")
     self.assertEqual(http_proc.reqcount, 1)
Example #15
    def testGetNodeInfo(self):
        run_cmd = compat.partial(self._GetNodeInfoCmd, False)
        hv = self._GetHv(run_cmd=run_cmd)
        result = hv.GetNodeInfo()

        self.assertEqual(result["hv_version"], (4, 0))
        self.assertEqual(result["memory_free"], 8004)
Example #16
  def testFilterPending(self):
    opts = optparse.Values(dict(status_filter=constants.JOBS_PENDING,
                                force=False))

    def _Query(qfilter):
      # Need to sort as constants.JOBS_PENDING has no stable order
      assert isinstance(constants.JOBS_PENDING, frozenset)
      self.assertEqual(sorted(qfilter),
                       sorted(qlang.MakeSimpleFilter("status",
                                                     constants.JOBS_PENDING)))

      return [
        [(constants.RS_UNAVAIL, None),
         (constants.RS_UNAVAIL, None),
         (constants.RS_UNAVAIL, None)],
        [(constants.RS_NORMAL, 32532),
         (constants.RS_NORMAL, constants.JOB_STATUS_QUEUED),
         (constants.RS_NORMAL, ["op1", "op2", "op3"])],
        ]

    cl = _ClientForCancelJob(NotImplemented, _Query)

    result = gnt_job.CancelJobs(opts, [], cl=cl,
                                _stdout_fn=self._ToStdout,
                                _ask_fn=compat.partial(self._Ask, False))
    self.assertEqual(result, constants.EXIT_CONFIRMATION)
Example #17
    def testStopInstance(self):
        name = "inst4284.example.com"
        cfgfile = utils.PathJoin(self.tmpdir, name)
        cfgdata = "config file content\n"

        for force in [False, True]:
            for fail in [False, True]:
                utils.WriteFile(cfgfile, data=cfgdata)

                run_cmd = compat.partial(self._StopInstanceCommand, name,
                                         force, fail)

                hv = self._GetHv(run_cmd=run_cmd)

                self.assertTrue(os.path.isfile(cfgfile))

                if fail:
                    try:
                        hv._StopInstance(name, force, None,
                                         constants.DEFAULT_SHUTDOWN_TIMEOUT)
                    except errors.HypervisorError, err:
                        self.assertTrue(
                            str(err).startswith("listing instances failed"),
                            msg=str(err))
                    else:
                        self.fail("Exception was not raised")
                    self.assertEqual(
                        utils.ReadFile(cfgfile),
                        cfgdata,
                        msg=("Configuration was removed when stopping"
                             " instance failed"))
                else:
                    hv._StopInstance(name, force, None,
                                     constants.DEFAULT_SHUTDOWN_TIMEOUT)
                    self.assertFalse(os.path.exists(cfgfile))
Example #18
    def __init__(self):
        """Initialize this class.

        """
        _RpcClientBase.__init__(self, compat.partial(_SsconfResolver, False),
                                _ENCODERS.get)
        _generated_rpc.RpcClientDnsOnly.__init__(self)
Example #19
    def __call__(mcs, *args, **kwargs):
        """Instantiates class and patches it for use by the RAPI daemon.

        """
        # Access to private attributes of a client class, pylint: disable=W0212
        obj = type.__call__(mcs, *args, **kwargs)

        for (method, op_attr, rename_attr, fn_attr) in OPCODE_ATTRS:
            if hasattr(obj, method):
                # If the method handler is already defined, "*_RENAME" or "Get*OpInput"
                # shouldn't be (they're only used by the automatically generated
                # handler)
                assert not hasattr(obj, rename_attr)
                assert not hasattr(obj, fn_attr)
            else:
                # Try to generate handler method on handler instance
                try:
                    opcode = getattr(obj, op_attr)
                except AttributeError:
                    pass
                else:
                    setattr(
                        obj, method,
                        compat.partial(
                            obj._GenericHandler, opcode,
                            getattr(obj, rename_attr, None),
                            getattr(obj, fn_attr, obj._GetDefaultData)))

        return obj
Example #20
    def testFilterPending(self):
        opts = optparse.Values(
            dict(status_filter=constants.JOBS_PENDING, force=False,
                 kill=False))

        def _Query(qfilter):
            # Need to sort as constants.JOBS_PENDING has no stable order
            assert isinstance(constants.JOBS_PENDING, frozenset)
            self.assertEqual(
                sorted(qfilter),
                sorted(qlang.MakeSimpleFilter("status",
                                              constants.JOBS_PENDING)))

            return [
                [(constants.RS_UNAVAIL, None), (constants.RS_UNAVAIL, None),
                 (constants.RS_UNAVAIL, None)],
                [(constants.RS_NORMAL, 32532),
                 (constants.RS_NORMAL, constants.JOB_STATUS_QUEUED),
                 (constants.RS_NORMAL, ["op1", "op2", "op3"])],
            ]

        cl = _ClientForCancelJob(NotImplemented, _Query)

        result = gnt_job.CancelJobs(opts, [],
                                    cl=cl,
                                    _stdout_fn=self._ToStdout,
                                    _ask_fn=compat.partial(self._Ask, False))
        self.assertEqual(result, constants.EXIT_CONFIRMATION)
Example #21
    def ValidateResult(self, ia, result):
        """Validates the result of an relocation request.

        """
        IARequestBase.ValidateResult(self, ia, result)

        node2group = dict((name, ndata["group"])
                          for (name, ndata) in ia.in_data["nodes"].items())

        fn = compat.partial(self._NodesToGroups, node2group,
                            ia.in_data["nodegroups"])

        instance = ia.cfg.GetInstanceInfo(self.inst_uuid)
        request_groups = fn(
            ia.cfg.GetNodeNames(self.relocate_from_node_uuids) +
            ia.cfg.GetNodeNames([instance.primary_node]))
        result_groups = fn(result +
                           ia.cfg.GetNodeNames([instance.primary_node]))

        if ia.success and not set(result_groups).issubset(request_groups):
            raise errors.ResultValidationError(
                "Groups of nodes returned by"
                " iallocator (%s) differ from original"
                " groups (%s)" % (utils.CommaJoin(result_groups),
                                  utils.CommaJoin(request_groups)))
Example #22
def PrepRapi(options, _):
    """Prep remote API function, executed with the PID file held.

    """
    mainloop = daemon.Mainloop()

    users = RapiUsers()

    handler = RemoteApiHandler(users.Get, options.reqauth)

    # Setup file watcher (it'll be driven by asyncore)
    SetupFileWatcher(pathutils.RAPI_USERS_FILE,
                     compat.partial(users.Load, pathutils.RAPI_USERS_FILE))

    users.Load(pathutils.RAPI_USERS_FILE)

    server = http.server.HttpServer(mainloop,
                                    options.bind_address,
                                    options.port,
                                    options.max_clients,
                                    handler,
                                    ssl_params=options.ssl_params,
                                    ssl_verify_peer=False)
    server.Start()

    return (mainloop, server)
Example #23
  def testStartInstance(self):
    (inst, disks) = self._MakeInstance()
    pathutils.LOG_XEN_DIR = self.tmpdir

    for failcreate in [False, True]:
      for paused in [False, True]:
        run_cmd = compat.partial(self._StartInstanceCommand,
                                 inst, paused, failcreate)

        hv = self._GetHv(run_cmd=run_cmd)

        # Ensure instance is not listed
        self.assertTrue(inst.name not in hv.ListInstances())

        # Remove configuration
        cfgfile = utils.PathJoin(self.tmpdir, inst.name)
        utils.RemoveFile(cfgfile)

        if failcreate:
          self.assertRaises(errors.HypervisorError, hv.StartInstance,
                            inst, disks, paused)
          # Check whether a stale config file is left behind
          self.assertFalse(os.path.exists(cfgfile))
        else:
          hv.StartInstance(inst, disks, paused)
          # Check if configuration was updated
          lines = utils.ReadFile(cfgfile).splitlines()

        if constants.HV_VNC_PASSWORD_FILE in inst.hvparams:
          self.assertTrue(("vncpasswd = '%s'" % self.vncpw) in lines)
        else:
          extra = inst.hvparams[constants.HV_KERNEL_ARGS]
          self.assertTrue(("extra = '%s'" % extra) in lines)
Example #24
    def testMigrateTargetUnreachable(self):
        name = "server01.example.com"
        target = constants.IP4_ADDRESS_LOCALHOST
        port = 28349

        hv = self._GetHv(run_cmd=self._MigrateInstTargetUnreachCmd)
        hvparams = {constants.HV_XEN_CMD: self.CMD}

        for live in [False, True]:
            if self.CMD == constants.XEN_CMD_XL:
                # TODO: Detect unreachable targets
                pass
            else:
                try:
                    hv._MigrateInstance(NotImplemented,
                                        name,
                                        target,
                                        port,
                                        live,
                                        hvparams,
                                        _ping_fn=compat.partial(
                                            self._FakeTcpPing, (target, port),
                                            False))
                except errors.HypervisorError, err:
                    wanted = "Remote host %s not" % target
                    self.assertTrue(str(err).startswith(wanted))
                else:
                    self.fail("Exception was not raised")
Example #25
    def testStartInstance(self):
        (inst, disks) = self._MakeInstance()
        pathutils.LOG_XEN_DIR = self.tmpdir

        for failcreate in [False, True]:
            for paused in [False, True]:
                run_cmd = compat.partial(self._StartInstanceCommand, inst,
                                         paused, failcreate)

                hv = self._GetHv(run_cmd=run_cmd)

                # Ensure instance is not listed
                self.assertTrue(inst.name not in hv.ListInstances())

                # Remove configuration
                cfgfile = utils.PathJoin(self.tmpdir, inst.name)
                utils.RemoveFile(cfgfile)

                if failcreate:
                    self.assertRaises(errors.HypervisorError, hv.StartInstance,
                                      inst, disks, paused)
                    # Check whether a stale config file is left behind
                    self.assertFalse(os.path.exists(cfgfile))
                else:
                    hv.StartInstance(inst, disks, paused)
                    # Check if configuration was updated
                    lines = utils.ReadFile(cfgfile).splitlines()

                if constants.HV_VNC_PASSWORD_FILE in inst.hvparams:
                    self.assertTrue(("vncpasswd = '%s'" % self.vncpw) in lines)
                else:
                    extra = inst.hvparams[constants.HV_KERNEL_ARGS]
                    self.assertTrue(("extra = '%s'" % extra) in lines)
Example #26
  def __init__(self):
    """Initialize this class.

    """
    _RpcClientBase.__init__(self, compat.partial(_SsconfResolver, False),
                            _ENCODERS.get)
    _generated_rpc.RpcClientDnsOnly.__init__(self)
Example #27
  def testGetNodeInfo(self):
    run_cmd = compat.partial(self._GetNodeInfoCmd, False)
    hv = self._GetHv(run_cmd=run_cmd)
    result = hv.GetNodeInfo()

    self.assertEqual(result["hv_version"], (4, 0))
    self.assertEqual(result["memory_free"], 8004)
Example #28
def LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
  """Unpacks the result of change-group and node-evacuate iallocator requests.

  Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
  L{constants.IALLOCATOR_MODE_CHG_GROUP}.

  @type lu: L{LogicalUnit}
  @param lu: Logical unit instance
  @type alloc_result: tuple/list
  @param alloc_result: Result from iallocator
  @type early_release: bool
  @param early_release: Whether to release locks early if possible
  @type use_nodes: bool
  @param use_nodes: Whether to display node names instead of groups

  """
  (moved, failed, jobs) = alloc_result

  if failed:
    failreason = utils.CommaJoin("%s (%s)" % (name, reason)
                                 for (name, reason) in failed)
    lu.LogWarning("Unable to evacuate instances %s", failreason)
    raise errors.OpExecError("Unable to evacuate instances %s" % failreason)

  if moved:
    lu.LogInfo("Instances to be moved: %s",
               utils.CommaJoin(
                 "%s (to %s)" %
                 (name, _NodeEvacDest(use_nodes, group, node_names))
                 for (name, group, node_names) in moved))

  return [map(compat.partial(_SetOpEarlyRelease, early_release),
              map(opcodes.OpCode.LoadOpCode, ops))
          for ops in jobs]
Example #29
 def _TestUpdate(self, failcmd):
   data = {
     constants.SSHS_SSH_HOST_KEY: [
       (constants.SSHK_DSA, "dsapriv", "dsapub"),
       (constants.SSHK_ECDSA, "ecdsapriv", "ecdsapub"),
       (constants.SSHK_RSA, "rsapriv", "rsapub"),
       ],
     }
   runcmd_fn = compat.partial(self._RunCmd, failcmd)
   if failcmd:
     self.assertRaises(_JoinError, prepare_node_join.UpdateSshDaemon,
                       data, False, _runcmd_fn=runcmd_fn,
                       _keyfiles=self.keyfiles)
   else:
     prepare_node_join.UpdateSshDaemon(data, False, _runcmd_fn=runcmd_fn,
                                       _keyfiles=self.keyfiles)
   self.assertEqual(sorted(os.listdir(self.tmpdir)), sorted([
     "rsa.public", "rsa.private",
     "dsa.public", "dsa.private",
     "ecdsa.public", "ecdsa.private",
     ]))
   self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "rsa.public")),
                    "rsapub")
   self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "rsa.private")),
                    "rsapriv")
   self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "dsa.public")),
                    "dsapub")
   self.assertEqual(utils.ReadFile(utils.PathJoin(self.tmpdir, "dsa.private")),
                    "dsapriv")
   self.assertEqual(utils.ReadFile(utils.PathJoin(
       self.tmpdir, "ecdsa.public")), "ecdsapub")
   self.assertEqual(utils.ReadFile(utils.PathJoin(
       self.tmpdir, "ecdsa.private")), "ecdsapriv")
Example #30
  def testStopInstance(self):
    name = "inst4284.example.com"
    cfgfile = utils.PathJoin(self.tmpdir, name)
    cfgdata = "config file content\n"

    for force in [False, True]:
      for fail in [False, True]:
        utils.WriteFile(cfgfile, data=cfgdata)

        run_cmd = compat.partial(self._StopInstanceCommand, name, force, fail)

        hv = self._GetHv(run_cmd=run_cmd)

        self.assertTrue(os.path.isfile(cfgfile))

        if fail:
          try:
            hv._StopInstance(name, force, None,
                             constants.DEFAULT_SHUTDOWN_TIMEOUT)
          except errors.HypervisorError, err:
            self.assertTrue(str(err).startswith("listing instances failed"),
                            msg=str(err))
          else:
            self.fail("Exception was not raised")
          self.assertEqual(utils.ReadFile(cfgfile), cfgdata,
                           msg=("Configuration was removed when stopping"
                                " instance failed"))
        else:
          hv._StopInstance(name, force, None,
                           constants.DEFAULT_SHUTDOWN_TIMEOUT)
          self.assertFalse(os.path.exists(cfgfile))
Example #31
  def LocalExport(self, dest_node, compress):
    """Intra-cluster instance export.

    @type dest_node: L{objects.Node}
    @param dest_node: Destination node
    @type compress: string
    @param compress: Compression tool to use

    """
    instance = self._instance
    src_node_uuid = instance.primary_node

    assert len(self._snap_disks) == len(instance.disks)

    transfers = []

    for idx, dev in enumerate(self._snap_disks):
      if not dev:
        transfers.append(None)
        continue

      path = utils.PathJoin(pathutils.EXPORT_DIR, "%s.new" % instance.name,
                            dev.logical_id[1])

      finished_fn = compat.partial(self._TransferFinished, idx)

      if instance.os:
        src_io = constants.IEIO_SCRIPT
        src_ioargs = ((dev, instance), idx)
      else:
        src_io = constants.IEIO_RAW_DISK
        src_ioargs = (dev, instance)

      # FIXME: pass debug option from opcode to backend
      dt = DiskTransfer("snapshot/%s" % idx,
                        src_io, src_ioargs,
                        constants.IEIO_FILE, (path, ),
                        finished_fn)
      transfers.append(dt)

    # Actually export data
    dresults = TransferInstanceData(self._lu, self._feedback_fn,
                                    src_node_uuid, dest_node.uuid,
                                    dest_node.secondary_ip,
                                    compress,
                                    instance, transfers)

    assert len(dresults) == len(instance.disks)

    self._feedback_fn("Finalizing export on %s" % dest_node.name)
    result = self._lu.rpc.call_finalize_export(dest_node.uuid, instance,
                                               self._snap_disks)
    msg = result.fail_msg
    fin_resu = not msg
    if msg:
      self._lu.LogWarning("Could not finalize export for instance %s"
                          " on node %s: %s", instance.name, dest_node.name, msg)

    return (fin_resu, dresults)
Example #33
  def test(self):
    tmpfile = tempfile.NamedTemporaryFile()
    fd = tmpfile.fileno()

    for features in [0, netdev.IFF_VNET_HDR]:
      fn = compat.partial(self._FakeIoctl, features)
      result = netdev._GetTunFeatures(fd, _ioctl=fn)
      self.assertEqual(result, features)
Example #34
    def test(self):
        tmpfile = tempfile.NamedTemporaryFile()
        fd = tmpfile.fileno()

        for features in [0, netdev.IFF_VNET_HDR]:
            fn = compat.partial(self._FakeIoctl, features)
            result = netdev._GetTunFeatures(fd, _ioctl=fn)
            self.assertEqual(result, features)
Example #35
 def _CheckFilter():
   _DoTests([
     # With filter
     ("/2/query/%s" % what, compat.partial(_Check, all_fields), "PUT", {
        "fields": all_fields,
        "filter": [qlang.OP_TRUE, namefield],
        }),
     ])
Example #36
 def _CheckFilter():
   _DoTests([
     # With filter
     ("/2/query/%s" % what, compat.partial(_Check, all_fields), "PUT", {
        "fields": all_fields,
        "filter": trivial_filter
        }),
     ])
Example #37
    def __getattr__(self, name):
        """Finds method by name.

        The method is wrapped using L{_TestWrapper} to produce the actual test
        result.

        """
        return _HideInternalErrors(
            compat.partial(_TestWrapper, getattr(self._client, name)))
Example #38
  def __getattr__(self, name):
    """Finds method by name.

    The method is wrapped using L{_TestWrapper} to produce the actual test
    result.

    """
    return _HideInternalErrors(compat.partial(_TestWrapper,
                                              getattr(self._client, name)))
Example #39
  def __call__(self, address=None):
    """Creates an instrumented LUXI client.

    The LUXI client will record all method calls (use L{CalledNames} to
    retrieve them).

    """
    return luxi.Client(transport=compat.partial(_TestLuxiTransport,
                                                self.Record),
                       address=address)
Example #40
 def _CheckFilter():
     _DoTests([
         # With filter
         ("/2/query/%s" % what, compat.partial(_Check,
                                               all_fields), "PUT", {
                                                   "fields": all_fields,
                                                   "filter":
                                                   trivial_filter
                                               }),
     ])
Example #41
  def testNodeNotReachableByPingOnSecondary(self):
    self.netutils_mod.GetHostname.return_value = \
      HostnameMock(self.node_add.name, self.node_add.primary_ip)
    self.netutils_mod.TcpPing.side_effect = \
      compat.partial(_TcpPingFailSecondary, self.cfg, self.netutils_mod.TcpPing)

    op = self.CopyOpCode(self.op_add)

    self.ExecOpCodeExpectOpPrereqError(op, "Node secondary ip not reachable by"
                                       " TCP based ping to node daemon port")
Example #42
    def __call__(self, address=None):
        """Creates an instrumented LUXI client.

        The LUXI client will record all method calls (use L{CalledNames} to
        retrieve them).

        """
        return luxi.Client(transport=compat.partial(_TestLuxiTransport,
                                                    self.Record),
                           address=address)
Example #43
        def _ProcessRequests(multi, handles):
            self.assertTrue(isinstance(multi, self._DummyCurlMulti))
            self.assertEqual(len(requests), len(handles))
            self.assertTrue(
                compat.all(isinstance(curl, _FakeCurl) for curl in handles))

            # Prepare for lock check
            for req in requests:
                assert req.completion_cb is NotImplementedError
                if use_monitor:
                    req.completion_cb = \
                      compat.partial(_LockCheckReset, lock_monitor_cb.GetMonitor())

            for idx, curl in enumerate(handles):
                try:
                    port = curl.opts["__port__"]
                except KeyError:
                    self.fail("Per-request config function was not called")

                if use_monitor:
                    # Check if lock information is correct
                    lock_info = lock_monitor_cb.GetMonitor().GetLockInfo(None)
                    expected = \
                      [("rpc/%s" % (_BuildNiceName(handle.opts["__port__"],
                                                   default=("localhost/version%s" %
                                                            handle.opts["__port__"]))),
                        None,
                        [threading.currentThread().getName()], None)
                       for handle in handles[idx:]]
                    self.assertEqual(sorted(lock_info), sorted(expected))

                if port % 3 == 0:
                    response_code = http.HTTP_OK
                    msg = None
                else:
                    response_code = http.HttpNotFound.code
                    msg = "test error"

                curl.info = {
                    pycurl.RESPONSE_CODE: response_code,
                }
                if hasattr(pycurl, 'LOCAL_IP'):
                    curl.info[pycurl.LOCAL_IP] = '127.0.0.1'
                if hasattr(pycurl, 'LOCAL_PORT'):
                    curl.info[pycurl.LOCAL_PORT] = port

                # Prepare for reset
                self.assertFalse(curl.opts.pop(pycurl.POSTFIELDS))
                self.assertTrue(callable(curl.opts.pop(pycurl.WRITEFUNCTION)))

                yield (curl, msg)

            if use_monitor:
                self.assertTrue(compat.all(req.lockcheck__
                                           for req in requests))
Example #44
  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                              dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    drbd_info = None
    output_logical_id = dev.logical_id
    if dev.dev_type in constants.DTS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode_uuid = dev.logical_id[1]
        snode_minor = dev.logical_id[4]
        pnode_minor = dev.logical_id[3]
      else:
        snode_uuid = dev.logical_id[0]
        snode_minor = dev.logical_id[3]
        pnode_minor = dev.logical_id[4]
      drbd_info = {
        "primary_node": node_uuid2name_fn(instance.primary_node),
        "primary_minor": pnode_minor,
        "secondary_node": node_uuid2name_fn(snode_uuid),
        "secondary_minor": snode_minor,
        "port": dev.logical_id[2],
      }
      # replace the secret present at the end of the ids with None
      output_logical_id = dev.logical_id[:-1] + (None,)

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": output_logical_id,
      "drbd_info": drbd_info,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
      }
Example #45
    def testNodeNotReachableByPingOnSecondary(self):
        self.netutils_mod.GetHostname.return_value = \
          HostnameMock(self.node_add.name, self.node_add.primary_ip)
        self.netutils_mod.TcpPing.side_effect = \
          compat.partial(_TcpPingFailSecondary, self.cfg, self.netutils_mod.TcpPing)

        op = self.CopyOpCode(self.op_add)

        self.ExecOpCodeExpectOpPrereqError(
            op, "Node secondary ip not reachable by"
            " TCP based ping to node daemon port")
Example #46
    def _ProcessRequests(multi, handles):
      self.assertTrue(isinstance(multi, self._DummyCurlMulti))
      self.assertEqual(len(requests), len(handles))
      self.assertTrue(compat.all(isinstance(curl, _FakeCurl)
                                 for curl in handles))

      # Prepare for lock check
      for req in requests:
        assert req.completion_cb is NotImplementedError
        if use_monitor:
          req.completion_cb = \
            compat.partial(_LockCheckReset, lock_monitor_cb.GetMonitor())

      for idx, curl in enumerate(handles):
        try:
          port = curl.opts["__port__"]
        except KeyError:
          self.fail("Per-request config function was not called")

        if use_monitor:
          # Check if lock information is correct
          lock_info = lock_monitor_cb.GetMonitor().GetLockInfo(None)
          expected = \
            [("rpc/%s" % (_BuildNiceName(handle.opts["__port__"],
                                         default=("localhost/version%s" %
                                                  handle.opts["__port__"]))),
              None,
              [threading.currentThread().getName()], None)
             for handle in handles[idx:]]
          self.assertEqual(sorted(lock_info), sorted(expected))

        if port % 3 == 0:
          response_code = http.HTTP_OK
          msg = None
        else:
          response_code = http.HttpNotFound.code
          msg = "test error"

        curl.info = {
          pycurl.RESPONSE_CODE: response_code,
        }
        if hasattr(pycurl, 'LOCAL_IP'):
          curl.info[pycurl.LOCAL_IP] = '127.0.0.1'
        if hasattr(pycurl, 'LOCAL_PORT'):
          curl.info[pycurl.LOCAL_PORT] = port

        # Prepare for reset
        self.assertFalse(curl.opts.pop(pycurl.POSTFIELDS))
        self.assertTrue(callable(curl.opts.pop(pycurl.WRITEFUNCTION)))

        yield (curl, msg)

      if use_monitor:
        self.assertTrue(compat.all(req.lockcheck__ for req in requests))
Example #47
  def RemoteExport(self, disk_info, key_name, dest_ca_pem, compress, timeouts):
    """Inter-cluster instance export.

    @type disk_info: list
    @param disk_info: Per-disk destination information
    @type key_name: string
    @param key_name: Name of X509 key to use
    @type dest_ca_pem: string
    @param dest_ca_pem: Destination X509 CA in PEM format
    @type compress: string
    @param compress: Compression tool to use
    @type timeouts: L{ImportExportTimeouts}
    @param timeouts: Timeouts for this import

    """
    instance = self._instance
    inst_disks = self._lu.cfg.GetInstanceDisks(instance.uuid)

    assert len(disk_info) == len(instance.disks)

    cbs = _RemoteExportCb(self._feedback_fn, len(instance.disks))

    ieloop = ImportExportLoop(self._lu)
    try:
      for idx, (dev, (host, port, magic)) in enumerate(zip(inst_disks,
                                                           disk_info)):
        # Decide whether to use IPv6
        ipv6 = netutils.IP6Address.IsValid(host)

        opts = objects.ImportExportOptions(key_name=key_name,
                                           ca_pem=dest_ca_pem,
                                           magic=magic,
                                           compress=compress,
                                           ipv6=ipv6)

        if instance.os:
          src_io = constants.IEIO_SCRIPT
          src_ioargs = ((dev, instance), idx)
        else:
          src_io = constants.IEIO_RAW_DISK
          src_ioargs = (dev, instance)

        self._feedback_fn("Sending disk %s to %s:%s" % (idx, host, port))
        finished_fn = compat.partial(self._TransferFinished, idx)
        ieloop.Add(DiskExport(self._lu, instance.primary_node,
                              opts, host, port, instance, "disk%d" % idx,
                              src_io, src_ioargs,
                              timeouts, cbs, private=(idx, finished_fn)))

      ieloop.Run()
    finally:
      ieloop.FinalizeAll()

    return (True, cbs.disk_results)
Example #49
    def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                                dev):
        """Compute block device status.

        @attention: The device has to be annotated already.

        """
        drbd_info = None
        output_logical_id = dev.logical_id
        if dev.dev_type in constants.DTS_DRBD:
            # we change the snode then (otherwise we use the one passed in)
            if dev.logical_id[0] == instance.primary_node:
                snode_uuid = dev.logical_id[1]
                snode_minor = dev.logical_id[4]
                pnode_minor = dev.logical_id[3]
            else:
                snode_uuid = dev.logical_id[0]
                snode_minor = dev.logical_id[3]
                pnode_minor = dev.logical_id[4]
            drbd_info = {
                "primary_node": node_uuid2name_fn(instance.primary_node),
                "primary_minor": pnode_minor,
                "secondary_node": node_uuid2name_fn(snode_uuid),
                "secondary_minor": snode_minor,
                "port": dev.logical_id[2],
            }
            # replace the secret present at the end of the ids with None
            output_logical_id = dev.logical_id[:-1] + (None, )

        dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                                  instance, dev)
        dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

        if dev.children:
            dev_children = map(
                compat.partial(self._ComputeDiskStatusInner, instance,
                               snode_uuid, node_uuid2name_fn), dev.children)
        else:
            dev_children = []

        return {
            "iv_name": dev.iv_name,
            "dev_type": dev.dev_type,
            "logical_id": output_logical_id,
            "drbd_info": drbd_info,
            "pstatus": dev_pstatus,
            "sstatus": dev_sstatus,
            "children": dev_children,
            "mode": dev.mode,
            "size": dev.size,
            "spindles": dev.spindles,
            "name": dev.name,
            "uuid": dev.uuid,
        }
Example #50
  def testFailsValidation(self):
    filename = utils.PathJoin(self.tmpdir, "qa.json")
    testconfig = {}

    check_fn = compat.partial(self._CheckLoadError, filename, testconfig)

    # No cluster name
    check_fn("Cluster name is required")

    testconfig["name"] = "cluster.example.com"

    # No nodes
    check_fn("Need at least one node")

    testconfig["nodes"] = [
      {
        "primary": "xen-test-0",
        "secondary": "192.0.2.1",
        },
      ]

    # No instances
    check_fn("Need at least one instance")

    testconfig["instances"] = [
      {
        "name": "xen-test-inst1",
        },
      ]

    # Missing "disk" and "disk-growth"
    check_fn("Config options 'disk' and 'disk-growth' ")

    testconfig["disk"] = []
    testconfig["disk-growth"] = testconfig["disk"]

    # Minimal accepted configuration
    self._WriteConfig(filename, testconfig)
    result = qa_config._QaConfig.Load(filename)
    self.assertTrue(result.get("nodes"))

    # Non-existent instance check script
    testconfig[qa_config._INSTANCE_CHECK_KEY] = \
      utils.PathJoin(self.tmpdir, "instcheck")
    check_fn("Can't find instance check script")
    del testconfig[qa_config._INSTANCE_CHECK_KEY]

    # No enabled hypervisor
    testconfig[qa_config._ENABLED_HV_KEY] = None
    check_fn("No hypervisor is enabled")

    # Unknown hypervisor
    testconfig[qa_config._ENABLED_HV_KEY] = ["#unknownhv#"]
    check_fn("Unknown hypervisor(s) enabled:")
Example #51
def WithDesc(text):
  """Builds wrapper class with description text.

  @type text: string
  @param text: Description text
  @return: Callable class

  """
  assert text[0] == text[0].upper()

  return compat.partial(_DescWrapper, text)
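WithDesc above returns compat.partial(_DescWrapper, text), i.e. a factory whose remaining constructor argument is the check to wrap. _DescWrapper itself is not shown in this listing, so the sketch below invents a minimal version purely to illustrate how applying partial to a class yields a parameterized constructor:

import functools

class _DescWrapper(object):
  # Invented minimal wrapper: pairs a description with a check function.
  def __init__(self, text, check_fn):
    self._text = text
    self._check = check_fn

  def __call__(self, value):
    return self._check(value)

  def __str__(self):
    return self._text

def WithDesc(text):
  assert text[0] == text[0].upper()
  return functools.partial(_DescWrapper, text)

is_positive = WithDesc("Positive number")(lambda v: v > 0)
assert str(is_positive) == "Positive number"
assert is_positive(5)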
Example #52
def Comment(text):
  """Builds wrapper for adding comment to description text.

  @type text: string
  @param text: Comment text
  @return: Callable class

  """
  assert not frozenset(text).intersection("[]")

  return compat.partial(_CommentWrapper, text)
Example #53
def Comment(text):
    """Builds wrapper for adding comment to description text.

    @type text: string
    @param text: Comment text
    @return: Callable class

    """
    assert not frozenset(text).intersection("[]")

    return compat.partial(_CommentWrapper, text)
Example #54
def WithDesc(text):
    """Builds wrapper class with description text.

    @type text: string
    @param text: Description text
    @return: Callable class

    """
    assert text[0] == text[0].upper()

    return compat.partial(_DescWrapper, text)
Example #55
    def __init__(self):
        """Initializes this class.

        """
        # Pylint doesn't recognize multiple inheritance properly, see
        # <http://www.logilab.org/ticket/36586> and
        # <http://www.logilab.org/ticket/35642>
        # pylint: disable=W0233
        _RpcClientBase.__init__(self, compat.partial(_SsconfResolver, True),
                                _ENCODERS.get)
        _generated_rpc.RpcClientBootstrap.__init__(self)
        _generated_rpc.RpcClientDnsOnly.__init__(self)
Example #56
    def test(self):
        tmpfile = tempfile.NamedTemporaryFile()
        fd = tmpfile.fileno()

        for flags in [0, netdev.IFF_VNET_HDR]:
            fn = compat.partial(self._FakeTunFeatures, fd, flags)

            result = netdev._ProbeTapVnetHdr(fd, _features_fn=fn)
            if flags == 0:
                self.assertFalse(result)
            else:
                self.assertTrue(result)
Example #57
  def __init__(self):
    """Initializes this class.

    """
    # Pylint doesn't recognize multiple inheritance properly, see
    # <http://www.logilab.org/ticket/36586> and
    # <http://www.logilab.org/ticket/35642>
    # pylint: disable=W0233
    _RpcClientBase.__init__(self, compat.partial(_SsconfResolver, True),
                            _ENCODERS.get)
    _generated_rpc.RpcClientBootstrap.__init__(self)
    _generated_rpc.RpcClientDnsOnly.__init__(self)
Example #58
  def test(self):
    tmpfile = tempfile.NamedTemporaryFile()
    fd = tmpfile.fileno()

    for flags in [0, netdev.IFF_VNET_HDR]:
      fn = compat.partial(self._FakeTunFeatures, fd, flags)

      result = netdev._ProbeTapVnetHdr(fd, _features_fn=fn)
      if flags == 0:
        self.assertFalse(result)
      else:
        self.assertTrue(result)
Example #59
  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                              dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """

    drbd_info = None
    if dev.dev_type in constants.LDS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode_uuid = dev.logical_id[1]
      else:
        snode_uuid = dev.logical_id[0]
      drbd_info = {
        "primary_node": node_uuid2name_fn(instance.primary_node),
        "primary_minor": dev.logical_id[3],
        "secondary_node": node_uuid2name_fn(snode_uuid),
        "secondary_minor": dev.logical_id[4],
        "port": dev.logical_id[2],
        "secret": dev.logical_id[5],
      }

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []

    return {
      "access_type": dev.params[constants.LDP_ACCESS],
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "drbd_info": drbd_info,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
      }
Example #60
def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: tuple; (int, callable)
  @return: File descriptor of pipe(2) which must be closed to notify parent
    process and a callable to reopen log files

  """
  # pylint: disable=W0212
  # yes, we really want os._exit

  # TODO: do another attempt to merge Daemonize and StartDaemon, or at
  # least abstract the pipe functionality between them

  # Create pipe for sending error messages
  (rpipe, wpipe) = os.pipe()

  # this might fail
  pid = os.fork()
  if (pid == 0):  # The first child.
    SetupDaemonEnv()

    # this might fail
    pid = os.fork() # Fork a second child.
    if (pid == 0):  # The second child.
      utils_wrapper.CloseFdNoError(rpipe)
    else:
      # exit() or _exit()?  See below.
      os._exit(0) # Exit parent (the first child) of the second child.
  else:
    utils_wrapper.CloseFdNoError(wpipe)
    # Wait for daemon to be started (or an error message to
    # arrive) and read up to 100 KB as an error message
    errormsg = utils_wrapper.RetryOnSignal(os.read, rpipe, 100 * 1024)
    if errormsg:
      sys.stderr.write("Error when starting daemon process: %r\n" % errormsg)
      rcode = 1
    else:
      rcode = 0
    os._exit(rcode) # Exit parent of the first child.

  reopen_fn = compat.partial(SetupDaemonFDs, logfile, None)

  # Open logs for the first time
  reopen_fn()

  return (wpipe, reopen_fn)