Example #1
def ReadSsconfFile(filename):
    """Reads an ssconf file and verifies its size.

  @type filename: string
  @param filename: Path to file
  @rtype: string
  @return: File contents without newlines at the end
  @raise RuntimeError: When the file size exceeds L{_MAX_SIZE}

  """
    statcb = utils.FileStatHelper()

    data = utils.ReadFile(filename, size=_MAX_SIZE, preread=statcb)

    if statcb.st.st_size > _MAX_SIZE:
        msg = ("File '%s' has a size of %s bytes (up to %s allowed)" %
               (filename, statcb.st.st_size, _MAX_SIZE))
        raise RuntimeError(msg)

    return data.rstrip("\n")
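Example #1 caps the read at _MAX_SIZE and uses a stat callback to reject files that are larger on disk. Below is a minimal plain-Python sketch of the same guard; the read_capped helper and the 128 KiB limit are illustrative stand-ins, not Ganeti code:

import os

_MAX_SIZE = 128 * 1024  # illustrative limit, not Ganeti's actual value

def read_capped(filename, max_size=_MAX_SIZE):
    """Read a file, but refuse to return data from files larger than max_size."""
    with open(filename, "r") as fh:
        size = os.fstat(fh.fileno()).st_size
        if size > max_size:
            raise RuntimeError("File '%s' has a size of %s bytes (up to %s allowed)"
                               % (filename, size, max_size))
        return fh.read(max_size).rstrip("\n")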
Example #2
def _PrepareFileUpload(getents_fn, node, filename):
    """Loads a file and prepares it for an upload to nodes.

  """
    statcb = utils.FileStatHelper()
    data = _Compress(node, utils.ReadFile(filename, preread=statcb))
    st = statcb.st

    if getents_fn is None:
        getents_fn = runtime.GetEnts

    getents = getents_fn()

    virt_filename = vcluster.MakeVirtualPath(filename)

    return [
        virt_filename, data, st.st_mode,
        getents.LookupUid(st.st_uid),
        getents.LookupGid(st.st_gid), st.st_atime, st.st_mtime
    ]
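Example #2 pairs the file contents with ownership and timestamp metadata so the receiving node can recreate the file faithfully. A rough standalone equivalent using the standard pwd/grp modules; the helper name and return shape are assumptions for illustration only:

import grp
import os
import pwd

def describe_file_for_upload(filename):
    """Return [path, data, mode, owner, group, atime, mtime] for a local file."""
    with open(filename, "rb") as fh:
        st = os.fstat(fh.fileno())
        data = fh.read()
    owner = pwd.getpwuid(st.st_uid).pw_name
    group = grp.getgrgid(st.st_gid).gr_name
    return [filename, data, st.st_mode, owner, group, st.st_atime, st.st_mtime]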
Example #3
def _ReadNumericFile(file_name):
    """Reads a file containing a number.

  @rtype: None or int
  @return: None if file is not found, otherwise number

  """
    try:
        contents = utils.ReadFile(file_name)
    except EnvironmentError as err:
        if err.errno in (errno.ENOENT, ):
            return None
        raise

    try:
        return int(contents)
    except (ValueError, TypeError) as err:
        # Couldn't convert to int
        raise errors.JobQueueError("Content of file '%s' is not numeric: %s" %
                                   (file_name, err))
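Example #3 treats a missing file as "no value" and anything non-numeric as an error. A compact sketch of the same behaviour without Ganeti's error classes:

import errno

def read_numeric_file(file_name):
    """Return the integer stored in file_name, or None if the file is absent."""
    try:
        with open(file_name) as fh:
            contents = fh.read()
    except EnvironmentError as err:
        if err.errno == errno.ENOENT:
            return None
        raise
    try:
        return int(contents)
    except ValueError as err:
        raise ValueError("Content of file '%s' is not numeric: %s" % (file_name, err))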
Example #4
  def Load(self, filename):
    """Loads a file containing users and passwords.

    @type filename: string
    @param filename: Path to file

    """
    logging.info("Reading users file at %s", filename)
    try:
      try:
        contents = utils.ReadFile(filename)
      except EnvironmentError as err:
        self._users = None
        if err.errno == errno.ENOENT:
          logging.warning("No users file at %s", filename)
        else:
          logging.warning("Error while reading %s: %s", filename, err)
        return False

      users = ParsePasswordFile(contents)
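Example #4 distinguishes a missing users file (a warning, not fatal) from other read errors before parsing. A minimal sketch of that tolerant-load pattern; load_users and the parse_fn argument are illustrative names, not Ganeti's API:

import errno
import logging

def load_users(filename, parse_fn):
    """Return parse_fn(contents) of the users file, or None if it cannot be read."""
    try:
        with open(filename) as fh:
            contents = fh.read()
    except EnvironmentError as err:
        if err.errno == errno.ENOENT:
            logging.warning("No users file at %s", filename)
        else:
            logging.warning("Error while reading %s: %s", filename, err)
        return None
    return parse_fn(contents)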
Example #5
 def _TestUpdate(self, failcmd):
     data = {
         constants.SSHS_SSH_HOST_KEY: [
             (constants.SSHK_DSA, "dsapriv", "dsapub"),
             (constants.SSHK_ECDSA, "ecdsapriv", "ecdsapub"),
             (constants.SSHK_RSA, "rsapriv", "rsapub"),
         ],
     }
     runcmd_fn = compat.partial(self._RunCmd, failcmd)
     if failcmd:
         self.assertRaises(_JoinError,
                           prepare_node_join.UpdateSshDaemon,
                           data,
                           False,
                           _runcmd_fn=runcmd_fn,
                           _keyfiles=self.keyfiles)
     else:
         prepare_node_join.UpdateSshDaemon(data,
                                           False,
                                           _runcmd_fn=runcmd_fn,
                                           _keyfiles=self.keyfiles)
     self.assertEqual(
         sorted(os.listdir(self.tmpdir)),
         sorted([
             "rsa.public",
             "rsa.private",
             "dsa.public",
             "dsa.private",
             "ecdsa.public",
             "ecdsa.private",
         ]))
     self.assertEqual(
         utils.ReadFile(utils.PathJoin(self.tmpdir, "rsa.public")),
         "rsapub")
     self.assertEqual(
         utils.ReadFile(utils.PathJoin(self.tmpdir, "rsa.private")),
         "rsapriv")
     self.assertEqual(
         utils.ReadFile(utils.PathJoin(self.tmpdir, "dsa.public")),
         "dsapub")
     self.assertEqual(
         utils.ReadFile(utils.PathJoin(self.tmpdir, "dsa.private")),
         "dsapriv")
     self.assertEqual(
         utils.ReadFile(utils.PathJoin(self.tmpdir, "ecdsa.public")),
         "ecdsapub")
     self.assertEqual(
         utils.ReadFile(utils.PathJoin(self.tmpdir, "ecdsa.private")),
         "ecdsapriv")
Example #6
    def testInputWithCloseFds(self):
        testfile = testutils.TestDataFilename("cert1.pem")

        temp = open(self.fname, "r+")
        try:
            temp.write("test283523367")
            temp.seek(0)

            result = utils.RunCmd([
                "/bin/bash", "-c",
                ("cat && read -u %s; echo $REPLY" % temp.fileno())
            ],
                                  input_fd=open(testfile, "r"),
                                  noclose_fds=[temp.fileno()])
            self.assertFalse(result.failed)
            self.assertEqual(result.stdout.strip(),
                             utils.ReadFile(testfile) + "test283523367")
            self.assertEqual(result.stderr, "")
        finally:
            temp.close()
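Example #6 checks that RunCmd can feed stdin from one file descriptor while leaving another inherited descriptor open for the child. The same idea with the standard subprocess module, where pass_fds plays the role of noclose_fds; the paths and marker string are made up for this sketch:

import subprocess
import tempfile

def run_with_extra_fd(input_path, marker="test283523367"):
    """Feed input_path on stdin and read a marker back through a second, inherited fd."""
    with tempfile.TemporaryFile(mode="w+") as extra:
        extra.write(marker)
        extra.seek(0)
        with open(input_path) as stdin_fh:
            result = subprocess.run(
                ["/bin/bash", "-c", "cat && read -u %d; echo $REPLY" % extra.fileno()],
                stdin=stdin_fh,
                stdout=subprocess.PIPE,
                pass_fds=[extra.fileno()],  # keep the extra descriptor open in the child
                text=True,
                check=True)
        return result.stdout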
Example #7
    def _GetCurrentCgroupSubsysGroups(cls):
        """Return the dict of cgroup subsystem hierarchies this process belongs to.

    The dictionary has the cgroup subsystem as a key and its hierarchy as a
    value.
    Information is read from /proc/self/cgroup.

    """
        try:
            cgroup_list = utils.ReadFile(cls._PROC_SELF_CGROUP_FILE)
        except EnvironmentError as err:
            raise HypervisorError("Failed to read %s : %s" %
                                  (cls._PROC_SELF_CGROUP_FILE, err))

        cgroups = {}
        for line in filter(None, cgroup_list.split("\n")):
            _, subsystems, hierarchy = line.split(":")
            for subsys in subsystems.split(","):
                cgroups[subsys] = hierarchy[1:]  # discard first '/'

        return cgroups
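Example #7 parses /proc/self/cgroup, where each line has the form "id:subsystem[,subsystem]:hierarchy". A small sketch of the same parsing applied to literal text so it can run anywhere; the sample content is invented:

def parse_cgroup_file(content):
    """Map each cgroup subsystem to the hierarchy path it is mounted under."""
    cgroups = {}
    for line in filter(None, content.split("\n")):
        _, subsystems, hierarchy = line.split(":")
        for subsys in subsystems.split(","):
            cgroups[subsys] = hierarchy[1:]  # drop the leading '/'
    return cgroups

sample = "4:cpu,cpuacct:/some/group\n2:memory:/some/group\n"
assert parse_cgroup_file(sample) == {"cpu": "some/group",
                                     "cpuacct": "some/group",
                                     "memory": "some/group"}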
Example #8
def _InitSSHSetup():
    """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the pub key to the
  permitted hosts and adds the hostkey to its own known hosts.

  """
    priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.SSH_LOGIN_USER)

    for name in priv_key, pub_key:
        if os.path.exists(name):
            utils.CreateBackup(name)
        utils.RemoveFile(name)

    result = utils.RunCmd(
        ["ssh-keygen", "-t", "dsa", "-f", priv_key, "-q", "-N", ""])
    if result.failed:
        raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                                 result.output)

    utils.AddAuthorizedKey(auth_keys, utils.ReadFile(pub_key))
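Example #8 regenerates root's SSH keypair and appends the public key to the authorized_keys file. A rough standalone version of the keygen-and-authorize step using subprocess; the paths are placeholders, and real code would also back up existing keys and set permissions as Ganeti does:

import os
import subprocess

def regenerate_ssh_key(priv_key, auth_keys, key_type="rsa"):
    """Create a fresh keypair at priv_key and append its public half to auth_keys."""
    for name in (priv_key, priv_key + ".pub"):
        if os.path.exists(name):
            os.remove(name)
    subprocess.run(["ssh-keygen", "-t", key_type, "-f", priv_key, "-q", "-N", ""],
                   check=True)
    with open(priv_key + ".pub") as pub, open(auth_keys, "a") as out:
        out.write(pub.read())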
Example #9
  def GetUsermodeHelper(filename=_USERMODE_HELPER_FILE):
    """Returns DRBD usermode_helper currently set.

    @type filename: string
    @param filename: the filename to read the usermode helper from
    @rtype: string
    @return: the currently configured DRBD usermode helper

    """
    try:
      helper = utils.ReadFile(filename).splitlines()[0]
    except EnvironmentError as err:
      if err.errno == errno.ENOENT:
        base.ThrowError("The file %s cannot be opened, check if the module"
                        " is loaded (%s)", filename, str(err))
      else:
        base.ThrowError("Can't read DRBD helper file %s: %s",
                        filename, str(err))
    if not helper:
      base.ThrowError("Can't read any data from %s", filename)
    return helper
Example #10
def SetupNodeDaemon(opts, cluster_name, node, ssh_port):
    """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_port: the SSH port of the new node

  """
    data = {
        constants.NDS_CLUSTER_NAME:
        cluster_name,
        constants.NDS_NODE_DAEMON_CERTIFICATE:
        utils.ReadFile(pathutils.NODED_CERT_FILE),
        constants.NDS_SSCONF:
        ssconf.SimpleStore().ReadAll(),
        constants.NDS_START_NODE_DAEMON:
        True,
        constants.NDS_NODE_NAME:
        node,
    }

    ssh.RunSshCmdWithStdin(cluster_name,
                           node,
                           pathutils.NODE_DAEMON_SETUP,
                           ssh_port,
                           data,
                           debug=opts.debug,
                           verbose=opts.verbose,
                           use_cluster_key=True,
                           ask_key=opts.ssh_key_check,
                           strict_host_check=opts.ssh_key_check,
                           ensure_version=True)

    _WaitForSshDaemon(node, ssh_port)
    _WaitForNodeDaemon(node)
Example #11
def _ReadInstanceStatus(filename):
    """Reads an instance status file.

  @type filename: string
  @param filename: Path to status file
  @rtype: tuple; (None or number, list of lists containing instance name and
    status)
  @return: File's mtime and instance status contained in the file; mtime is
    C{None} if file can't be read

  """
    logging.debug("Reading per-group instance status from '%s'", filename)

    statcb = utils.FileStatHelper()
    try:
        content = utils.ReadFile(filename, preread=statcb)
    except EnvironmentError as err:
        if err.errno == errno.ENOENT:
            logging.error("Can't read '%s', does not exist (yet)", filename)
        else:
            logging.exception("Unable to read '%s', ignoring", filename)
        return (None, None)
Example #12
def _SetupSSH(options, cluster_name, node, ssh_port):
    """Configures a destination node's SSH daemon.

  @param options: Command line options
  @type cluster_name: string
  @param cluster_name: Cluster name
  @type node: string
  @param node: Destination node name
  @type ssh_port: int
  @param ssh_port: Destination node ssh port

  """
    if options.force_join:
        ToStderr(
            "The \"--force-join\" option is no longer supported and will be"
            " ignored.")

    host_keys = _ReadSshKeys(constants.SSH_DAEMON_KEYFILES)

    (_, root_keyfiles) = \
      ssh.GetAllUserFiles(constants.SSH_LOGIN_USER, mkdir=False, dircheck=False)

    root_keys = _ReadSshKeys(root_keyfiles)

    (_, cert_pem) = \
      utils.ExtractX509Certificate(utils.ReadFile(pathutils.NODED_CERT_FILE))

    data = {
        constants.SSHS_CLUSTER_NAME: cluster_name,
        constants.SSHS_NODE_DAEMON_CERTIFICATE: cert_pem,
        constants.SSHS_SSH_HOST_KEY: host_keys,
        constants.SSHS_SSH_ROOT_KEY: root_keys,
    }

    bootstrap.RunNodeSetupCmd(cluster_name, node, pathutils.PREPARE_NODE_JOIN,
                              options.debug, options.verbose, False,
                              options.ssh_key_check, options.ssh_key_check,
                              ssh_port, data)
Example #13
def InitSSHSetup(key_type,
                 key_bits,
                 error_fn=errors.OpPrereqError,
                 _homedir_fn=None,
                 _suffix=""):
    """Setup the SSH configuration for the node.

  This generates a keypair of the requested type for root, adds the public
  key to the permitted hosts and adds the hostkey to its own known hosts.

  @param key_type: the type of SSH keypair to be generated
  @param key_bits: the key length, in bits, to be used

  """
    priv_key, _, auth_keys = GetUserFiles(constants.SSH_LOGIN_USER,
                                          kind=key_type,
                                          mkdir=True,
                                          _homedir_fn=_homedir_fn)

    new_priv_key_name = priv_key + _suffix
    new_pub_key_name = priv_key + _suffix + ".pub"

    for name in new_priv_key_name, new_pub_key_name:
        if os.path.exists(name):
            utils.CreateBackup(name)
        utils.RemoveFile(name)

    result = utils.RunCmd([
        "ssh-keygen", "-b",
        str(key_bits), "-t", key_type, "-f", new_priv_key_name, "-q", "-N", ""
    ])
    if result.failed:
        raise error_fn("Could not generate ssh keypair, error %s" %
                       result.output)

    AddAuthorizedKey(auth_keys, utils.ReadFile(new_pub_key_name))
Example #14
    def testSignedSslCertificate(self):
        server_cert_filename = os.path.join(self.tmpdir, "server.pem")
        utils.GenerateSelfSignedSslCert(server_cert_filename, 123456)

        client_hostname = "myhost.example.com"
        client_cert_filename = os.path.join(self.tmpdir, "client.pem")
        utils.GenerateSignedSslCert(client_cert_filename,
                                    666,
                                    server_cert_filename,
                                    common_name=client_hostname)

        client_cert_pem = utils.ReadFile(client_cert_filename)

        self._checkRsaPrivateKey(client_cert_pem)
        self._checkCertificate(client_cert_pem)

        priv_key = OpenSSL.crypto.load_privatekey(OpenSSL.crypto.FILETYPE_PEM,
                                                  client_cert_pem)
        client_cert = OpenSSL.crypto.load_certificate(
            OpenSSL.crypto.FILETYPE_PEM, client_cert_pem)

        self.assertTrue(self._checkKeyMatchesCert(priv_key, client_cert))
        self.assertEqual(client_cert.get_issuer().CN, "ganeti.example.com")
        self.assertEqual(client_cert.get_subject().CN, client_hostname)
Example #15
    def testConsole(self):
        temp_file = tempfile.NamedTemporaryFile(mode="w", encoding="utf-8")
        failing_file = self._FailingFile(os.devnull, "w")
        for (console, check) in [(None, False), (temp_file, True),
                                 (failing_file, False)]:
            # Create a handler which will fail when handling errors
            cls = utils.log._LogErrorsToConsole(self._FailingHandler)

            # Instantiate handler with file which will fail when writing,
            # provoking a write to the console
            failing_output = self._FailingFile(os.devnull)
            handler = cls(console, failing_output)

            logger = logging.Logger("TestLogger")
            logger.addHandler(handler)
            self.assertEqual(len(logger.handlers), 1)

            # Provoke write
            logger.error("Test message ERROR")

            # Take everything apart
            logger.removeHandler(handler)
            self.assertFalse(logger.handlers)
            handler.close()
            failing_output.close()

            if console and check:
                console.flush()

                # Check console output
                consout = utils.ReadFile(console.name)
                self.assertTrue("Cannot log message" in consout)
                self.assertTrue("Test message ERROR" in consout)

        temp_file.close()
        failing_file.close()
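Example #15 exercises a handler that falls back to writing on the console when the normal log target fails. A simplified sketch of such a fallback handler follows; it is not Ganeti's actual _LogErrorsToConsole implementation, and the class name is made up:

import logging

class ConsoleFallbackHandler(logging.Handler):
    """Write formatted records to `stream`, falling back to `console` on failure."""
    def __init__(self, console, stream):
        super(ConsoleFallbackHandler, self).__init__()
        self._console = console
        self._stream = stream

    def emit(self, record):
        msg = self.format(record)
        try:
            self._stream.write(msg + "\n")
        except Exception:
            if self._console:
                self._console.write("Cannot log message: %s\n" % msg)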
Example #16
    def testUpgradeSave(self):
        """Test that any modification done during upgrading is saved back"""
        cfg = self._get_object()

        # Remove an element, run upgrade, and check if the element is
        # back and the file upgraded
        node = cfg.GetNodeInfo(cfg.GetNodeList()[0])
        # For a ConfigObject, None is the same as a missing field
        node.ndparams = None
        oldsaved = utils.ReadFile(self.cfg_file)
        cfg._UpgradeConfig(saveafter=True)
        self.assertTrue(node.ndparams is not None)
        newsaved = utils.ReadFile(self.cfg_file)
        # We rely on the fact that at least the serial number changes
        self.assertNotEqual(oldsaved, newsaved)

        # Add something that should not be there this time
        key = list(constants.NDC_GLOBALS)[0]
        node.ndparams[key] = constants.NDC_DEFAULTS[key]
        cfg._WriteConfig(None)
        oldsaved = utils.ReadFile(self.cfg_file)
        cfg._UpgradeConfig(saveafter=True)
        self.assertTrue(node.ndparams.get(key) is None)
        newsaved = utils.ReadFile(self.cfg_file)
        self.assertNotEqual(oldsaved, newsaved)

        # Do the upgrade again, this time there should be no update
        oldsaved = newsaved
        cfg._UpgradeConfig(saveafter=True)
        newsaved = utils.ReadFile(self.cfg_file)
        self.assertEqual(oldsaved, newsaved)

        # Reload the configuration again: it shouldn't change the file
        oldsaved = newsaved
        self._get_object()
        newsaved = utils.ReadFile(self.cfg_file)
        self.assertEqual(oldsaved, newsaved)
Example #17
def _SetupSSH(options, cluster_name, node, ssh_port, cl):
    """Configures a destination node's SSH daemon.

  @param options: Command line options
  @type cluster_name: string
  @param cluster_name: Cluster name
  @type node: string
  @param node: Destination node name
  @type ssh_port: int
  @param ssh_port: Destination node ssh port
  @param cl: luxi client

  """
    # Retrieve the list of master and master candidates
    candidate_filter = ["|", ["=", "role", "M"], ["=", "role", "C"]]
    result = cl.Query(constants.QR_NODE, ["uuid"], candidate_filter)
    if len(result.data) < 1:
        raise errors.OpPrereqError(
            "No master or master candidate node is found.")
    candidates = [uuid for ((_, uuid), ) in result.data]
    candidate_keys = ssh.QueryPubKeyFile(candidates)

    if options.force_join:
        ToStderr(
            "The \"--force-join\" option is no longer supported and will be"
            " ignored.")

    host_keys = _ReadSshKeys(constants.SSH_DAEMON_KEYFILES)

    (_, root_keyfiles) = \
      ssh.GetAllUserFiles(constants.SSH_LOGIN_USER, mkdir=False, dircheck=False)

    dsa_root_keyfiles = dict((kind, value)
                             for (kind, value) in root_keyfiles.items()
                             if kind == constants.SSHK_DSA)
    root_keys = _ReadSshKeys(dsa_root_keyfiles)

    (_, cert_pem) = \
      utils.ExtractX509Certificate(utils.ReadFile(pathutils.NODED_CERT_FILE))

    data = {
        constants.SSHS_CLUSTER_NAME: cluster_name,
        constants.SSHS_NODE_DAEMON_CERTIFICATE: cert_pem,
        constants.SSHS_SSH_HOST_KEY: host_keys,
        constants.SSHS_SSH_ROOT_KEY: root_keys,
        constants.SSHS_SSH_AUTHORIZED_KEYS: candidate_keys,
    }

    ssh.RunSshCmdWithStdin(cluster_name,
                           node,
                           pathutils.PREPARE_NODE_JOIN,
                           ssh_port,
                           data,
                           debug=options.debug,
                           verbose=options.verbose,
                           use_cluster_key=False,
                           ask_key=options.ssh_key_check,
                           strict_host_check=options.ssh_key_check)

    (_, dsa_pub_keyfile) = root_keyfiles[constants.SSHK_DSA]
    pub_key = ssh.ReadRemoteSshPubKeys(dsa_pub_keyfile, node, cluster_name,
                                       ssh_port, options.ssh_key_check,
                                       options.ssh_key_check)
    # Unfortunately, we have to add the key with the node name rather than
    # the node's UUID here, because at this point, we do not have a UUID yet.
    # The entry will be corrected in noded later.
    ssh.AddPublicKey(node, pub_key)
Example #18
        if template == constants.DT_DRBD8 and vg_name is not None:
            # The default METAVG value is equal to the VG name set at init time,
            # if provided
            dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name

    try:
        utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
    except errors.OpPrereqError as err:
        raise errors.OpPrereqError("While verifying diskparam options: %s" % err,
                                   errors.ECODE_INVAL)

    # set up ssh config and /etc/hosts
    rsa_sshkey = ""
    dsa_sshkey = ""
    if os.path.isfile(pathutils.SSH_HOST_RSA_PUB):
        sshline = utils.ReadFile(pathutils.SSH_HOST_RSA_PUB)
        rsa_sshkey = sshline.split(" ")[1]
    if os.path.isfile(pathutils.SSH_HOST_DSA_PUB):
        sshline = utils.ReadFile(pathutils.SSH_HOST_DSA_PUB)
        dsa_sshkey = sshline.split(" ")[1]
    if not rsa_sshkey and not dsa_sshkey:
        raise errors.OpPrereqError("Failed to find SSH public keys",
                                   errors.ECODE_ENVIRON)

    if modify_etc_hosts:
        utils.AddHostToEtcHosts(hostname.name, hostname.ip)

    if modify_ssh_setup:
        ssh.InitSSHSetup()

    if default_iallocator is not None:
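Example #18 (an excerpt of cluster initialisation) reads the host's public key files and keeps only the key material, which is the second whitespace-separated token of an OpenSSH public-key line. A small sketch of that extraction; the sample line is invented:

def extract_pub_key_field(pub_key_line):
    """Return the key material from an OpenSSH 'type base64 comment' line."""
    parts = pub_key_line.split(" ")
    if len(parts) < 2:
        raise ValueError("Not an OpenSSH public key line: %r" % pub_key_line)
    return parts[1]

assert extract_pub_key_field("ssh-rsa AAAAB3NzaC1yc2E root@node1") == "AAAAB3NzaC1yc2E"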
Example #19
    def Run(self):
        """Main program.

    """
        self._ComposePaths()

        self.SetupLogging()

        # Option checking
        if self.args:
            raise Error("No arguments expected")
        if self.opts.downgrade and not self.opts.no_verify:
            self.opts.no_verify = True

        # Check master name
        if not (self.CheckHostname(self.opts.SSCONF_MASTER_NODE)
                or self.opts.ignore_hostname):
            logging.error("Aborting due to hostname mismatch")
            sys.exit(constants.EXIT_FAILURE)

        self._AskUser()

        # Check whether it's a Ganeti configuration directory
        if not (os.path.isfile(self.opts.CONFIG_DATA_PATH)
                and os.path.isfile(self.opts.SERVER_PEM_PATH)
                and os.path.isfile(self.opts.KNOWN_HOSTS_PATH)):
            raise Error(("%s does not seem to be a Ganeti configuration"
                         " directory") % self.opts.data_dir)

        if not os.path.isdir(self.opts.conf_dir):
            raise Error("Not a directory: %s" % self.opts.conf_dir)

        self.config_data = serializer.LoadJson(
            utils.ReadFile(self.opts.CONFIG_DATA_PATH))

        try:
            config_version = self.config_data["version"]
        except KeyError:
            raise Error("Unable to determine configuration version")

        (config_major, config_minor, config_revision) = \
          version.SplitVersion(config_version)

        logging.info("Found configuration version %s (%d.%d.%d)",
                     config_version, config_major, config_minor,
                     config_revision)

        if "config_version" in self.config_data["cluster"]:
            raise Error("Inconsistent configuration: found config_version in"
                        " configuration file")

        # Downgrade to the previous stable version
        if self.opts.downgrade:
            self._Downgrade(config_major, config_minor, config_version,
                            config_revision)

        # Upgrade from 2.0-2.16 to 3.0
        # TODO: handle upgrades from 2.17beta
        elif config_major == 2 and config_minor in range(0, LAST_V2_MINOR + 1):
            if config_revision != 0:
                logging.warning("Config revision is %s, not 0",
                                config_revision)
            if not self.UpgradeAll():
                raise Error("Upgrade failed:\n%s" % '\n'.join(self.errors))

        elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
            logging.info("No changes necessary")

        else:
            raise Error(
                "Configuration version %d.%d.%d not supported by this tool" %
                (config_major, config_minor, config_revision))

        try:
            logging.info("Writing configuration file to %s",
                         self.opts.CONFIG_DATA_PATH)
            utils.WriteFile(file_name=self.opts.CONFIG_DATA_PATH,
                            data=serializer.DumpJson(self.config_data),
                            mode=0o600,
                            dry_run=self.opts.dry_run,
                            backup=True)

            if not self.opts.dry_run:
                # This creates the cluster certificate if it does not exist yet.
                # In this case, we do not automatically create a client certificate
                # as well, because if the cluster certificate did not exist before,
                # no client certificate will exist on any node yet. In this case
                # all client certificate should be renewed by 'gnt-cluster
                # renew-crypto --new-node-certificates'. This will be enforced
                # by a nagging warning in 'gnt-cluster verify'.
                bootstrap.GenerateClusterCrypto(
                    False,
                    False,
                    False,
                    False,
                    False,
                    False,
                    None,
                    nodecert_file=self.opts.SERVER_PEM_PATH,
                    rapicert_file=self.opts.RAPI_CERT_FILE,
                    spicecert_file=self.opts.SPICE_CERT_FILE,
                    spicecacert_file=self.opts.SPICE_CACERT_FILE,
                    hmackey_file=self.opts.CONFD_HMAC_KEY,
                    cds_file=self.opts.CDS_FILE)

        except Exception:
            logging.critical(
                "Writing configuration failed. It is probably in an"
                " inconsistent state and needs manual intervention.")
            raise

        self._TestLoadingConfigFile()
Example #20
 def _ReadManFile(name):
     return utils.ReadFile("%s/man/%s.rst" %
                           (testutils.GetSourceDir(), name))
Example #21
def _ReadDocFile(filename):
    return utils.ReadFile("%s/doc/%s" % (testutils.GetSourceDir(), filename))
Example #22
 def _CheckFile():
   if not (os.path.isfile(path) and
           utils.ReadFile(path).strip() == expected):
     raise utils.RetryAgain()
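Example #22 retries until a file exists and its stripped contents match an expected value (utils.Retry re-runs the function while it raises RetryAgain). A plain polling sketch of the same wait loop, with the timeout numbers chosen arbitrarily:

import os
import time

def wait_for_file_content(path, expected, timeout=10.0, interval=0.2):
    """Poll until path exists and contains exactly expected (stripped)."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if os.path.isfile(path):
            with open(path) as fh:
                if fh.read().strip() == expected:
                    return True
        time.sleep(interval)
    raise TimeoutError("File %s did not reach the expected content" % path)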
Example #23
 def _LoadConfig(self):
   return serializer.LoadJson(utils.ReadFile(self.config_path))
Example #24
 def testWriteSimpleUnicode(self):
     data = u"abc"
     utils.WriteFile(self.tfile.name, data=data)
     self.assertEqual(utils.ReadFile(self.tfile.name), data)
Example #25
 def testDryRun(self):
     orig = "abc"
     self.tfile.write(orig)
     self.tfile.flush()
     utils.WriteFile(self.tfile.name, data="hello", dry_run=True)
     self.assertEqual(utils.ReadFile(self.tfile.name), orig)
Example #26
 def testWrite(self):
     data = "abc"
     utils.WriteFile(self.tfile.name, data=data)
     self.assertEqual(utils.ReadFile(self.tfile.name), data)
Example #27
def ListDrbd(opts, args):
  """Modifies a node.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the node name
  @rtype: int
  @return: the desired exit code

  """
  if len(args) != 1:
    ToStderr("Please give one (and only one) node.")
    return constants.EXIT_FAILURE

  status = ReplyStatus()

  def ListDrbdConfdCallback(reply):
    """Callback for confd queries"""
    if reply.type == confd_client.UPCALL_REPLY:
      answer = reply.server_reply.answer
      reqtype = reply.orig_request.type
      if reqtype == constants.CONFD_REQ_NODE_DRBD:
        if reply.server_reply.status != constants.CONFD_REPL_STATUS_OK:
          ToStderr("Query gave non-ok status '%s': %s" %
                   (reply.server_reply.status,
                    reply.server_reply.answer))
          status.failure = True
          return
        if not confd.HTNodeDrbd(answer):
          ToStderr("Invalid response from server: expected %s, got %s",
                   confd.HTNodeDrbd, answer)
          status.failure = True
        else:
          status.failure = False
          status.answer = answer
      else:
        ToStderr("Unexpected reply %s!?", reqtype)
        status.failure = True

  node = args[0]
  hmac = utils.ReadFile(pathutils.CONFD_HMAC_KEY)
  filter_callback = confd_client.ConfdFilterCallback(ListDrbdConfdCallback)
  counting_callback = confd_client.ConfdCountingCallback(filter_callback)
  cf_client = confd_client.ConfdClient(hmac, [constants.IP4_ADDRESS_LOCALHOST],
                                       counting_callback)
  req = confd_client.ConfdClientRequest(type=constants.CONFD_REQ_NODE_DRBD,
                                        query=node)

  def DoConfdRequestReply(req):
    counting_callback.RegisterQuery(req.rsalt)
    cf_client.SendRequest(req, async=False)
    while not counting_callback.AllAnswered():
      if not cf_client.ReceiveReply():
        ToStderr("Did not receive all expected confd replies")
        break

  DoConfdRequestReply(req)

  if status.failure:
    return constants.EXIT_FAILURE

  fields = ["node", "minor", "instance", "disk", "role", "peer"]
  if opts.no_headers:
    headers = None
  else:
    headers = {"node": "Node", "minor": "Minor", "instance": "Instance",
               "disk": "Disk", "role": "Role", "peer": "PeerNode"}

  data = GenerateTable(separator=opts.separator, headers=headers,
                       fields=fields, data=sorted(status.answer),
                       numfields=["minor"])
  for line in data:
    ToStdout(line)

  return constants.EXIT_SUCCESS
Example #28
 def testSuccessfulCheck(self):
     cert_filename = testutils.TestDataFilename("cert1.pem")
     cert_pem = utils.ReadFile(cert_filename)
     prepare_node_join._VerifyCertificate(cert_pem, _check_fn=self._Check)
Example #29
 def testCallback(self):
   def _Cb(fh):
     self.assertEqual(fh.tell(), 0)
   data = utils.ReadFile(testutils.TestDataFilename("cert1.pem"), preread=_Cb)
   self.assertEqual(len(data), 814)
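Example #29 shows the preread hook of utils.ReadFile: a callable invoked with the open file handle before any data is read, which is how FileStatHelper captures stat information in the earlier examples. A minimal reimplementation of the idea; read_file and FileStat are illustrative names, not Ganeti's actual functions:

import os

def read_file(filename, size=-1, preread=None):
    """Read a file, first handing the open file object to an optional callback."""
    with open(filename, "r") as fh:
        if preread:
            preread(fh)
        return fh.read(size)

class FileStat(object):
    """Collects os.fstat() results when used as a preread callback."""
    def __init__(self):
        self.st = None

    def __call__(self, fh):
        self.st = os.fstat(fh.fileno())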
Example #30
    for (filename, required) in es_files.items():
        try:
            # Here we actually fill the dict with the absolute path name for each
            # script or None, depending on the corresponding checks. See the
            # function's docstring for more on these checks.
            es_files[filename] = _CheckExtStorageFile(es_dir, filename,
                                                      required)
        except errors.BlockDeviceError as err:
            return False, str(err)

    parameters = []
    if constants.ES_PARAMETERS_FILE in es_files:
        parameters_file = es_files[constants.ES_PARAMETERS_FILE]
        try:
            parameters = utils.ReadFile(parameters_file).splitlines()
        except EnvironmentError as err:
            return False, (
                "Error while reading the EXT parameters file at %s: %s" %
                (parameters_file, utils.ErrnoOrStr(err)))
        parameters = [v.split(None, 1) for v in parameters]

    es_obj = \
      objects.ExtStorage(name=name, path=es_dir,
                         create_script=es_files[constants.ES_SCRIPT_CREATE],
                         remove_script=es_files[constants.ES_SCRIPT_REMOVE],
                         grow_script=es_files[constants.ES_SCRIPT_GROW],
                         attach_script=es_files[constants.ES_SCRIPT_ATTACH],
                         detach_script=es_files[constants.ES_SCRIPT_DETACH],
                         setinfo_script=es_files[constants.ES_SCRIPT_SETINFO],
                         verify_script=es_files[constants.ES_SCRIPT_VERIFY],
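Example #30 is a fragment of ExtStorage provider validation; the parameters file is read line by line and each line is split once on whitespace into a name/description pair. A small sketch of that parsing step on literal input; the sample lines are invented:

def parse_parameters(text):
    """Split each non-empty line into a [name, description] pair, as above."""
    return [v.split(None, 1) for v in text.splitlines() if v.strip()]

sample = "size Size of the volume\nredundancy Number of replicas\n"
assert parse_parameters(sample) == [["size", "Size of the volume"],
                                    ["redundancy", "Number of replicas"]]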