Example #1
def config_show(argv):
    print("Cluster Name: %s" % utils.getClusterName())
    status.nodes_status(["config"])
    print()
    config_show_cib()
    if (
        utils.hasCorosyncConf()
        and
        (
            utils.is_rhel6()
            or
            (not utils.usefile and "--corosync_conf" not in utils.pcs_options)
        )
    ):
        # with corosync 1 and cman, uid gid is part of cluster.conf file
        # with corosync 2, uid gid is in a separate directory
        cluster.cluster_uidgid([], True)
    if (
        "--corosync_conf" in utils.pcs_options
        or
        (not utils.is_rhel6() and utils.hasCorosyncConf())
    ):
        print()
        print("Quorum:")
        try:
            config = lib_quorum.get_config(utils.get_lib_env())
            print("\n".join(indent(quorum.quorum_config_to_str(config))))
        except LibraryError as e:
            utils.process_library_reports(e.args)
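
Every example on this page branches on utils.is_rhel6(), which tells CMAN/corosync 1 clusters (RHEL 6) apart from corosync 2 clusters. A minimal sketch of such a check, assuming a Red Hat style release file (hypothetical, not the actual pcs implementation):

import re

def is_rhel6(release_file="/etc/redhat-release"):
    # e.g. "Red Hat Enterprise Linux Server release 6.8 (Santiago)"
    try:
        with open(release_file) as release:
            content = release.read()
    except EnvironmentError:
        return False
    match = re.search(r"release (\d+)", content)
    return match is not None and match.group(1) == "6"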
Example #2
def check_nodes(node_list, prefix=""):
    """
    Print pcsd status for each node in node_list; return True if any pcsd is not online
    """
    if not utils.is_rhel6():
        pm_nodes = utils.getPacemakerNodesID(allow_failure=True)
        cs_nodes = utils.getCorosyncNodesID(allow_failure=True)

    STATUS_ONLINE = 0
    status_desc_map = {
        STATUS_ONLINE: 'Online',
        3: 'Unable to authenticate'
    }
    status_list = []
    def report(node, returncode, output):
        print("{0}{1}: {2}".format(
            prefix,
            node if utils.is_rhel6() else utils.prepare_node_name(
                node, pm_nodes, cs_nodes
            ),
            status_desc_map.get(returncode, 'Offline')
        ))
        status_list.append(returncode)

    utils.run_parallel(
        utils.create_task_list(report, utils.checkAuthorization, node_list)
    )

    return any([status != STATUS_ONLINE for status in status_list])
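
The report callback collects each node's status code while utils.run_parallel runs one utils.checkAuthorization call per node. Those two pcs helpers are not shown on this page; a simplified, hypothetical sketch of how they plausibly fit together:

import threading

def create_task_list(report, worker, node_list):
    # Build one zero-argument task per node: run the worker and hand
    # its result to the reporting callback.
    def make_task(node):
        def task():
            returncode, output = worker(node)
            report(node, returncode, output)
        return task
    return [make_task(node) for node in node_list]

def run_parallel(task_list):
    # Start every task in its own thread and wait for all of them.
    threads = [threading.Thread(target=task) for task in task_list]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()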
Example #3
def full_status():
    if "--hide-inactive" in utils.pcs_options and "--full" in utils.pcs_options:
        utils.err("you cannot specify both --hide-inactive and --full")

    monitor_command = ["crm_mon", "--one-shot"]
    if "--hide-inactive" not in utils.pcs_options:
        monitor_command.append('--inactive')
    if "--full" in utils.pcs_options:
        monitor_command.extend(
            ["--show-detail", "--show-node-attributes", "--failcounts"])

    output, retval = utils.run(monitor_command)

    if (retval != 0):
        utils.err("cluster is not currently running on this node")

    if not utils.usefile or "--corosync_conf" in utils.pcs_options:
        cluster_name = utils.getClusterName()
        print("Cluster name: %s" % cluster_name)

    status_stonith_check()

    if (not utils.usefile and not utils.is_rhel6()
            and utils.corosyncPacemakerNodeCheck()):
        print(
            "WARNING: corosync and pacemaker node names do not match (IPs used in setup?)"
        )

    print(output)

    if not utils.usefile:
        if "--full" in utils.pcs_options and utils.hasCorosyncConf():
            print_pcsd_daemon_status()
            print()
        utils.serviceStatus("  ")
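
For illustration, "pcs status --full" makes the code above build this command line (--inactive is appended because --hide-inactive was not given):

monitor_command = [
    "crm_mon", "--one-shot", "--inactive",
    "--show-detail", "--show-node-attributes", "--failcounts",
]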
Example #4
def full_status():
    if "--hide-inactive" in utils.pcs_options and "--full" in utils.pcs_options:
        utils.err("you cannot specify both --hide-inactive and --full")

    monitor_command = ["crm_mon", "--one-shot"]
    if "--hide-inactive" not in utils.pcs_options:
        monitor_command.append('--inactive')
    if "--full" in utils.pcs_options:
        monitor_command.extend(
            ["--show-detail", "--show-node-attributes", "--failcounts"]
        )

    output, retval = utils.run(monitor_command)

    if (retval != 0):
        utils.err("cluster is not currently running on this node")

    if not utils.usefile or "--corosync_conf" in utils.pcs_options:
        cluster_name = utils.getClusterName()
        print("Cluster name: %s" % cluster_name)

    if utils.stonithCheck():
        print("WARNING: no stonith devices and stonith-enabled is not false")

    if not utils.is_rhel6() and utils.corosyncPacemakerNodeCheck():
        print("WARNING: corosync and pacemaker node names do not match (IPs used in setup?)")

    print(output)

    if not utils.usefile:
        if  "--full" in utils.pcs_options:
            print_pcsd_daemon_status()
            print()
        utils.serviceStatus("  ")
Example #5
    def test_node_add_rhel6_missing_fence_pcmk(self):
        if not utils.is_rhel6():
            return

        # change the fence device name to a value different from the one set by pcs
        with open(cluster_conf_file, "r") as f:
            data = f.read()
        data = data.replace('agent="fence_pcmk"', 'agent="fence_whatever"')
        with open(cluster_conf_tmp, "w") as f:
            f.write(data)

        # test a node is added correctly and uses the fence device
        output, returnVal = pcs(
            temp_cib,
            "cluster localnode add --cluster_conf={0} rh7-3.localhost"
            .format(cluster_conf_tmp)
        )
        ac(output, "rh7-3.localhost: successfully added!\n")
        self.assertEqual(returnVal, 0)
        with open(cluster_conf_tmp) as f:
            data = f.read()
            ac(data, """\
<cluster config_version="14" name="test99">
  <fence_daemon/>
  <clusternodes>
    <clusternode name="rh7-1" nodeid="1">
      <fence>
        <method name="pcmk-method">
          <device name="pcmk-redirect" port="rh7-1"/>
        </method>
      </fence>
    </clusternode>
    <clusternode name="rh7-2" nodeid="2">
      <fence>
        <method name="pcmk-method">
          <device name="pcmk-redirect" port="rh7-2"/>
        </method>
      </fence>
    </clusternode>
    <clusternode name="rh7-3.localhost" nodeid="3">
      <fence>
        <method name="pcmk-method">
          <device name="pcmk-redirect-1" port="rh7-3.localhost"/>
        </method>
      </fence>
    </clusternode>
  </clusternodes>
  <cman broadcast="no" transport="udpu"/>
  <fencedevices>
    <fencedevice agent="fence_whatever" name="pcmk-redirect"/>
    <fencedevice agent="fence_pcmk" name="pcmk-redirect-1"/>
  </fencedevices>
  <rm>
    <failoverdomains/>
    <resources/>
  </rm>
</cluster>
""")
Example #6
 def report(node, returncode, output):
     print("{0}{1}: {2}".format(
         prefix,
         node if utils.is_rhel6() else utils.prepare_node_name(
             node, pm_nodes, cs_nodes
         ),
         status_desc_map.get(returncode, 'Offline')
     ))
     status_list.append(returncode)
Example #7
File: quorum.py Project: idevat/pcs
def quorum_unblock_cmd(argv):
    if len(argv) > 0:
        usage.quorum(["unblock"])
        sys.exit(1)

    if utils.is_rhel6():
        utils.err("operation is not supported on CMAN clusters")

    output, retval = utils.run(
        ["corosync-cmapctl", "-g", "runtime.votequorum.wait_for_all_status"]
    )
    if retval != 0:
        utils.err("unable to check quorum status")
    if output.split("=")[-1].strip() != "1":
        utils.err("cluster is not waiting for nodes to establish quorum")

    unjoined_nodes = (
        set(utils.getNodesFromCorosyncConf())
        -
        set(utils.getCorosyncActiveNodes())
    )
    if not unjoined_nodes:
        utils.err("no unjoined nodes found")
    if "--force" not in utils.pcs_options:
        answer = utils.get_terminal_input(
            (
                "WARNING: If node(s) {nodes} are not powered off or they do"
                + " have access to shared resources, data corruption and/or"
                + " cluster failure may occur. Are you sure you want to"
                + " continue? [y/N] "
            ).format(nodes=", ".join(unjoined_nodes))
        )
        if answer.lower() not in ["y", "yes"]:
            print("Canceled")
            return
    for node in unjoined_nodes:
        stonith.stonith_confirm([node], skip_question=True)

    output, retval = utils.run(
        ["corosync-cmapctl", "-s", "quorum.cancel_wait_for_all", "u8", "1"]
    )
    if retval != 0:
        utils.err("unable to cancel waiting for nodes")
    print("Quorum unblocked")

    startup_fencing = utils.get_set_properties().get("startup-fencing", "")
    utils.set_cib_property(
        "startup-fencing",
        "false" if startup_fencing.lower() != "false" else "true"
    )
    utils.set_cib_property("startup-fencing", startup_fencing)
    print("Waiting for nodes canceled")
Example #8
    def test_stonith_create_provides_unfencing_rhel6(self):
        if not utils.is_rhel6():
            return

        output, returnVal = pcs(
            temp_cib,
            "stonith create f1 fence_mpath key=abc"
        )
        ac(output, "")
        self.assertEqual(0, returnVal)

        output, returnVal = pcs(
            temp_cib,
            "stonith create f2 fence_mpath key=abc meta provides=unfencing"
        )
        ac(output, "")
        self.assertEqual(0, returnVal)

        output, returnVal = pcs(
            temp_cib,
            "stonith create f3 fence_mpath key=abc meta provides=something"
        )
        ac(output, "")
        self.assertEqual(0, returnVal)

        output, returnVal = pcs(
            temp_cib,
            "stonith create f4 fence_xvm meta provides=something"
        )
        ac(output, "")
        self.assertEqual(0, returnVal)

        output, returnVal = pcs(temp_cib, "stonith show --full")
        ac(output, """\
 Resource: f1 (class=stonith type=fence_mpath)
  Attributes: key=abc
  Meta Attrs: provides=unfencing 
  Operations: monitor interval=60s (f1-monitor-interval-60s)
 Resource: f2 (class=stonith type=fence_mpath)
  Attributes: key=abc
  Meta Attrs: provides=unfencing 
  Operations: monitor interval=60s (f2-monitor-interval-60s)
 Resource: f3 (class=stonith type=fence_mpath)
  Attributes: key=abc
  Meta Attrs: provides=unfencing 
  Operations: monitor interval=60s (f3-monitor-interval-60s)
 Resource: f4 (class=stonith type=fence_xvm)
  Meta Attrs: provides=something 
  Operations: monitor interval=60s (f4-monitor-interval-60s)
""")
        self.assertEqual(0, returnVal)
Example #9
def cluster_pcsd_status(argv, dont_exit=False):
    bad_nodes = False
    if len(argv) == 0:
        nodes = utils.getNodesFromCorosyncConf()
        if len(nodes) == 0:
            if utils.is_rhel6():
                utils.err("no nodes found in cluster.conf")
            else:
                utils.err("no nodes found in corosync.conf")
        bad_nodes = check_nodes(nodes, "  ")
    else:
        bad_nodes = check_nodes(argv, "  ")
    if bad_nodes and not dont_exit:
        sys.exit(2)
Example #10
def config_show(argv):
    print("Cluster Name: %s" % utils.getClusterName())
    status.nodes_status(["config"])
    print()
    config_show_cib()
    cluster.cluster_uidgid([], True)
    if "--corosync_conf" in utils.pcs_options or not utils.is_rhel6():
        print()
        print("Quorum:")
        try:
            config = lib_quorum.get_config(utils.get_lib_env())
            print("\n".join(indent(quorum.quorum_config_to_str(config))))
        except LibraryError as e:
            utils.process_library_reports(e.args)
Example #11
def cluster_get_corosync_conf(argv):
    if utils.is_rhel6():
        utils.err("corosync.conf is not supported on CMAN clusters")

    if len(argv) > 1:
        usage.cluster()
        exit(1)

    if len(argv) == 0:
        print(utils.getCorosyncConf().rstrip())
        return

    node = argv[0]
    retval, output = utils.getCorosyncConfig(node)
    if retval != 0:
        utils.err(output)
    else:
        print(output.rstrip())
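
This function backs the "pcs cluster corosync [node]" command: with no argument it prints the local corosync.conf, and with a node name it fetches that node's configuration via utils.getCorosyncConfig.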
Example #12
def config_restore_remote(infile_name, infile_obj):
    extracted = {
        "version.txt": "",
        "corosync.conf": "",
        "cluster.conf": "",
    }
    try:
        tarball = tarfile.open(infile_name, "r|*", infile_obj)
        while True:
            # next(tarball) does not work in python2.6
            tar_member_info = tarball.next()
            if tar_member_info is None:
                break
            if tar_member_info.name in extracted:
                tar_member = tarball.extractfile(tar_member_info)
                extracted[tar_member_info.name] = tar_member.read()
                tar_member.close()
        tarball.close()
    except (tarfile.TarError, EnvironmentError) as e:
        utils.err("unable to read the tarball: %s" % e)

    config_backup_check_version(extracted["version.txt"])

    node_list = utils.getNodesFromCorosyncConf(
        extracted["cluster.conf" if utils.is_rhel6() else "corosync.conf"].decode("utf-8")
    )
    if not node_list:
        utils.err("no nodes found in the tarball")

    err_msgs = []
    for node in node_list:
        try:
            retval, output = utils.checkStatus(node)
            if retval != 0:
                err_msgs.append(output)
                continue
            status = json.loads(output)
            if (
                status["corosync"]
                or
                status["pacemaker"]
                or
                status["cman"]
                or
                # not supported by older pcsd, do not fail if not present
                status.get("pacemaker_remote", False)
            ):
                err_msgs.append(
                    "Cluster is currently running on node %s. You need to stop "
                        "the cluster in order to restore the configuration."
                    % node
                )
                continue
        except (ValueError, NameError, LookupError):
            err_msgs.append("unable to determine status of the node %s" % node)
    if err_msgs:
        for msg in err_msgs:
            utils.err(msg, False)
        sys.exit(1)

    # Temporarily disable config files syncing thread in pcsd so it will not
    # rewrite restored files. 10 minutes should be enough time to restore.
    # If node returns HTTP 404 it does not support config syncing at all.
    for node in node_list:
        retval, output = utils.pauseConfigSyncing(node, 10 * 60)
        if not (retval == 0 or "(HTTP error: 404)" in output):
            utils.err(output)

    if infile_obj:
        infile_obj.seek(0)
        tarball_data = infile_obj.read()
    else:
        with open(infile_name, "rb") as tarball:
            tarball_data = tarball.read()

    error_list = []
    for node in node_list:
        retval, error = utils.restoreConfig(node, tarball_data)
        if retval != 0:
            error_list.append(error)
    if error_list:
        utils.err("unable to restore all nodes\n" + "\n".join(error_list))
Example #13
def config_export_pcs_commands(argv, verbose=False):
    if no_clufter:
        utils.err(
            "Unable to perform export due to missing python-clufter package")

    # parse options
    debug = "--debug" in utils.pcs_options
    force = "--force" in utils.pcs_options
    interactive = "--interactive" in utils.pcs_options
    invalid_args = False
    output_file = None
    dist = None
    for arg in argv:
        if "=" in arg:
            name, value = arg.split("=", 1)
            if name == "output":
                output_file = value
            elif name == "dist":
                dist = value
            else:
                invalid_args = True
        else:
            invalid_args = True
    # check options
    if invalid_args:
        usage.config(["export", "pcs-commands"])
        sys.exit(1)
    # complete optional options
    if dist is None:
        dist = ",".join(platform.linux_distribution(full_distribution_name=0))

    # prepare convertor options
    clufter_args = {
        "nocheck": force,
        "batch": True,
        "sys": "linux",
        "dist": dist,
        "coro": settings.corosync_conf_file,
        "ccs": settings.cluster_conf_file,
        "start_wait": "60",
        "tmp_cib": "tmp-cib.xml",
        "force": force,
        "text_width": "80",
        "silent": True,
        "noguidance": True,
    }
    if output_file:
        clufter_args["output"] = {"passin": "bytestring"}
    else:
        clufter_args["output"] = "-"
    if interactive:
        if "EDITOR" not in os.environ:
            utils.err("$EDITOR environment variable is not set")
        clufter_args["batch"] = False
        clufter_args["editor"] = os.environ["EDITOR"]
    if debug:
        logging.getLogger("clufter").setLevel(logging.DEBUG)
    if utils.usefile:
        clufter_args["cib"] = os.path.abspath(utils.filename)
    else:
        clufter_args["cib"] = ("bytestring", utils.get_cib())
    if verbose:
        clufter_args["text_width"] = "-1"
        clufter_args["silent"] = False
        clufter_args["noguidance"] = False
    clufter_args_obj = type(str("ClufterOptions"), (object, ), clufter_args)
    cmd_name = "pcs2pcscmd-flatiron" if utils.is_rhel6(
    ) else "pcs2pcscmd-needle"

    # run convertor
    run_clufter(cmd_name, clufter_args_obj, debug, force,
                "Error: unable to export cluster configuration")

    # save commands if not printed to stdout by clufter
    if output_file:
        ok, message = utils.write_file(
            output_file, clufter_args_obj.output["passout"].decode())
        if not ok:
            utils.err(message)
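
The type(str("ClufterOptions"), (object, ), clufter_args) call turns the options dict into an object with one attribute per key, which is the access style clufter expects; wrapping the name in str() keeps it a byte string on Python 2, where type() rejects a unicode class name. The idiom in isolation:

opts = type(str("Options"), (object,), {"batch": True, "sys": "linux"})
assert opts.batch is True
assert opts.sys == "linux"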
Example #14
from __future__ import (
    absolute_import,
    division,
    print_function,
    unicode_literals,
)

from pcs import utils
from pcs.test.cib_resource.common import ResourceTest
from pcs.test.tools import pcs_unittest as unittest
from pcs.test.cib_resource.stonith_common import need_load_xvm_fence_agent

need_fence_scsi_providing_unfencing = unittest.skipUnless(
    not utils.is_rhel6(),
    "test requires system where stonith agent 'fence_scsi' provides unfencing")


class PlainStonith(ResourceTest):
    @need_load_xvm_fence_agent
    def test_simplest(self):
        self.assert_effect(
            "stonith create S fence_xvm", """<resources>
                <primitive class="stonith" id="S" type="fence_xvm">
                    <operations>
                        <op id="S-monitor-interval-60s" interval="60s"
                            name="monitor"
                        />
                    </operations>
                </primitive>
            </resources>""")
Example #15
def config_backup_path_list(with_uid_gid=False, force_rhel6=None):
    rhel6 = utils.is_rhel6() if force_rhel6 is None else force_rhel6
    corosync_attrs = {
        "mtime": int(time.time()),
        "mode": 0o644,
        "uname": "root",
        "gname": "root",
        "uid": 0,
        "gid": 0,
    }
    corosync_authkey_attrs = dict(corosync_attrs)
    corosync_authkey_attrs["mode"] = 0o400
    cib_attrs = {
        "mtime": int(time.time()),
        "mode": 0o600,
        "uname": settings.pacemaker_uname,
        "gname": settings.pacemaker_gname,
    }
    if with_uid_gid:
        cib_attrs["uid"] = _get_uid(cib_attrs["uname"])
        cib_attrs["gid"] = _get_gid(cib_attrs["gname"])

    pcmk_authkey_attrs = dict(cib_attrs)
    pcmk_authkey_attrs["mode"] = 0o440
    file_list = {
        "cib.xml": {
            "path": os.path.join(settings.cib_dir, "cib.xml"),
            "required": True,
            "attrs": dict(cib_attrs),
        },
        "corosync_authkey": {
            "path": settings.corosync_authkey_file,
            "required": False,
            "attrs": corosync_authkey_attrs,
            "restore_procedure": None,
            "rename": True,
        },
        "pacemaker_authkey": {
            "path": settings.pacemaker_authkey_file,
            "required": False,
            "attrs": pcmk_authkey_attrs,
            "restore_procedure": None,
            "rename": True,
            "pre_store_call": _ensure_etc_pacemaker_exists,
        },
    }
    if rhel6:
        file_list["cluster.conf"] = {
            "path": settings.cluster_conf_file,
            "required": True,
            "attrs": dict(corosync_attrs),
        }
    else:
        file_list["corosync.conf"] = {
            "path": settings.corosync_conf_file,
            "required": True,
            "attrs": dict(corosync_attrs),
        }
        file_list["uidgid.d"] = {
            "path": settings.corosync_uidgid_dir.rstrip("/"),
            "required": False,
            "attrs": dict(corosync_attrs),
        }
        file_list["pcs_settings.conf"] = {
            "path": settings.pcsd_settings_conf_location,
            "required": False,
            "attrs": {
                "mtime": int(time.time()),
                "mode": 0o644,
                "uname": "root",
                "gname": "root",
                "uid": 0,
                "gid": 0,
            },
        }
    return file_list
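
_get_uid and _get_gid resolve the pacemaker user and group names to numeric ids. An older variant of this function, shown later on this page, performs the same lookup inline through the pwd and grp modules, so the helpers presumably reduce to:

import grp
import pwd

def _get_uid(user_name):
    # pwd raises KeyError when the user does not exist.
    return pwd.getpwnam(user_name).pw_uid

def _get_gid(group_name):
    # grp raises KeyError when the group does not exist.
    return grp.getgrnam(group_name).gr_gid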
Example #16
def config_import_cman(argv):
    if no_clufter:
        utils.err(
            "Unable to perform a CMAN cluster conversion due to missing python-clufter package"
        )
    # prepare convertor options
    cluster_conf = settings.cluster_conf_file
    dry_run_output = None
    output_format = "cluster.conf" if utils.is_rhel6() else "corosync.conf"
    dist = None
    invalid_args = False
    for arg in argv:
        if "=" in arg:
            name, value = arg.split("=", 1)
            if name == "input":
                cluster_conf = value
            elif name == "output":
                dry_run_output = value
            elif name == "output-format":
                if value in (
                        "cluster.conf",
                        "corosync.conf",
                        "pcs-commands",
                        "pcs-commands-verbose",
                ):
                    output_format = value
                else:
                    invalid_args = True
            elif name == "dist":
                dist = value
            else:
                invalid_args = True
        else:
            invalid_args = True
    if (output_format not in ("pcs-commands", "pcs-commands-verbose")
            and (dry_run_output and not dry_run_output.endswith(".tar.bz2"))):
        dry_run_output += ".tar.bz2"
    if invalid_args or not dry_run_output:
        usage.config(["import-cman"])
        sys.exit(1)
    debug = "--debug" in utils.pcs_options
    force = "--force" in utils.pcs_options
    interactive = "--interactive" in utils.pcs_options

    if dist is not None:
        if output_format == "cluster.conf":
            if not clufter.facts.cluster_pcs_flatiron("linux",
                                                      dist.split(",")):
                utils.err("dist does not match output-format")
        elif output_format == "corosync.conf":
            if not clufter.facts.cluster_pcs_needle("linux", dist.split(",")):
                utils.err("dist does not match output-format")
    elif ((output_format == "cluster.conf" and utils.is_rhel6())
          or (output_format == "corosync.conf" and not utils.is_rhel6())):
        dist = ",".join(platform.linux_distribution(full_distribution_name=0))
    elif output_format == "cluster.conf":
        dist = "redhat,6.7,Santiago"
    elif output_format == "corosync.conf":
        dist = "redhat,7.1,Maipo"
    else:
        # for output-format=pcs-commands[-verbose]
        dist = ",".join(platform.linux_distribution(full_distribution_name=0))

    clufter_args = {
        "input": str(cluster_conf),
        "cib": {
            "passin": "bytestring"
        },
        "nocheck": force,
        "batch": True,
        "sys": "linux",
        "dist": dist,
    }
    if interactive:
        if "EDITOR" not in os.environ:
            utils.err("$EDITOR environment variable is not set")
        clufter_args["batch"] = False
        clufter_args["editor"] = os.environ["EDITOR"]
    if debug:
        logging.getLogger("clufter").setLevel(logging.DEBUG)
    if output_format == "cluster.conf":
        clufter_args["ccs_pcmk"] = {"passin": "bytestring"}
        cmd_name = "ccs2pcs-flatiron"
    elif output_format == "corosync.conf":
        clufter_args["coro"] = {"passin": "struct"}
        cmd_name = "ccs2pcs-needle"
    elif output_format in ("pcs-commands", "pcs-commands-verbose"):
        clufter_args["output"] = {"passin": "bytestring"}
        clufter_args["start_wait"] = "60"
        clufter_args["tmp_cib"] = "tmp-cib.xml"
        clufter_args["force"] = force
        clufter_args["text_width"] = "80"
        clufter_args["silent"] = True
        clufter_args["noguidance"] = True
        if output_format == "pcs-commands-verbose":
            clufter_args["text_width"] = "-1"
            clufter_args["silent"] = False
            clufter_args["noguidance"] = False
        if clufter.facts.cluster_pcs_flatiron("linux", dist.split(",")):
            cmd_name = "ccs2pcscmd-flatiron"
        elif clufter.facts.cluster_pcs_needle("linux", dist.split(",")):
            cmd_name = "ccs2pcscmd-needle"
        else:
            utils.err(
                "unrecognized dist, try something recognized" +
                " (e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty)")
    clufter_args_obj = type(str("ClufterOptions"), (object, ), clufter_args)

    # run convertor
    run_clufter(cmd_name, clufter_args_obj, debug, force,
                "Error: unable to import cluster configuration")

    # save commands
    if output_format in ("pcs-commands", "pcs-commands-verbose"):
        ok, message = utils.write_file(
            dry_run_output, clufter_args_obj.output["passout"].decode())
        if not ok:
            utils.err(message)
        return

    # put new config files into tarball
    file_list = config_backup_path_list(
        force_rhel6=(output_format == "cluster.conf"))
    for file_item in file_list.values():
        file_item["attrs"]["uname"] = "root"
        file_item["attrs"]["gname"] = "root"
        file_item["attrs"]["uid"] = 0
        file_item["attrs"]["gid"] = 0
        file_item["attrs"]["mode"] = 0o600
    tar_data = BytesIO()
    try:
        tarball = tarfile.open(fileobj=tar_data, mode="w|bz2")
        config_backup_add_version_to_tarball(tarball)
        utils.tar_add_file_data(tarball, clufter_args_obj.cib["passout"],
                                "cib.xml", **file_list["cib.xml"]["attrs"])
        if output_format == "cluster.conf":
            utils.tar_add_file_data(tarball,
                                    clufter_args_obj.ccs_pcmk["passout"],
                                    "cluster.conf",
                                    **file_list["cluster.conf"]["attrs"])
        else:
            # put uidgid into separate files
            fmt_simpleconfig = clufter.format_manager.FormatManager.init_lookup(
                'simpleconfig').plugins['simpleconfig']
            corosync_struct = []
            uidgid_list = []
            for section in clufter_args_obj.coro["passout"][2]:
                if section[0] == "uidgid":
                    uidgid_list.append(section[1])
                else:
                    corosync_struct.append(section)
            corosync_conf_data = fmt_simpleconfig(
                "struct", ("corosync", (), corosync_struct))("bytestring")
            utils.tar_add_file_data(tarball, corosync_conf_data,
                                    "corosync.conf",
                                    **file_list["corosync.conf"]["attrs"])
            for uidgid in uidgid_list:
                uid = ""
                gid = ""
                for item in uidgid:
                    if item[0] == "uid":
                        uid = item[1]
                    if item[0] == "gid":
                        gid = item[1]
                filename = utils.get_uid_gid_file_name(uid, gid)
                uidgid_data = fmt_simpleconfig(
                    "struct",
                    ("corosync", (), [("uidgid", uidgid, None)]))("bytestring")
                utils.tar_add_file_data(tarball, uidgid_data,
                                        "uidgid.d/" + filename,
                                        **file_list["uidgid.d"]["attrs"])
        tarball.close()
    except (tarfile.TarError, EnvironmentError) as e:
        utils.err("unable to create tarball: %s" % e)
    tar_data.seek(0)

    # save tarball / remote restore
    if dry_run_output:
        ok, message = utils.write_file(dry_run_output,
                                       tar_data.read(),
                                       permissions=0o600,
                                       binary=True)
        if not ok:
            utils.err(message)
    else:
        config_restore_remote(None, tar_data)
    tar_data.close()
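
To summarize the branching above: output-format selects the clufter command, with cluster.conf mapping to ccs2pcs-flatiron, corosync.conf to ccs2pcs-needle, and pcs-commands[-verbose] to ccs2pcscmd-flatiron or ccs2pcscmd-needle depending on which cluster stack the dist facts describe.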
Example #17
def config_export_pcs_commands(argv, verbose=False):
    if no_clufter:
        utils.err(
            "Unable to perform export due to missing python-clufter package"
        )

    # parse options
    debug = "--debug" in utils.pcs_options
    force = "--force" in utils.pcs_options
    interactive = "--interactive" in utils.pcs_options
    invalid_args = False
    output_file = None
    dist = None
    for arg in argv:
        if "=" in arg:
            name, value = arg.split("=", 1)
            if name == "output":
                output_file = value
            elif name == "dist":
                dist = value
            else:
                invalid_args = True
        else:
            invalid_args = True
    # check options
    if invalid_args:
        usage.config(["export", "pcs-commands"])
        sys.exit(1)
    # complete optional options
    if dist is None:
        dist = ",".join(platform.linux_distribution(full_distribution_name=0))

    # prepare convertor options
    clufter_args = {
        "nocheck": force,
        "batch": True,
        "sys": "linux",
        "dist": dist,
        # Make it work on RHEL6 as well for sure
        "color": "always" if sys.stdout.isatty() else "never",
        "coro": settings.corosync_conf_file,
        "ccs": settings.cluster_conf_file,
        "start_wait": "60",
        "tmp_cib": "tmp-cib.xml",
        "force": force,
        "text_width": "80",
        "silent": True,
        "noguidance": True,
    }
    if output_file:
        clufter_args["output"] = {"passin": "bytestring"}
    else:
        clufter_args["output"] = "-"
    if interactive:
        if "EDITOR" not in os.environ:
            utils.err("$EDITOR environment variable is not set")
        clufter_args["batch"] = False
        clufter_args["editor"] = os.environ["EDITOR"]
    if debug:
        logging.getLogger("clufter").setLevel(logging.DEBUG)
    if utils.usefile:
        clufter_args["cib"] = os.path.abspath(utils.filename)
    else:
        clufter_args["cib"] = ("bytestring", utils.get_cib())
    if verbose:
        clufter_args["text_width"] = "-1"
        clufter_args["silent"] = False
        clufter_args["noguidance"] = False
    clufter_args_obj = type(str("ClufterOptions"), (object, ), clufter_args)
    cmd_name = "pcs2pcscmd-flatiron" if utils.is_rhel6() else "pcs2pcscmd-needle"

    # run convertor
    run_clufter(
        cmd_name, clufter_args_obj, debug, force,
        "Error: unable to export cluster configuration"
    )

    # save commands if not printed to stdout by clufter
    if output_file:
        ok, message = utils.write_file(
            output_file,
            clufter_args_obj.output["passout"]
        )
        if not ok:
            utils.err(message)
Example #18
def config_import_cman(argv):
    if no_clufter:
        utils.err("Unable to perform a CMAN cluster conversion due to missing python-clufter package")
    # prepare convertor options
    cluster_conf = settings.cluster_conf_file
    dry_run_output = None
    output_format = "cluster.conf" if utils.is_rhel6() else "corosync.conf"
    dist = None
    invalid_args = False
    for arg in argv:
        if "=" in arg:
            name, value = arg.split("=", 1)
            if name == "input":
                cluster_conf = value
            elif name == "output":
                dry_run_output = value
            elif name == "output-format":
                if value in (
                    "cluster.conf", "corosync.conf",
                    "pcs-commands", "pcs-commands-verbose",
                ):
                    output_format = value
                else:
                    invalid_args = True
            elif name == "dist":
                dist = value
            else:
                invalid_args = True
        else:
            invalid_args = True
    if (
        output_format not in ("pcs-commands", "pcs-commands-verbose")
        and
        (dry_run_output and not dry_run_output.endswith(".tar.bz2"))
    ):
        dry_run_output += ".tar.bz2"
    if invalid_args or not dry_run_output:
        usage.config(["import-cman"])
        sys.exit(1)
    debug = "--debug" in utils.pcs_options
    force = "--force" in utils.pcs_options
    interactive = "--interactive" in utils.pcs_options

    if dist is not None:
        if output_format == "cluster.conf":
            if not clufter.facts.cluster_pcs_flatiron("linux", dist.split(",")):
                utils.err("dist does not match output-format")
        elif output_format == "corosync.conf":
            if not clufter.facts.cluster_pcs_needle("linux", dist.split(",")):
                utils.err("dist does not match output-format")
    elif (
        (output_format == "cluster.conf" and utils.is_rhel6())
        or
        (output_format == "corosync.conf" and not utils.is_rhel6())
    ):
        dist = ",".join(platform.linux_distribution(full_distribution_name=0))
    elif output_format == "cluster.conf":
        dist = "redhat,6.7,Santiago"
    elif output_format == "corosync.conf":
        dist = "redhat,7.1,Maipo"
    else:
        # for output-format=pcs-commands[-verbose]
        dist = ",".join(platform.linux_distribution(full_distribution_name=0))

    clufter_args = {
        "input": str(cluster_conf),
        "cib": {"passin": "bytestring"},
        "nocheck": force,
        "batch": True,
        "sys": "linux",
        "dist": dist,
        # Make it work on RHEL6 as well for sure
        "color": "always" if sys.stdout.isatty() else "never"
    }
    if interactive:
        if "EDITOR" not in os.environ:
            utils.err("$EDITOR environment variable is not set")
        clufter_args["batch"] = False
        clufter_args["editor"] = os.environ["EDITOR"]
    if debug:
        logging.getLogger("clufter").setLevel(logging.DEBUG)
    if output_format == "cluster.conf":
        clufter_args["ccs_pcmk"] = {"passin": "bytestring"}
        cmd_name = "ccs2pcs-flatiron"
    elif output_format == "corosync.conf":
        clufter_args["coro"] = {"passin": "struct"}
        cmd_name = "ccs2pcs-needle"
    elif output_format in ("pcs-commands", "pcs-commands-verbose"):
        clufter_args["output"] = {"passin": "bytestring"}
        clufter_args["start_wait"] = "60"
        clufter_args["tmp_cib"] = "tmp-cib.xml"
        clufter_args["force"] = force
        clufter_args["text_width"] = "80"
        clufter_args["silent"] = True
        clufter_args["noguidance"] = True
        if output_format == "pcs-commands-verbose":
            clufter_args["text_width"] = "-1"
            clufter_args["silent"] = False
            clufter_args["noguidance"] = False
        if clufter.facts.cluster_pcs_flatiron("linux", dist.split(",")):
            cmd_name = "ccs2pcscmd-flatiron"
        elif clufter.facts.cluster_pcs_needle("linux", dist.split(",")):
            cmd_name = "ccs2pcscmd-needle"
        else:
            utils.err(
                "unrecognized dist, try something recognized"
                + " (e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty)"
            )
    clufter_args_obj = type(str("ClufterOptions"), (object, ), clufter_args)

    # run convertor
    run_clufter(
        cmd_name, clufter_args_obj, debug, force,
            "Error: unable to import cluster configuration"
    )

    # save commands
    if output_format in ("pcs-commands", "pcs-commands-verbose"):
        ok, message = utils.write_file(
            dry_run_output,
            clufter_args_obj.output["passout"]
        )
        if not ok:
            utils.err(message)
        return

    # put new config files into tarball
    file_list = config_backup_path_list(
        force_rhel6=(output_format == "cluster.conf")
    )
    for file_item in file_list.values():
        file_item["attrs"]["uname"] = "root"
        file_item["attrs"]["gname"] = "root"
        file_item["attrs"]["uid"] = 0
        file_item["attrs"]["gid"] = 0
        file_item["attrs"]["mode"] = 0o600
    tar_data = BytesIO()
    try:
        tarball = tarfile.open(fileobj=tar_data, mode="w|bz2")
        config_backup_add_version_to_tarball(tarball)
        utils.tar_add_file_data(
            tarball,
            clufter_args_obj.cib["passout"].encode("utf-8"),
            "cib.xml",
            **file_list["cib.xml"]["attrs"]
        )
        if output_format == "cluster.conf":
            utils.tar_add_file_data(
                tarball,
                clufter_args_obj.ccs_pcmk["passout"].encode("utf-8"),
                "cluster.conf",
                **file_list["cluster.conf"]["attrs"]
            )
        else:
            # put uidgid into separate files
            fmt_simpleconfig = clufter.format_manager.FormatManager.init_lookup(
                'simpleconfig'
            ).plugins['simpleconfig']
            corosync_struct = []
            uidgid_list = []
            for section in clufter_args_obj.coro["passout"][2]:
                if section[0] == "uidgid":
                    uidgid_list.append(section[1])
                else:
                    corosync_struct.append(section)
            corosync_conf_data = fmt_simpleconfig(
                "struct", ("corosync", (), corosync_struct)
            )("bytestring")
            utils.tar_add_file_data(
                tarball,
                corosync_conf_data.encode("utf-8"),
                "corosync.conf",
                **file_list["corosync.conf"]["attrs"]
            )
            for uidgid in uidgid_list:
                uid = ""
                gid = ""
                for item in uidgid:
                    if item[0] == "uid":
                        uid = item[1]
                    if item[0] == "gid":
                        gid = item[1]
                filename = utils.get_uid_gid_file_name(uid, gid)
                uidgid_data = fmt_simpleconfig(
                    "struct", ("corosync", (), [("uidgid", uidgid, None)])
                )("bytestring")
                utils.tar_add_file_data(
                    tarball,
                    uidgid_data.encode("utf-8"),
                    "uidgid.d/" + filename,
                    **file_list["uidgid.d"]["attrs"]
                )
        tarball.close()
    except (tarfile.TarError, EnvironmentError) as e:
        utils.err("unable to create tarball: %s" % e)
    tar_data.seek(0)

    # save tarball / remote restore
    if dry_run_output:
        ok, message = utils.write_file(
            dry_run_output, tar_data.read(), permissions=0o600, binary=True
        )
        if not ok:
            utils.err(message)
    else:
        config_restore_remote(None, tar_data)
    tar_data.close()
Example #19
def config_backup_path_list(with_uid_gid=False, force_rhel6=None):
    rhel6 = utils.is_rhel6() if force_rhel6 is None else force_rhel6
    corosync_attrs = {
        "mtime": int(time.time()),
        "mode": 0o644,
        "uname": "root",
        "gname": "root",
        "uid": 0,
        "gid": 0,
    }
    cib_attrs = {
        "mtime": int(time.time()),
        "mode": 0o600,
        "uname": settings.pacemaker_uname,
        "gname": settings.pacemaker_gname,
    }
    if with_uid_gid:
        try:
            cib_attrs["uid"] = pwd.getpwnam(cib_attrs["uname"]).pw_uid
        except KeyError:
            utils.err(
                "Unable to determine uid of user '%s'" % cib_attrs["uname"]
            )
        try:
            cib_attrs["gid"] = grp.getgrnam(cib_attrs["gname"]).gr_gid
        except KeyError:
            utils.err(
                "Unable to determine gid of group '%s'" % cib_attrs["gname"]
            )

    file_list = {
        "cib.xml": {
            "path": os.path.join(settings.cib_dir, "cib.xml"),
            "required": True,
            "attrs": dict(cib_attrs),
        },
    }
    if rhel6:
        file_list["cluster.conf"] = {
            "path": settings.cluster_conf_file,
            "required": True,
            "attrs": dict(corosync_attrs),
        }
    else:
        file_list["corosync.conf"] = {
            "path": settings.corosync_conf_file,
            "required": True,
            "attrs": dict(corosync_attrs),
        }
        file_list["uidgid.d"] = {
            "path": settings.corosync_uidgid_dir.rstrip("/"),
            "required": False,
            "attrs": dict(corosync_attrs),
        }
        file_list["pcs_settings.conf"] = {
            "path": settings.pcsd_settings_conf_location,
            "required": False,
            "attrs": {
                "mtime": int(time.time()),
                "mode": 0o644,
                "uname": "root",
                "gname": "root",
                "uid": 0,
                "gid": 0,
            },
        }
    return file_list
Example #20
    def testUIDGID(self):
        if utils.is_rhel6():
            os.system("cp {0} {1}".format(cluster_conf_file, cluster_conf_tmp))

            o,r = pcs(temp_cib, "cluster uidgid --cluster_conf={0}".format(cluster_conf_tmp))
            assert r == 0
            ac(o, "No uidgids configured in cluster.conf\n")

            o,r = pcs(temp_cib, "cluster uidgid blah --cluster_conf={0}".format(cluster_conf_tmp))
            assert r == 1
            assert o.startswith("\nUsage:")

            o,r = pcs(temp_cib, "cluster uidgid rm --cluster_conf={0}".format(cluster_conf_tmp))
            assert r == 1
            assert o.startswith("\nUsage:")

            o,r = pcs(temp_cib, "cluster uidgid add --cluster_conf={0}".format(cluster_conf_tmp))
            assert r == 1
            assert o.startswith("\nUsage:")

            o,r = pcs(temp_cib, "cluster uidgid add blah --cluster_conf={0}".format(cluster_conf_tmp))
            assert r == 1
            ac(o, "Error: uidgid options must be of the form uid=<uid> gid=<gid>\n")

            o,r = pcs(temp_cib, "cluster uidgid rm blah --cluster_conf={0}".format(cluster_conf_tmp))
            assert r == 1
            ac(o, "Error: uidgid options must be of the form uid=<uid> gid=<gid>\n")

            o,r = pcs(temp_cib, "cluster uidgid add uid=zzz --cluster_conf={0}".format(cluster_conf_tmp))
            assert r == 0
            ac(o, "")

            o,r = pcs(temp_cib, "cluster uidgid add uid=zzz --cluster_conf={0}".format(cluster_conf_tmp))
            assert r == 1
            ac(o, "Error: unable to add uidgid\nError: uidgid entry already exists with uid=zzz, gid=\n")

            o,r = pcs(temp_cib, "cluster uidgid add gid=yyy --cluster_conf={0}".format(cluster_conf_tmp))
            assert r == 0
            ac(o, "")

            o,r = pcs(temp_cib, "cluster uidgid add uid=aaa gid=bbb --cluster_conf={0}".format(cluster_conf_tmp))
            assert r == 0
            ac(o, "")

            o,r = pcs(temp_cib, "cluster uidgid --cluster_conf={0}".format(cluster_conf_tmp))
            assert r == 0
            ac(o, "UID/GID: gid=, uid=zzz\nUID/GID: gid=yyy, uid=\nUID/GID: gid=bbb, uid=aaa\n")

            o,r = pcs(temp_cib, "cluster uidgid rm gid=bbb --cluster_conf={0}".format(cluster_conf_tmp))
            assert r == 1
            ac(o, "Error: unable to remove uidgid\nError: unable to find uidgid with uid=, gid=bbb\n")

            o,r = pcs(temp_cib, "cluster uidgid rm uid=aaa gid=bbb --cluster_conf={0}".format(cluster_conf_tmp))
            assert r == 0
            ac(o, "")

            o,r = pcs(temp_cib, "cluster uidgid --cluster_conf={0}".format(cluster_conf_tmp))
            assert r == 0
            ac(o, "UID/GID: gid=, uid=zzz\nUID/GID: gid=yyy, uid=\n")

            o,r = pcs(temp_cib, "cluster uidgid rm uid=zzz --cluster_conf={0}".format(cluster_conf_tmp))
            assert r == 0
            ac(o, "")

            o,r = pcs(temp_cib, "config --cluster_conf={0}".format(cluster_conf_tmp))
            assert r == 0
            assert o.find("UID/GID: gid=yyy, uid=") != -1

            o,r = pcs(temp_cib, "cluster uidgid rm gid=yyy --cluster_conf={0}".format(cluster_conf_tmp))
            assert r == 0
            ac(o, "")

            o,r = pcs(temp_cib, "config --cluster_conf={0}".format(cluster_conf_tmp))
            assert r == 0
            assert o.find("No uidgids") == -1
        else:
            o,r = pcs(temp_cib, "cluster uidgid")
            assert r == 0
            ac(o, "No uidgids configured in cluster.conf\n")

            o,r = pcs(temp_cib, "cluster uidgid add")
            assert r == 1
            assert o.startswith("\nUsage:")

            o,r = pcs(temp_cib, "cluster uidgid rm")
            assert r == 1
            assert o.startswith("\nUsage:")

            o,r = pcs(temp_cib, "cluster uidgid xx")
            assert r == 1
            assert o.startswith("\nUsage:")

            o,r = pcs(temp_cib, "cluster uidgid add uid=testuid gid=testgid")
            assert r == 0
            ac(o, "")

            o,r = pcs(temp_cib, "cluster uidgid add uid=testuid gid=testgid")
            assert r == 1
            ac(o, "Error: uidgid file with uid=testuid and gid=testgid already exists\n")

            o,r = pcs(temp_cib, "cluster uidgid rm uid=testuid2 gid=testgid2")
            assert r == 1
            ac(o, "Error: no uidgid files with uid=testuid2 and gid=testgid2 found\n")

            o,r = pcs(temp_cib, "cluster uidgid rm uid=testuid gid=testgid2")
            assert r == 1
            ac(o, "Error: no uidgid files with uid=testuid and gid=testgid2 found\n")

            o,r = pcs(temp_cib, "cluster uidgid rm uid=testuid2 gid=testgid")
            assert r == 1
            ac(o, "Error: no uidgid files with uid=testuid2 and gid=testgid found\n")

            o,r = pcs(temp_cib, "cluster uidgid")
            assert r == 0
            ac(o, "UID/GID: uid=testuid gid=testgid\n")

            o,r = pcs(temp_cib, "cluster uidgid rm uid=testuid gid=testgid")
            assert r == 0
            ac(o, "")

            o,r = pcs(temp_cib, "cluster uidgid")
            assert r == 0
            ac(o, "No uidgids configured in cluster.conf\n")
Example #21
from __future__ import (
    absolute_import,
    division,
    print_function,
    unicode_literals,
)

from pcs import utils
from pcs.test.cib_resource.common import ResourceTest
from pcs.test.tools import pcs_unittest as unittest
from pcs.test.cib_resource.stonith_common import need_load_xvm_fence_agent

need_fence_scsi_providing_unfencing = unittest.skipUnless(
    not utils.is_rhel6(),
    "test requires system where stonith agent 'fence_scsi' provides unfencing"
)

class PlainStonith(ResourceTest):
    @need_load_xvm_fence_agent
    def test_simplest(self):
        self.assert_effect(
            "stonith create S fence_xvm",
            """<resources>
                <primitive class="stonith" id="S" type="fence_xvm">
                    <operations>
                        <op id="S-monitor-interval-60s" interval="60s"
                            name="monitor"
                        />
                    </operations>
                </primitive>
            </resources>"""