@contextlib.contextmanager
def temp_config(ocp_node, cfg):
    """Context manager to help define YAML files on the remote node
    that can be in turn fed to 'oc create'. Must be used as a context
    manager (with-statement).

    Example:
        >>> d = {'foo': True, 'bar': 22, 'baz': [1, 5, 9]}
        >>> with temp_config(node, d) as fpath:
        ...     func_that_takes_a_path(fpath)

        Here, the data dictionary `d` is serialized to YAML and written
        to a temporary file at `fpath`. Then, `fpath` can be used by
        a function that takes a file path. When the context manager exits
        the temporary file is automatically cleaned up.

    Args:
        ocp_node (str): The node to create the temp file on.
        cfg (dict): A data structure to be converted to YAML and
            saved in a tempfile on the node.
    Yields:
        str: A path to the temporary file on the remote node.
    """
    with tempfile.NamedTemporaryFile() as tmp:
        tmp.write(yaml.safe_dump(cfg).encode())
        tmp.flush()
        filename = tmp.name
        g.upload(ocp_node, filename, filename)
        yield filename
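
A minimal usage sketch (the hostname is a placeholder, and piping the
resulting path into 'oc create' is an assumption suggested by the
docstring rather than part of the snippet):

# Hypothetical usage: serialize a resource definition to a temp file on
# the node, then feed the remote path to 'oc create'.
cfg = {'apiVersion': 'v1', 'kind': 'Namespace',
       'metadata': {'name': 'demo-namespace'}}
with temp_config('master.example.com', cfg) as fpath:
    ret, out, err = g.run('master.example.com', 'oc create -f %s' % fpath)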
Example #2
    def test_nfsv4_acls(self):
        # pylint: disable=too-many-locals

        source_file = ("/usr/share/glustolibs/io/scripts/nfs_ganesha/"
                       "nfsv4_acl_test.sh")
        test_acl_file = "/tmp/nfsv4_acl_test.sh"

        for server in self.servers:
            g.upload(server, source_file, "/tmp/", user="******")

            cmd = ("export ONLY_CREATE_USERS_AND_GROUPS=\"yes\";sh %s %s" %
                   (test_acl_file, "/tmp"))
            ret, _, _ = g.run(server, cmd)
            self.assertEqual(ret, 0,
                             ("Failed to create users and groups "
                              "for running acl test in server %s" % server))
        time.sleep(5)

        for client in self.clients:
            g.upload(client, source_file, "/tmp/", user="******")
            option_flag = 0
            for mount in self.mounts:
                if mount.client_system == client:
                    mountpoint = mount.mountpoint
                    if "vers=4" not in mount.options:
                        option_flag = 1
                    break

            if option_flag:
                g.log.info(
                    "This ACL test requires the mount option vers=4 "
                    "on %s; skipping", client)
                continue

            dirname = mountpoint + "/" + "testdir_" + client
            cmd = "[ -d %s ] || mkdir %s" % (dirname, dirname)
            ret, _, _ = g.run(client, cmd)
            self.assertEqual(
                ret, 0, "Failed to create dir %s for running "
                "acl test" % dirname)

            cmd = "sh %s %s" % (test_acl_file, dirname)
            ret, out, _ = g.run(client, cmd)
            self.assertEqual(ret, 0,
                             ("Failed to execute acl test on %s" % client))

            g.log.info("ACL test output in %s : %s", client, out)
            acl_output = out.split('\n')[:-1]
            for output in acl_output:
                match = re.search("^OK.*", output)
                if match is None:
                    self.fail("Unexpected behaviour in ACL "
                              "functionality on %s" % client)

            cmd = "rm -rf %s" % dirname
            ret, _, _ = g.run(client, cmd)
            self.assertEqual(
                ret, 0, "Failed to remove dir %s after running "
                "acl test" % dirname)
Example #3
    def test_upload(self):
        """Testing SSH upload() method"""
        print("Running: %s - %s" % (self.id(), self.shortDescription()))
        g.run(self.primary_host, 'rm -f /tmp/upload_test_file')
        rcode, rout, _ = g.run_local('md5sum /etc/hosts | awk \'{print $1}\'')
        self.assertEqual(rcode, 0, 'Failed to get md5sum of local /etc/hosts')
        md5sum = rout.strip()
        g.upload(self.primary_host, '/etc/hosts', '/tmp/upload_test_file')
        command = 'md5sum /tmp/upload_test_file | awk \'{print $1}\''
        rcode, rout, _ = g.run(self.primary_host, command)
        self.assertEqual(rcode, 0,
                         'Failed to get md5sum of the uploaded file')
        md5sum_up = rout.strip()
        self.assertEqual(md5sum, md5sum_up,
                         'md5sum of the uploaded file does not match the '
                         'local file')
Example #5
def enable_pvc_resize(master_node):
    '''Edits the /etc/origin/master/master-config.yaml file to enable
    the pv_resize feature, then restarts the atomic-openshift services
    on the master node.

    Args:
        master_node (str): hostname of the master node on which the
            master-config.yaml file is to be edited.
    Returns:
        bool: True if successful; otherwise an exception is raised.
    '''
    version = get_openshift_version()
    if version < "3.9":
        msg = ("pv resize is not available in openshift "
               "version %s " % version)
        g.log.error(msg)
        raise NotSupportedException(msg)

    with tempfile.NamedTemporaryFile(delete=False) as temp:
        temp_filename = temp.name

    try:
        g.download(master_node, MASTER_CONFIG_FILEPATH, temp_filename)
    except Exception as e:
        err_msg = (
            "Failed to download '{}' from master node '{}' due to "
            "exception\n{}".format(
                MASTER_CONFIG_FILEPATH, master_node, six.text_type(e)))
        raise ExecutionError(err_msg)

    with open(temp_filename, 'r') as f:
        data = yaml.load(f, Loader=yaml.FullLoader)
        dict_add = data['admissionConfig']['pluginConfig']
        if "PersistentVolumeClaimResize" in dict_add:
            g.log.info("master-config.yaml file is already edited")
            return True
        dict_add['PersistentVolumeClaimResize'] = {
            'configuration': {
                'apiVersion': 'v1',
                'disable': 'false',
                'kind': 'DefaultAdmissionConfig'}}
        data['admissionConfig']['pluginConfig'] = dict_add
        kube_config = data['kubernetesMasterConfig']
        for key in ('apiServerArguments', 'controllerArguments'):
            kube_config[key] = (
                kube_config.get(key)
                if isinstance(kube_config.get(key), dict) else {})
            value = ['ExpandPersistentVolumes=true']
            kube_config[key]['feature-gates'] = value

    with open(temp_filename, 'w+') as f:
        yaml.dump(data, f, default_flow_style=False)

    try:
        g.upload(master_node, temp_filename, MASTER_CONFIG_FILEPATH)
    except Exception as e:
        err_msg = (
            "Failed to upload '{}' to master node '{}' due to "
            "exception\n{}".format(
                MASTER_CONFIG_FILEPATH, master_node, six.text_type(e)))
        raise ExecutionError(err_msg)
    os.unlink(temp_filename)

    if version == "3.9":
        cmd = ("systemctl restart atomic-openshift-master-api "
               "atomic-openshift-master-controllers")
    else:
        cmd = ("/usr/local/bin/master-restart api && "
               "/usr/local/bin/master-restart controllers")
    ret, out, err = g.run(master_node, cmd, "root")
    if ret != 0:
        err_msg = "Failed to execute cmd %s on %s\nout: %s\nerr: %s" % (
            cmd, master_node, out, err)
        g.log.error(err_msg)
        raise ExecutionError(err_msg)

    # Wait for API service to be ready after the restart
    for w in waiter.Waiter(timeout=120, interval=1):
        try:
            cmd_run("oc get nodes", master_node)
            return True
        except AssertionError:
            continue
    err_msg = "Exceeded 120s timeout waiting for OCP API to start responding."
    g.log.error(err_msg)
    raise ExecutionError(err_msg)
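
A minimal usage sketch (the hostname is a placeholder; the exception
types are the ones raised by the function above):

# Hypothetical usage: enable the PVC resize feature, handling an
# unsupported OpenShift version separately from runtime failures.
try:
    enable_pvc_resize('master.example.com')
except NotSupportedException:
    g.log.info("PVC resize is not supported on this OpenShift version")
except ExecutionError as e:
    g.log.error("Could not enable PVC resize: %s", e)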
Example #6
def upload_scripts(list_of_nodes,
                   list_of_scripts_abs_path,
                   upload_dir="/usr/share/glustolibs/io/scripts/",
                   user=None):
    """Upload specified scripts to all the nodes.

    Args:
        list_of_nodes (list): Nodes on which scripts have to be uploaded.
        list_of_scripts_abs_path (list): List of absolute path of all
            scripts that are to be uploaded from local node.
        upload_dir (optional[str]): Name of the dir under which
            scripts will be uploaded on remote node.
        user (optional[str]): The user to use for the remote connection.

    Returns:
        bool: True if uploading scripts is successful on all nodes.
            False otherwise.
    """
    if not isinstance(list_of_nodes, list):
        list_of_nodes = [list_of_nodes]

    if not isinstance(list_of_scripts_abs_path, list):
        list_of_scripts_abs_path = list_of_scripts_abs_path.split(" ")

    g.log.info("Scripts to upload: %s" % list_of_scripts_abs_path)
    g.log.info("Script upload dir: %s" % upload_dir)

    # Create upload dir on each node
    if not create_dirs(list_of_nodes, upload_dir):
        return False

    # Upload scripts
    _rc = True
    for script_local_abs_path in list_of_scripts_abs_path:
        if not os.path.exists(script_local_abs_path):
            g.log.error("Script: %s doesn't exist" % script_local_abs_path)
            _rc = False
            break
        for node in list_of_nodes:
            script_name = os.path.basename(script_local_abs_path)
            script_upload_path = os.path.join(upload_dir, script_name)
            g.upload(node, script_local_abs_path, script_upload_path, user)
    if not _rc:
        g.log.error("Failed to upload scripts")
        return False

    # Recursively provide execute permissions to all scripts
    for node in list_of_nodes:
        ret, _, _ = g.run(node, "chmod -R +x %s" % upload_dir)
        if ret != 0:
            g.log.error("Unable to provide execute permissions to upload dir "
                        "'%s' on %s" % (upload_dir, node))
            return False
        else:
            g.log.info("Successfully provided execute permissions to upload "
                       "dir '%s' on %s" % (upload_dir, node))

        ret, out, err = g.run(node, "ls -l %s" % upload_dir)
        if ret != 0:
            g.log.error("Failed to list the dir: %s on node: %s - %s" %
                        (upload_dir, node, err))
        else:
            g.log.info("Listing dir: %s on node: %s - \n%s" %
                       (upload_dir, node, out))

    return True
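
A minimal usage sketch (hostnames and the script path are placeholders):

# Hypothetical usage: push one script to two nodes under the default
# upload dir and bail out on failure.
nodes = ['server1.example.com', 'server2.example.com']
scripts = ['/usr/share/glustolibs/io/scripts/file_dir_ops.py']
if not upload_scripts(nodes, scripts, user='root'):
    g.log.error("Failed to upload scripts to nodes %s", nodes)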
Example #7
def create_nfs_ganesha_cluster(servers, vips):
    """
    Creating a ganesha HA cluster

    Args:
        servers(list): Hostname of ganesha nodes
        vips(list): VIPs that has to be assigned for each nodes
    Returns:
        True(bool): If configuration of ganesha cluster is success
        False(bool): If failed to configure ganesha cluster
    """
    # pylint: disable=too-many-return-statements
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements
    ganesha_mnode = servers[0]

    # Configure ports in ganesha servers for RHEL7
    if is_rhel7(servers):
        g.log.info("Defining statd service ports")
        ret = configure_ports_on_servers(servers)
        if not ret:
            g.log.error("Failed to set statd service ports on nodes.")
            return False

    # Firewall settings for nfs-ganesha
    ret = ganesha_server_firewall_settings(servers)
    if not ret:
        g.log.error("Failed to apply firewall settings for nfs-ganesha.")
        return False
    g.log.info("Firewall settings for nfs-ganesha applied successfully.")

    # Do peer probe if not already done
    ret = peer_probe_servers(ganesha_mnode, servers, validate=True)
    if not ret:
        g.log.error("Peer probe failed")
        return False

    # Enable shared storage if not present
    ret, _, _ = g.run(ganesha_mnode,
                      "gluster v list | grep 'gluster_shared_storage'")
    if ret != 0:
        if not enable_shared_storage(ganesha_mnode):
            g.log.error("Failed to enable shared storage")
            return False
        g.log.info("Enabled gluster shared storage.")
    else:
        g.log.info("Shared storage is already enabled.")

    # Enable the glusterfssharedstorage.service and nfs-ganesha service
    for server in servers:
        cmd = "systemctl enable glusterfssharedstorage.service"
        ret, _, _ = g.run(server, cmd)
        if ret != 0:
            g.log.error("Failed to enable glusterfssharedstorage.service "
                        "on %s", server)
            return False

        ret, _, _ = g.run(server, "systemctl enable nfs-ganesha")
        if ret != 0:
            g.log.error("Failed to enable nfs-ganesha service on %s", server)
            return False

    # Passwordless ssh for nfs
    ret = create_nfs_passwordless_ssh(ganesha_mnode, servers)
    if not ret:
        g.log.error("Setting up passwordless ssh between nodes failed.")
        return False
    g.log.info("Passwordless ssh between nodes set up successfully.")

    # Create ganesha-ha.conf file
    tmp_ha_conf = "/tmp/ganesha-ha.conf"
    create_ganesha_ha_conf(servers, vips, tmp_ha_conf)

    # Check whether ganesha-ha.conf file is created
    if not os.path.isfile(tmp_ha_conf):
        g.log.error("Failed to create ganesha-ha.conf")
        return False

    # Cluster auth setup
    ret = cluster_auth_setup(servers)
    if not ret:
        g.log.error("Failed to configure cluster services")
        return False

    # Create nfs-ganesha directory in shared storage
    dpath = '/var/run/gluster/shared_storage/nfs-ganesha'
    mkdir(ganesha_mnode, dpath)

    # Copy the config files to shared storage
    cmd = 'cp -p /etc/ganesha/ganesha.conf %s/' % dpath
    ret, _, _ = g.run(ganesha_mnode, cmd)
    if ret != 0:
        g.log.error("Failed to copy ganesha.conf to %s/", dpath)
        return False

    g.upload(ganesha_mnode, tmp_ha_conf, '%s/' % dpath)

    # Create backup of ganesha-ha.conf file in ganesha_mnode
    g.upload(ganesha_mnode, tmp_ha_conf, '/etc/ganesha/')

    # setsebool ganesha_use_fusefs on
    cmd = "setsebool ganesha_use_fusefs on"
    for server in servers:
        ret, _, _ = g.run(server, cmd)
        if ret:
            g.log.error("Failed to 'setsebool ganesha_use_fusefs on' on %",
                        server)
            return False

        # Verify ganesha_use_fusefs is on
        _, out, _ = g.run(server, "getsebool ganesha_use_fusefs")
        if "ganesha_use_fusefs --> on" not in out:
            g.log.error("Failed to 'setsebool ganesha_use_fusefs on' on %",
                        server)
            return False

    # Enabling ganesha
    g.log.info("Enable nfs-ganesha")
    ret, _, _ = enable_nfs_ganesha(ganesha_mnode)

    if ret != 0:
        g.log.error("Failed to enable ganesha")
        return False

    g.log.info("Successfully created ganesha cluster")

    # pcs status output
    _, _, _ = g.run(ganesha_mnode, "pcs status")

    # pacemaker status output
    _, _, _ = g.run(ganesha_mnode, "systemctl status pacemaker")

    return True
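
A minimal usage sketch (hostnames and VIPs are placeholders; the
function expects one VIP per server, per the docstring):

# Hypothetical usage: configure a two-node ganesha HA cluster.
servers = ['server1.example.com', 'server2.example.com']
vips = ['10.70.0.101', '10.70.0.102']
if not create_nfs_ganesha_cluster(servers, vips):
    g.log.error("nfs-ganesha cluster configuration failed")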