Example #1
    def fill_memory():
        """
        Use the `stress-ng` tool to stress swap memory for 4 minutes to the
        given percentage
        """
        run_time = 240
        SSH = usmssh.get_ssh()
        host = CONF.config["usmqe"]["cluster_member"]

        # get total and swap memory of machine via /proc/meminfo file
        meminfo_cmd = """awk '{if ($1=="MemTotal:" || $1=="SwapTotal:") print $2}' /proc/meminfo"""
        _, stdout, _ = SSH[host].run(meminfo_cmd)
        mem_total, swap_total, _ = stdout.decode("utf-8").split("\n")

        # how much memory is going to be consumed, considering both normal
        # memory and swap
        memory_percent = 100 + (int(swap_total) / int(mem_total) *
                                int(request.param))
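        # e.g. with 8 GiB RAM, 2 GiB swap and request.param == 50 this yields
        # 100 + (2/8 * 50) = 112, i.e. stress-ng targets 112 % of RAM and
        # spills the remainder into swap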

        stress_cmd = "stress-ng --vm-method flip --vm {} --vm-bytes {}%".format(
            1, int(memory_percent))
        stress_cmd += " --timeout {}s --vm-hang 0 --vm-keep --verify".format(
            run_time)
        stress_cmd += " --syslog"
        retcode, stdout, stderr = SSH[host].run(stress_cmd)
        if retcode != 0:
            raise OSError(stderr)

        teardown_cmd = "sleep 3; swapoff -a && swapon -a; sleep 5"
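        # cycling swap off and on moves pages left in swap by stress-ng back
        # to RAM, so they should not skew later measurements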
        SSH[host].run(teardown_cmd)
        return request.param
Example #2
def workload_memory_utilization(request, stress_tools):
    """
    Returns:
        dict: contains information about `start` and `stop` time of the stress
            command and its `result`
    """
    def fill_memory():
        """
        Use the `stress` tool to stress memory for 4 minutes to the given
        percentage
        """
        # stress memory for 240 seconds
        run_time = 240
        SSH = usmssh.get_ssh()
        host = CONF.config["usmqe"]["cluster_member"]
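        # allocate request.param percent of MemAvailable; e.g. with
        # request.param == 80 the command renders to:
        # stress --vm-bytes $(awk '/MemAvailable/{printf "%d\n" , $2 * (80/100);}' < /proc/meminfo)k --vm-keep -m 1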
        stress_cmd = "stress --vm-bytes $(awk '/MemAvailable/{{printf "\
                     "\"%d\\n\" , $2 * ({0}/100);}}' < /proc/meminfo)k "\
                     "--vm-keep -m {1}".format(request.param, 1)
        stress_cmd += " --timeout {}s".format(run_time)
        retcode, stdout, stderr = SSH[host].run(stress_cmd)
        if retcode != 0:
            raise OSError(stderr)
        return request.param

    SSH = usmssh.get_ssh()
    host = CONF.config["usmqe"]["cluster_member"]
    meminfo_cmd = "free -b | awk '{if (NR==2) print $2}'"
    retcode, stdout, stderr = SSH[host].run(meminfo_cmd)
    if retcode != 0:
        raise OSError(stderr)
    mem_total = stdout.decode("utf-8")
    return measure_operation(fill_memory, metadata={'total_memory': mem_total})
Example #3
 def __init__(self, nodes):
     """
     Args:
         nodes (list): list of nodes used for command scheduling
     """
     self.nodes = nodes
     self.ssh = usmssh.get_ssh()
Example #4
def os_info():
    """
    Return information about the current OS distribution from the
    /etc/os-release file.
    """
    SSH = usmssh.get_ssh()
    os_release = 'cat /etc/os-release'
    node_connection = SSH[pytest.config.getini("usm_cluster_member")]
    f_content = node_connection.run(os_release)
    f_content = f_content[1].decode("utf-8").replace('"', '')
    config = configparser.ConfigParser()
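    # configparser requires a section header, so prepend a dummy [os_info]
    # section before parsing the key=value pairs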
    config.read_string('[os_info]\n' + f_content)
    LOGGER.debug(config['os_info'])
    return dict(config['os_info'])
Example #5
def test_scheduler_workflow():
    conf = UsmConfig()
    nodes = conf.inventory.get_groups_dict()["gluster_servers"]
    scheduler = Scheduler(nodes)
    scheduler.run_at("echo '1' > /tmp/test_task")
    jobs = scheduler.jobs()
    LOGGER.info("Jobs: {}".format(jobs))
    assert len(jobs) == len(nodes)
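    # the scheduled job is expected to fire within a minute, so wait a bit
    # longer than that before checking the result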
    time.sleep(70)
    for node in nodes:
        SSH = usmssh.get_ssh()
        _, message, _ = SSH[node].run("cat /tmp/test_task")
        assert message.decode("utf-8").rstrip("\n") == "1"
        # teardown
        SSH[node].run("rm -f /tmp/test_task")
Example #6
 def fill_memory():
     """
     Use the `stress` tool to stress memory for 4 minutes to the given
     percentage
     """
     # stress memory for 240 seconds
     run_time = 240
     SSH = usmssh.get_ssh()
     host = CONF.config["usmqe"]["cluster_member"]
     stress_cmd = "stress --vm-bytes $(awk '/MemAvailable/{{printf "\
                  "\"%d\\n\" , $2 * ({0}/100);}}' < /proc/meminfo)k "\
                  "--vm-keep -m {1}".format(request.param, 1)
     stress_cmd += " --timeout {}s".format(run_time)
     retcode, stdout, stderr = SSH[host].run(stress_cmd)
     if retcode != 0:
         raise OSError(stderr)
     return request.param
Example #7
 def fill_cpu():
     """
     Use the `stress-ng` tool to stress the CPU for 3 minutes to the given
     percentage
     """
     # stress CPU for 180 seconds
     run_time = 180
     SSH = usmssh.get_ssh()
     host = CONF.config["usmqe"]["cluster_member"]
     processors_cmd = "grep -c ^processor /proc/cpuinfo"
     retcode, processors_count, _ = SSH[host].run(processors_cmd)
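     # processors_count is the raw bytes output of the command (e.g. b'4\n');
     # int() accepts it directly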
     stress_cmd = "stress-ng --cpu {} -l {} --timeout {}s".format(
         int(processors_count), request.param, run_time)
     retcode, stdout, stderr = SSH[host].run(stress_cmd)
     if retcode != 0:
         raise OSError(stderr)
     return request.param
Example #8
def delete_new_user(user_data):
    """
    Delete user with given user_data.
    """
    auth = login(pytest.config.getini("usm_username"),
                 pytest.config.getini("usm_password"))
    admin = tendrlapi_user.ApiUser(auth=auth)
    if user_data['email'].endswith(
            usmqe.inventory.role2hosts("usm_client")[0]):
        SSH = usmssh.get_ssh()
        node_connection = SSH[usmqe.inventory.role2hosts("usm_client")[0]]
        userdel = 'userdel {}'.format(user_data['username'])
        userdel_response = node_connection.run(userdel)
        # the userdel command should return exit code 0
        assert userdel_response[0] == 0
    admin.del_user(user_data["username"])
    logout(auth=auth)
Example #9
def volume_mount_points():
    """
    Returns a dictionary where keys are Gluster volume names and values are
    directory paths of the volume mount points.
    """
    SSH = usmssh.get_ssh()
    host = CONF.inventory.get_groups_dict()["usm_client"][0]
    gluster_volume = GlusterVolume()
    volumes = gluster_volume.list()
    mount_points = {}

    for volume in volumes:
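        # e.g. for a volume named "volume_alpha" this renders to:
        # mount | awk '/volume_alpha/ {print $3}'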
        mount_point_cmd = "mount | awk '/{}/ {{print $3}}'".format(volume)
        retcode, mount_point, stderr = SSH[host].run(mount_point_cmd)
        if retcode != 0:
            raise OSError(stderr.decode("utf-8"))
        mount_points[volume] = mount_point.decode("utf-8")
    return mount_points
Example #10
def delete_new_user(user_data):
    """
    Delete user with given user_data.
    """
    auth = login(CONF.config["usmqe"]["username"],
                 CONF.config["usmqe"]["password"])
    admin = tendrlapi_user.ApiUser(auth=auth)
    if user_data['email'].endswith(
            CONF.inventory.get_groups_dict()["usm_client"][0]):
        SSH = usmssh.get_ssh()
        node_connection = SSH[CONF.inventory.get_groups_dict()["usm_client"]
                              [0]]
        userdel = 'userdel {}'.format(user_data['username'])
        userdel_response = node_connection.run(userdel)
        # the userdel command should return exit code 0
        assert userdel_response[0] == 0
    admin.del_user(user_data["username"])
    logout(auth=auth)
Example #11
def create_new_user(user_data):
    """
    Create user from given user_data.
    """
    auth = login(pytest.config.getini("usm_username"),
                 pytest.config.getini("usm_password"))
    admin = tendrlapi_user.ApiUser(auth=auth)
    admin.add_user(user_data)

    if user_data['email'].endswith(
            usmqe.inventory.role2hosts("usm_client")[0]):
        SSH = usmssh.get_ssh()
        useradd = 'useradd {}'.format(user_data['username'])
        node_connection = SSH[usmqe.inventory.role2hosts("usm_client")[0]]
        node_connection.run(useradd)
        passwd = 'echo "{}" | passwd --stdin {}'.format(
            user_data['password'], user_data['username'])
        passwd_response = node_connection.run(passwd)
        # the passwd command should return exit code 0
        assert passwd_response[0] == 0
Example #12
def create_new_user(user_data):
    """
    Create user from given user_data.
    """
    auth = login(CONF.config["usmqe"]["username"],
                 CONF.config["usmqe"]["password"])
    admin = tendrlapi_user.ApiUser(auth=auth)
    admin.add_user(user_data)

    if user_data['email'].endswith(
            CONF.inventory.get_groups_dict()["usm_client"][0]):
        SSH = usmssh.get_ssh()
        useradd = 'useradd {}'.format(user_data['username'])
        node_connection = SSH[CONF.inventory.get_groups_dict()["usm_client"]
                              [0]]
        node_connection.run(useradd)
        passwd = 'echo "{}" | passwd --stdin {}'.format(
            user_data['password'], user_data['username'])
        passwd_response = node_connection.run(passwd)
        # the passwd command should return exit code 0
        assert passwd_response[0] == 0
Example #13
def test_alerting_settings(application, receive_alerts,
                           valid_normal_user_data):
    """
    Create normal user with email notifications switched on or off.
    Check that alerts appear in the mailbox according to notification settings.
    """
    """
    :step:
      Change admin's email to avoid collision.
    :result:
      Admin's email is changed
    """
    # change the logged-in admin's email so it doesn't collide with the test
    # user's address
    new_data = {
        "email": "alerting_test" + str(receive_alerts) + "@example.com",
        "password": CONF.config["usmqe"]["password"],
        "confirm_password": CONF.config["usmqe"]["password"]
    }
    application.collections.users.edit_logged_in_user(new_data)
    """
    :step:
      Create normal user with notifications switched on or off
      and email configured in ansible playbook
    :result:
      Normal user is created
    """
    user = application.collections.users.create(
        user_id=valid_normal_user_data["username"],
        name=valid_normal_user_data["name"],
        email="root@" + CONF.inventory.get_groups_dict()["usm_client"][0],
        notifications_on=receive_alerts,
        password=valid_normal_user_data["password"],
        role=valid_normal_user_data["role"])
    """
    :step:
      Stop glusterd on one of the cluster nodes.
    :result:
      After some time alerts are generated
    """

    start_time = datetime.datetime.now().timestamp()
    SSH = usmssh.get_ssh()
    host = CONF.config["usmqe"]["cluster_member"]
    stop_cmd = "systemctl stop glusterd"
    time.sleep(90)
    retcode, stdout, stderr = SSH[host].run(stop_cmd)
    if retcode != 0:
        raise OSError(stderr)
    """
    :step:
      Restart glusterd.
    :result:
      After some time alerts disappear
    """

    restart_cmd = "systemctl restart glusterd"
    time.sleep(90)
    retcode, stdout, stderr = SSH[host].run(restart_cmd)
    if retcode != 0:
        raise OSError(stderr)
    stop_time = datetime.datetime.now().timestamp()
    """
    :step:
      Gather e-mails that came after glusterd was stopped and check if there were any.
    :result:
      There should be some or no e-mails according to user settings.
    """

    messages = usmmail.get_msgs_by_time(start_timestamp=start_time,
                                        end_timestamp=stop_time)
    LOGGER.debug("Selected messages count: {}".format(len(messages)))
    alert_received = False
    for message in messages:
        LOGGER.debug("Message date: {}".format(message['Date']))
        LOGGER.debug("Message subject: {}".format(message['Subject']))
        LOGGER.debug("Message body: {}".format(
            message.get_payload(decode=True)))
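        # alert e-mails are assumed to carry a bracketed tag in the subject,
        # so a "]" in the subject marks an alert message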
        if message['Subject'].count("]"):
            alert_received = True
    if receive_alerts:
        pytest.check(alert_received, "Check that alert has been received")
    else:
        pytest.check(not alert_received,
                     "Check that alert hasn't been recieved")
    user.delete()
Example #14
def test_ui_alerts(application, managed_cluster):
    """
    Test UI alert appearance and disappearance.
    """
    """
    :step:
      Stop glusterd on one of the cluster nodes.
    :result:
      After some time alerts appear in UI
    """
    SSH = usmssh.get_ssh()
    host = CONF.config["usmqe"]["cluster_member"]
    stop_cmd = "systemctl stop glusterd"
    time.sleep(10)
    retcode, stdout, stderr = SSH[host].run(stop_cmd)
    if retcode != 0:
        raise OSError(stderr)
    time.sleep(40)
    alert_found = False
    alerts = application.collections.alerts.get_alerts()
    for alert in alerts:
        LOGGER.debug("Alert description: {}".format(alert.description))
        LOGGER.debug("Alert date: {}".format(alert.date))
        LOGGER.debug("Alert severity: {}".format(alert.severity))
        if alert.description.find(
                "is Disconnected") > 0 and alert.description.find(host) > 0:
            alert_found = True
            LOGGER.debug("Alert found: {}".format(alert_found))
            pytest.check(
                alert.severity == "warning",
                "Check that severity of alert about disconnection is ``warning``"
            )
            pytest.check(
                int(alert.date.split(" ")[2]) > 2018,
                "Check that the year in the alert date is integer, 2019 or greater"
            )
    pytest.check(
        alert_found,
        "Check that the alert about disconnection exists in the list of UI alerts"
    )
    """
    :step:
      Restart glusterd.
    :result:
      After some time alerts disappear
    """
    restart_cmd = "systemctl restart glusterd"
    time.sleep(10)
    retcode, stdout, stderr = SSH[host].run(restart_cmd)
    time.sleep(50)
    if retcode != 0:
        raise OSError(stderr)
    alerts = application.collections.alerts.get_alerts()
    alert_found = False
    for alert in alerts:
        LOGGER.debug("Alert description: {}".format(alert.description))
        LOGGER.debug("Alert date: {}".format(alert.date))
        LOGGER.debug("Alert severity: {}".format(alert.severity))
        if alert.description.find(
                "is Connected") > 0 and alert.description.find(host) > 0:
            alert_found = True
            LOGGER.debug("Alert found: {}".format(alert_found))
            pytest.check(
                alert.severity == "info",
                "Check that severity of the alert about re-connection is ``info``"
            )
            pytest.check(
                int(alert.date.split(" ")[2]) > 2018,
                "Check that the year in the alert date is integer, 2019 or greater"
            )
    pytest.check(
        alert_found,
        "Check that the alert about re-connection exists in the list of UI alerts"
    )
Example #15
def workload_capacity_utilization(request, volume_mount_points):
    """
    Returns:
        dict: contains information about `start` and `stop` time of the dd
            command and `result` as a number representing the percentage of
            disk utilization.
    """
    volume_name = list(volume_mount_points.keys())[0]
    mount_point = volume_mount_points[volume_name].strip()
    SSH = usmssh.get_ssh()
    host = CONF.inventory.get_groups_dict()["usm_client"][0]

    def fill_volume():
        """
        Use the `dd` command to utilize the mounted volume.
        """
        disk_space_cmd = "df {0} | awk '/{1}/ " \
            "{{print $3 \" \" $4}}'".format(
                mount_point,
                mount_point.split("/")[-1])
        retcode, disk_space, stderr = SSH[host].run(disk_space_cmd)
        if retcode != 0:
            raise OSError(stderr.decode("utf-8"))

        # convert used and available space from KiB (df default) to MiB
        disk_used, disk_available = [
            int(size.rstrip("\n")) / 1024
            for size in disk_space.decode("utf-8").split(" ")
        ]

        # block size = 100M
        block_size = 100
        # compute disk space that is going to be used and number of files
        # to create with regard to already utilized space
        file_count = int(
            ((int(disk_available) / 100 * request.param) - int(disk_used)) /
            block_size)
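        # e.g. with 1024 MiB used, 10240 MiB available and request.param == 50:
        # int(((10240 / 100 * 50) - 1024) / 100) = 40 files of 100 MiB each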

        stress_cmd = "for x in {{1..{}}}; do dd if=/dev/zero" \
            " of={}/test_file$x count=1 bs={}M; done".format(
                file_count,
                mount_point[:-1] if mount_point.endswith("/") else mount_point,
                block_size)
        retcode, _, stderr = SSH[host].run(stress_cmd)
        if retcode != 0:
            raise OSError(stderr.decode("utf-8"))
        return request.param

    disk_space_cmd = "df {0} | awk '/{1}/ {{print $2}}'".format(
        mount_point,
        mount_point.split("/")[-1])
    retcode, disk_total, stderr = SSH[host].run(disk_space_cmd)
    if retcode != 0:
        raise OSError(stderr.decode("utf-8"))

    time_to_measure = 180
    yield measure_operation(fill_volume,
                            minimal_time=time_to_measure,
                            metadata={
                                "volume_name":
                                volume_name,
                                "total_capacity":
                                int(disk_total.decode("utf-8").rstrip("\n"))
                            },
                            measure_after=True)

    cleanup_cmd = "rm -f {}/test_file*".format(
        mount_point[:-1] if mount_point.endswith("/") else mount_point)
    retcode, _, stderr = SSH[host].run(cleanup_cmd)
    if retcode != 0:
        raise OSError(stderr.decode("utf-8"))
Example #16
def test_create_brick_valid(valid_cluster_id, valid_brick_path, valid_devices,
                            valid_session_credentials):
    """@pylatest api/gluster.create_brick_valid
        API-gluster: create_brick
        ******************************

        .. test_metadata:: author [email protected]

        Description
        ===========

        Create ``GlusterCreateBrick`` job with a ``brick_name`` on specified
        ``valid_device`` with nodes from cluster with ``valid_cluster_id``

        .. test_step:: 1

                Connect to Tendrl API via POST request to ``APIURL/:cluster_id/GlusterCreateBrick``
                where cluster_id is set to a predefined value.

        .. test_result:: 1

                Server should return response in JSON format:

                Return code should be **202** with data ``{"message": "Accepted"}``.
                Job should finish.
                """
    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    brick_name = re.search("(.*)_mount",
                           os.path.split(valid_brick_path)[1]).group(1)

    cluster_info = [
        x for x in api.get_cluster_list()
        if x["integration_id"] == valid_cluster_id
    ]
    nodes = cluster_info[0]["nodes"]

    job_id = api.create_bricks(valid_cluster_id, nodes, valid_devices,
                               brick_name)["job_id"]
    api.wait_for_job_status(job_id)
    """@pylatest api/gluster.create_brick_valid
        API-gluster: create_brick
        ******************************

        .. test_metadata:: author [email protected]

        Description
        ===========

        Check if the bricks were created on hosts of cluster with ``valid_cluster_id``.

        .. test_step:: 2

                Via ssh check on cluster nodes that there exists a directory
                called ``brick_name`` in `/tendrl_gluster_bricks/brick_mount`:
                    [ -d /tendrl_gluster_bricks/brick_mount/``brick_name`` ] && echo "exists"

        .. test_result:: 2

                There should be the string ``exists`` in the output of ssh.
                """
    SSH = usmssh.get_ssh()
    pytest.check(
        len(nodes) > 0,
        "In cluster have to be at least one node. There are {}".format(
            len(nodes)))
    cmd_exists = "[ -d {} ] && echo 'exists'".format(valid_brick_path)
    cmd_fs = 'mount | grep $(df  --output=source {} | tail -1)'.format(
        valid_brick_path)
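    # cmd_fs lists the mount entry of the device backing the brick path, which
    # is compared against the expected LV device and xfs filesystem below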
    expected_output = '/dev/mapper/tendrl{0}_vg-tendrl{0}_lv on {1} type xfs'\
        .format(brick_name, valid_brick_path)
    for node in nodes:
        _, output, _ = SSH[nodes[node]["fqdn"]].run(cmd_exists)
        output = str(output).strip("'b\\n")
        pytest.check(
            output == "exists",
            "Output of command `{}` should be `exists`. Output is: `{}`".
            format(cmd_exists, output))
        """@pylatest api/gluster.create_brick_valid
            API-gluster: create_brick
            ******************************

            .. test_metadata:: author [email protected]

            Description
            ===========

            Check if the bricks have ``xfs`` filesystem and set correct device.

            .. test_step:: 3

                    Via ssh check the filesystem and device of the directory with
                    ``valid_brick_path``:
                        mount | grep $(df  --output=source ``valid_brick_path`` | tail -1)

            .. test_result:: 3

                    Output of the command should be:
                ``/dev/mapper/tendrl`brick_name`_vg-tendrl`brick_name`_lv on `brick_path` type xfs``
                    """
        _, output, _ = SSH[nodes[node]["fqdn"]].run(cmd_fs)
        output = str(output).strip("'b\\n")
        output = re.sub(r"\s*\(.*\)$", "", output)
        pytest.check(
            output == expected_output,
            "Output of command {} should be `{}`. Output is: `{}`".format(
                cmd_fs, expected_output, output))