Example #1
def test_nodes_list(
        valid_session_credentials,
        managed_cluster):
    """
    List nodes for given cluster via API.

    :step:
      Connect to Tendrl API via GET request to ``APIURL/:cluster_id/nodes``
      Where cluster_id is set to predefined value.
    :result:
      Server should return response in JSON format:
      Return code should be **200** with data ``{"nodes": [{...}, ...]}``.
    """

    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)

    # list of nodes from Tendrl api
    t_nodes = api.get_node_list(managed_cluster['cluster_id'])
    t_node_names = {node["fqdn"] for node in t_nodes["nodes"]}
    # list of nodes from Gluster command output
    gl = gluster.GlusterCommon()
    g_node_names = set(
        gl.get_hosts_from_trusted_pool(
            CONF.config["usmqe"]["cluster_member"]))

    LOGGER.info("list of nodes from Tendrl api: %s", str(t_node_names))
    LOGGER.info("list of nodes from gluster: %s", g_node_names)
    pytest.check(
        t_node_names == g_node_names,
        "List of nodes from Gluster should be the same as from Tendrl API.")
Example #2
def valid_devices(valid_session_credentials, count=1):
    """
    Generate device paths.

    Args:
        count (int): How many device paths should be generated.
                     There have to be enough devices.
    """

    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    nodes_free_devs = {
        x["node_id"]: list(x["localstorage"]["blockdevices"]["free"].values())
        for x in api.get_nodes()["nodes"]
        if len(x["localstorage"]["blockdevices"]["free"]) > 0
    }
    nodes_free_kern_name = {}
    for node_id in nodes_free_devs:
        for device in nodes_free_devs[node_id]:
            if node_id not in nodes_free_kern_name:
                nodes_free_kern_name[node_id] = []
            nodes_free_kern_name[node_id].append(device["device_kernel_name"])

    try:
        return {
            node_id: sorted(nodes_free_kern_name[node_id])[0:count]
            for node_id in nodes_free_kern_name
        }
    except IndexError as err:
        raise Exception(
            "IndexError({0}): There are not enough devices. There are: {1}"
            .format(err, nodes_free_kern_name))
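
The fixture above boils down to "take the first ``count`` free block devices on every node that reports any". A self-contained sketch of that selection step with a fabricated payload (the node ids and device names are purely illustrative):

nodes_payload = {
    "nodes": [
        {"node_id": "node-1",
         "localstorage": {"blockdevices": {"free": {
             "a": {"device_kernel_name": "/dev/vdb"},
             "b": {"device_kernel_name": "/dev/vdc"}}}}},
        {"node_id": "node-2",
         "localstorage": {"blockdevices": {"free": {}}}},
    ]
}


def pick_devices(payload, count=1):
    """Return the first `count` free device kernel names per node."""
    result = {}
    for node in payload["nodes"]:
        free = node["localstorage"]["blockdevices"]["free"].values()
        names = sorted(dev["device_kernel_name"] for dev in free)
        if names:
            result[node["node_id"]] = names[:count]
    return result


print(pick_devices(nodes_payload))  # {'node-1': ['/dev/vdb']}
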
Example #3
def test_stop_volume_invalid(valid_cluster_id, invalid_volume_name,
                             valid_session_credentials):
    """@pylatest api/gluster.stop_volume_invalid
        API-gluster: stop_volume
        ******************************

        .. test_metadata:: author [email protected]

        Description
        ===========

        Try to stop volume with given name and cluster id via API.

        .. test_step:: 1

                Connect to Tendrl API via POST request to ``APIURL/:cluster_id/GlusterStopVolume``
                Where cluster_id is set to predefined value.

        .. test_result:: 1

                Server should return response in JSON format:

                Return code should be **202** with data ``{"message": "Accepted"}``.
                Job should fail.
                """

    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    volume_data = {
        "Volume.volname": invalid_volume_name,
    }

    job_id = api.stop_volume(valid_cluster_id, volume_data)["job_id"]
    # TODO: correctly check the server response or the etcd job status
    api.wait_for_job_status(job_id, status="failed")
Example #4
def test_volumes_list(valid_session_credentials, managed_cluster):
    """
    List volumes for given cluster via API.

    :step:
      Connect to Tendrl API via GET request to ``APIURL/:cluster_id/volumes``
      Where cluster_id is set to predefined value.
    :result:
      Server should return response in JSON format:
      Return code should be **200** with data ``{"volumes": [{...}, ...]}``.
    """

    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    glv_cmd = gluster.GlusterVolume()

    # list of volumes from Tendrl api
    t_volumes = api.get_volume_list(managed_cluster['cluster_id'])
    t_volume_names = [volume["name"] for volume in t_volumes["volumes"]]
    t_volume_names.sort()
    # list of volumes from Gluster command output
    g_volume_names = glv_cmd.get_volume_names()
    g_volume_names.sort()

    LOGGER.info("list of volumes from Tendrl api: %s", str(t_volume_names))
    LOGGER.info("list of volumes from gluster: %s", g_volume_names)
    pytest.check(
        t_volume_names == g_volume_names,
        "List of volumes from Gluster should be the same as from Tendrl API.")

    pytest.check(
        len(t_volume_names) == int(CONF.config["usmqe"]["volume_count"]),
        "Number of volumes from Tendrl API: {}. "
        "Expected number of volumes: {}.".format(
            len(t_volume_names), int(CONF.config["usmqe"]["volume_count"])))
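
For reference, the GET request described in the docstring can be reproduced with plain ``requests``. The base URL, token and the ``Authorization`` header format below are placeholders and assumptions, not the exact scheme used by the usmqe wrapper:

import requests

API_URL = "http://tendrl.example.com/api/1.0"  # placeholder
TOKEN = "<access token>"                       # placeholder
CLUSTER_ID = "<cluster id>"                    # placeholder

response = requests.get(
    "{}/{}/volumes".format(API_URL, CLUSTER_ID),
    headers={"Authorization": "Bearer {}".format(TOKEN)})
assert response.status_code == 200
print(sorted(volume["name"] for volume in response.json()["volumes"]))
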
Example #5
def up_gluster_nodes(valid_session_credentials):
    """
    Generate valid host info from the GetNodeList api call for hosts tagged
    tendrl/node that are UP.
    """
    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    cluster_list = api.get_nodes()
    return [x for x in cluster_list["nodes"]
            if "tendrl/node" in x["tags"] and x["status"] == "UP"]
Example #6
def test_cluster_import(application, valid_session_credentials,
                        unmanaged_cluster):
    """
    Check that Import button really imports the cluster
    """
    """
    :step:
      Log in to Web UI and get the first cluster from the cluster list.
      Check that it's not imported yet.
    :result:
      Cluster is in the correct state to start import
    """
    clusters = application.collections.clusters.get_clusters()
    test_cluster = tools.choose_cluster(clusters,
                                        unmanaged_cluster["cluster_id"],
                                        unmanaged_cluster["short_name"])
    if test_cluster.managed == "Yes":
        test_cluster.unmanage()
    pytest.check(test_cluster.managed == "No",
                 "Value of cluster's Managed attribute: {}".format(
                     test_cluster.managed),
                 issue="No value in a freshly installed cluster")
    """
    :step:
      Get the cluster's details via API. Check that API shows the same state
    :result:
      Cluster state in API is the same as in Web UI
    """
    tendrl_api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    api_cluster = tendrl_api.get_cluster(test_cluster.cluster_id)
    pytest.check(api_cluster["is_managed"] == "no",
                 "is_managed: {}\nThere should be ``no``.".format(
                     api_cluster["is_managed"]),
                 issue="No value in a freshly installed cluster")
    """
    :step:
      Import the cluster in Web UI and check its state has changed in both Web UI and API
    :result:
      Cluster is imported
    """
    import_success = test_cluster.cluster_import()
    if import_success:
        api_cluster = tendrl_api.get_cluster(test_cluster.cluster_id)
        pytest.check(
            api_cluster["is_managed"] == "yes",
            "is_managed: {}\nThere should be ``yes``.".format(
                api_cluster["is_managed"]))
    else:
        pytest.check(False, "Import failed")
        test_cluster.unmanage()
Example #7
def test_create_volume_invalid(
        valid_cluster_id,
        invalid_volume_name,
        invalid_volume_configuration,
        valid_session_credentials):
    """@pylatest api/gluster.create_volume_invalid
        API-gluster: create_volume
        ******************************

        .. test_metadata:: author [email protected]

        Description
        ===========

        Get list of attributes needed to use in cluster volume creation with given cluster_id.

        .. test_step:: 1

                Connect to Tendrl API via POST request to ``APIURL/:cluster_id/GlusterCreateVolume``
                Where cluster_id is set to predefined value.

                When an attribute is set to None, it is set to ``null`` in the request JSON.
                e.g. {
                    "Volume.replica_count": "2",
                    "Volume.bricks": null,
                    "Volume.volname": "Volume_invalid",
                    "Volume.force": true}

        .. test_result:: 1

                Server should return response in JSON format:

                Return code should be **202** with data ``{"message": "Accepted"}``.
                job should fail.
                """

    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)

    job_id = api.create_volume(
        valid_cluster_id,
        invalid_volume_configuration)["job_id"]
    # TODO: correctly check the server response or the etcd job status
    api.wait_for_job_status(
            job_id,
            status="failed")
Example #8
def valid_bricks_for_crud_volume(valid_session_credentials, managed_cluster,
                                 valid_brick_name, valid_devices):
    """
    Creates bricks for CRUD volume tests.
    """

    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)

    nodes = list(managed_cluster["nodes"].values())

    LOGGER.debug("nodes: {}".format(nodes))
    LOGGER.debug("devices: {}".format(valid_devices))

    job_id = api.create_bricks(managed_cluster["cluster_id"], nodes,
                               valid_devices, valid_brick_name)["job_id"]
    api.wait_for_job_status(job_id)
    # fixed delay, presumably to let the freshly created bricks settle
    # before dependent volume tests use them
    import time
    time.sleep(60)
def test_cluster_import_invalid(valid_session_credentials, cluster_id):
    """
    Negative import of a gluster cluster.
    """
    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    """
    :step:
      Create import cluster job via API with invalid cluster id.
    :result:
      API responds with a 404 error and no job id is returned.
    """
    asserts = {
        "ok": False,
        "reason": 'Not Found',
        "status": 404,
    }
    response = api.import_cluster(cluster_id, asserts_in=asserts)
    pytest.check("job_id" not in response, "job id is not returned")
    pytest.check("errors" in response, "there is errors field in response")
    LOGGER.info("errors reported in response: %s", response.get("errors"))
Example #10
def importfail_setup_nodeagent_stopped_on_one_node(request, unmanaged_cluster,
                                                   valid_session_credentials):
    """
    This fixture stops node agent on one storage machine. During teardown it
    makes sure that the node agent is back and then runs unmanage job to
    cleanup state after a failed import.

    Don't use this fixture if you are not running negative test cases for
    import cluster feature.
    """
    with pytest_ansible_playbook.runner(
            request,
            ["test_setup.tendrl_nodeagent_stopped_on_one_node.yml"],
            ["test_teardown.tendrl_nodeagent_stopped_on_one_node.yml"]):
        yield
    # And now something completely different: we need to run unmanage because
    # the cluster is not managed after a failed import, which would block any
    # future import attempt.
    tendrl = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    job = tendrl.unmanage_cluster(unmanaged_cluster["cluster_id"])
    tendrl.wait_for_job_status(job["job_id"])
Example #11
def test_volumes_list(valid_session_credentials, cluster_reuse,
                      valid_trusted_pool_reuse):
    """@pylatest api/gluster.volumes_list
        API-gluster: volumes_list
        ******************************

        .. test_metadata:: author [email protected]

        Description
        ===========

        List volumes for given cluster via API.

        .. test_step:: 1

                Connect to Tendrl API via GET request to ``APIURL/:cluster_id/volumes``
                Where cluster_id is set to predefined value.

        .. test_result:: 1

                Server should return response in JSON format:

                Return code should be **200** with data ``{"volumes": [{...}, ...]}``.
                """

    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    glv_cmd = gluster.GlusterVolume()

    # list of volumes from Tendrl api
    t_volumes = api.get_volume_list(cluster_reuse['cluster_id'])
    t_volume_names = [volume["name"] for volume in t_volumes["volumes"]]
    t_volume_names.sort()
    # list of volumes from Gluster command output
    g_volume_names = glv_cmd.get_volume_names()
    g_volume_names.sort()

    LOGGER.info("list of volumes from Tendrl api: %s", str(t_volume_names))
    LOGGER.info("list of volumes from gluster: %s", g_volume_names)
    pytest.check(
        t_volume_names == g_volume_names,
        "List of volumes from Gluster should be the same as from Tendrl API.")
def test_cluster_import_invalid_uuid(valid_session_credentials, cluster_id):
    """
    Negative import of a gluster cluster using a cluster id value which
    completely breaks the criteria for a uuid.
    """
    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    """
    :step:
      Create import cluster job via API with broken cluster id.
    :result:
      API returns some error and refuses to process it.
    """
    asserts = {
        "ok": False,
        "status": 400,
        "reason": "Bad Request",
    }
    response = api.import_cluster(cluster_id, asserts_in=asserts)
    pytest.check("job_id" not in response, "job id is not returned")
    pytest.check("errors" in response, "there is errors field in response")
    LOGGER.info("errors reported in response: %s", response.get("errors"))
Example #13
def test_cluster_unmanage(application, valid_session_credentials,
                          managed_cluster):
    """
    Unmanage cluster in Web UI
    """
    """
    :step:
      Log in to Web UI and get the first cluster from the cluster list.
      Check that it's imported.
    :result:
      Cluster is in the correct state to start unmanage
    """
    clusters = application.collections.clusters.get_clusters()
    test_cluster = tools.choose_cluster(clusters,
                                        managed_cluster["cluster_id"],
                                        managed_cluster["short_name"])
    tendrl_api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    api_cluster = tendrl_api.get_cluster(test_cluster.cluster_id)
    pytest.check(
        api_cluster["is_managed"] == "yes",
        "is_managed: {}\nThere should be ``yes``.".format(
            api_cluster["is_managed"]))
    """
    :step:
      Unmanage the cluster in Web UI and check its state has changed in both Web UI and API
    :result:
      Cluster is unmanaged
    """
    unmanage_success = test_cluster.unmanage()
    if not unmanage_success:
        pytest.check(False, "Unmanage failed")
        test_cluster.unmanage()
    else:
        api_cluster = tendrl_api.get_cluster(test_cluster.cluster_id)
        pytest.check(
            api_cluster["is_managed"] == "no",
            "is_managed: {}\nThere should be ``no``.".format(
                api_cluster["is_managed"]))
Example #14
def test_cluster_import_invalid(valid_session_credentials, cluster_id, status):
    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    """@pylatest api/gluster.cluster_import
        .. test_step:: 1

            Create import cluster job via API with invalid cluster id.

        .. test_result:: 1

            API returns response with json: `{"job_id":job_id}`
        """
    job_id = api.import_cluster(cluster_id)["job_id"]
    """@pylatest api/gluster.cluster_import
        .. test_step:: 2

            Repeatedly check if job with `job_id` from test_step 1 is
            `finished` or `failed`.

        .. test_result:: 2

            Job status should be in status given by `status` parameter.
        """
    api.wait_for_job_status(job_id, status=status)
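
``cluster_id`` and ``status`` arrive here as test parameters; the example does not show where they are generated. One way such negative cases might be supplied, shown purely as an illustration with invented values (the real suite may use fixtures instead):

import pytest


@pytest.mark.parametrize("cluster_id,status", [
    ("00000000-0000-0000-0000-000000000000", "failed"),  # unknown cluster id
    ("not-a-uuid-at-all", "failed"),                     # breaks uuid format
])
def test_cluster_import_invalid_sketch(cluster_id, status):
    ...
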
Example #15
def test_volume_brick_list(valid_session_credentials, managed_cluster):
    """
    List bricks for given volume via API.

    :step:
      Connect to Tendrl API via GET request to
      ``APIURL/:cluster_id/volumes/:volume_id/bricks``
      Where cluster_id is set to predefined value.
    :result:
      Server should return response in JSON format:
      Return code should be **200** with data ``{"bricks": [{...}, ...]}``.
    """

    # get list of volumes from Tendrl
    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    t_volumes = api.get_volume_list(managed_cluster['cluster_id'])

    # perform brick list test for each volume
    for t_volume in t_volumes["volumes"]:
        LOGGER.info("Compare bricks for volume: %s", t_volume["name"])
        gl_volume = gluster.GlusterVolume(volume_name=t_volume["name"])
        gl_volume.info()

        t_bricks = api.get_brick_list(managed_cluster['cluster_id'],
                                      t_volume["vol_id"])
        t_brick_list = {brick["brick_path"] for brick in t_bricks["bricks"]}

        g_brick_list = set(gl_volume.bricks)

        LOGGER.info("list of bricks for '%s' from Tendrl api: %s",
                    t_volume["name"], str(t_brick_list))
        LOGGER.info("list of bricks for '%s' from gluster: %s",
                    t_volume["name"], g_brick_list)
        pytest.check(
            t_brick_list == g_brick_list,
            "List of bricks for '{}' from Tendrl API should be the same "
            "as from Gluster.".format(t_volume["name"]))
Example #16
def test_cluster_import_invalid(valid_session_credentials, node_ids, asserts):
    """@pylatest api/gluster.cluster_import
        .. test_step:: 1

        Get list of ids of available nodes.

        .. test_result:: 1

                Server should return response in JSON format:

                {
                  ...
                  {
                    "fqdn": hostname,
                    "machine_id": some_id,
                    "node_id": node_id
                  },
                  ...
                }

                Return code should be **200** with data ``{"message": "OK"}``.

        """
    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    """@pylatest api/gluster.cluster_import
        .. test_step:: 2

            Send POST request to Tendrl API ``APIURL/GlusterImportCluster``

        .. test_result:: 2

            Server should return response in JSON format with message set in
            ``asserts`` test parameter.

        """
    api.import_cluster(node_ids, asserts_in=asserts)
Example #17
def test_cluster_import_valid(valid_session_credentials, cluster_reuse,
                              valid_trusted_pool_reuse):
    """@pylatest api/gluster.cluster_import
        .. test_step:: 1

            Check that fqdns of nodes in tendrl correspond with fqdns
            from ``gluster`` command.

        .. test_result:: 1

            Sets of fqdns of nodes in tendrl and from ``gluster`` command
            should be the same.

        """
    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    cluster_id = cluster_reuse["cluster_id"]
    pytest.check(cluster_id is not None,
                 "Cluster id is: {}".format(cluster_id))
    for _ in range(12):
        cluster = api.get_cluster(cluster_id)
        nodes = [node for node in cluster["nodes"] if node["fqdn"]]
        if len(nodes) == len(valid_trusted_pool_reuse):
            break
        time.sleep(10)
    else:
        pytest.check(
            len(valid_trusted_pool_reuse) == len(cluster["nodes"]),
            "Number of nodes from gluster trusted pool ({}) should be "
            "the same as number of nodes in tendrl ({})".format(
                len(valid_trusted_pool_reuse), len(cluster["nodes"])))
    node_fqdns = [x["fqdn"] for x in nodes]
    pytest.check(
        set(valid_trusted_pool_reuse) == set(node_fqdns),
        "fqdns get from gluster trusted pool ({}) should correspond "
        "with fqdns of nodes in tendrl ({})".format(valid_trusted_pool_reuse,
                                                    node_fqdns))
    """@pylatest api/gluster.cluster_import
        .. test_step:: 2

            Send POST request to Tendrl API ``APIURL/clusters/:cluster_id/import``

        .. test_result:: 2

            Server should return response in JSON format:

                {
                  "job_id": job_id
                }

            Return code should be **202**
                with data ``{"message": "Accepted"}``.

        """
    job_id = api.import_cluster(cluster_id)["job_id"]

    api.wait_for_job_status(job_id)

    integration_id = api.get_job_attribute(
        job_id=job_id,
        attribute="TendrlContext.integration_id",
        section="parameters")
    LOGGER.debug("integration_id: %s" % integration_id)

    imported_clusters = [
        x for x in api.get_cluster_list()
        if x["integration_id"] == integration_id
    ]
    pytest.check(
        len(imported_clusters) == 1, "Job list integration_id '{}' should be "
        "present in cluster list.".format(integration_id))
    # TODO add test case for checking imported machines
    msg = "In tendrl should be a same machines "\
          "as from `gluster peer status` command ({})"
    LOGGER.debug("debug imported clusters: %s" % imported_clusters)
    pytest.check([
        x["fqdn"] in valid_trusted_pool_reuse
        for x in imported_clusters[0]["nodes"]
    ], msg.format(valid_trusted_pool_reuse))
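
The retry loop above leans on Python's ``for ... else``: the ``else`` branch runs only when the loop finished without ``break``, which makes it a compact "retried N times and still not satisfied" fallback. A stripped-down version of the same pattern:

import time


def wait_until(predicate, retries=12, delay=10):
    """Return True as soon as predicate() holds, False if every retry fails."""
    for _ in range(retries):
        if predicate():
            break
        time.sleep(delay)
    else:
        # reached only when the loop was never interrupted by break
        return False
    return True
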
Example #18
def test_cluster_unmanage_valid(valid_session_credentials, cluster_reuse,
                                valid_trusted_pool_reuse):
    """@pylatest api/gluster.cluster_unmanage
        .. test_step:: 1

            Check that tested cluster is correctly managed by Tendrl.

        .. test_result:: 1

            There is in Tendrl ``"is_managed":"yes"`` for cluster with id [cluster_id].
            Graphite contains data related to health of tested cluster.

        """
    tendrl_api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    graphite_api = graphiteapi.GraphiteApi()

    cluster_id = cluster_reuse["cluster_id"]
    pytest.check(cluster_id is not None,
                 "Cluster id is: {}".format(cluster_id))
    pytest.check(
        cluster_reuse["is_managed"] == "yes",
        "is_managed: {}\nThere should be ``yes``.".format(
            cluster_reuse["is_managed"]))

    # graphite target uses short name if it is set
    if cluster_reuse["short_name"]:
        cluster_target_id = cluster_reuse["short_name"]
    else:
        cluster_target_id = cluster_reuse["cluster_id"]
    # it takes 15 minutes to refresh the data in the Host status panel
    for i in range(31):
        cluster_health = graphite_api.get_datapoints(
            target="tendrl.clusters.{}.status".format(cluster_target_id))

        if cluster_health:
            break
        else:
            time.sleep(30)
    pytest.check(
        cluster_health, """graphite health of cluster {}: {}
        There should be related data.""".format(cluster_id, cluster_health))
    """@pylatest api/gluster.cluster_unmanage
        .. test_step:: 2

            Send POST request to Tendrl API ``APIURL/clusters/:cluster_id/unmanage``.

        .. test_result:: 2

            Server should return response in JSON format:

                {
                  "job_id": job_id
                }

            Return code should be **202**
                with data ``{"message": "Accepted"}``.

        """
    job_id = tendrl_api.unmanage_cluster(cluster_id)["job_id"]

    tendrl_api.wait_for_job_status(job_id)
    """@pylatest api/gluster.cluster_unmanage
        .. test_step:: 3

            Check that tested cluster is correctly managed by Tendrl.

        .. test_result:: 3

            There is in Tendrl ``"is_managed": "no"`` for cluster with id [cluster_id].
            Graphite contains no data related to health of tested cluster.

        """
    # TODO(fbalak) remove this workaround when BZ 1589321 is resolved
    for i in range(15):
        cluster_list = tendrl_api.get_cluster_list()
        if len(cluster_list) > 0:
            break
        else:
            time.sleep(10)
    assert cluster_list
    for cluster in cluster_list:
        if cluster["cluster_id"] == cluster_id:
            unmanaged_cluster = cluster
            break
    pytest.check(
        unmanaged_cluster["is_managed"] == "no",
        "is_managed: {}\nThere should be ``no``.".format(
            unmanaged_cluster["is_managed"]))

    cluster_health = graphite_api.get_datapoints(
        target="tendrl.clusters.{}.status".format(cluster_target_id))
    pytest.check(
        cluster_health == [], """graphite health of cluster {}: `{}`
        There should be `[]`.""".format(cluster_id, cluster_health))
    """@pylatest api/gluster.cluster_unmanage
        .. test_step:: 4

            Reimport cluster and check that tested cluster is correctly managed by Tendrl.

        .. test_result:: 4

            There is ``"is_managed": "yes"`` in Tendrl for cluster with id [cluster_id].
        """
    job_id = tendrl_api.import_cluster(cluster_id)["job_id"]
    tendrl_api.wait_for_job_status(job_id)
    for cluster in tendrl_api.get_cluster_list():
        if cluster["cluster_id"] == cluster_id:
            managed_cluster = cluster
            break
    pytest.check(
        managed_cluster["is_managed"] == "yes",
        "is_managed: {}\nThere should be ``yes``.".format(
            managed_cluster["is_managed"]))
Example #19
def test_delete_volume_valid(
        cluster_reuse,
        valid_session_credentials):
    """@pylatest api/gluster.delete_volume
        API-gluster: delete_volume
        ******************************

        .. test_metadata:: author [email protected]

        Description
        ===========

        Delete gluster volume ``Vol_test`` via API.

        .. test_step:: 1

                Connect to Tendrl API via POST request to ``APIURL/:cluster_id/GlusterDeleteVolume``
                Where cluster_id is set to predefined value.

        .. test_result:: 1

                Server should return response in JSON format:

                Return code should be **202** with data ``{"message": "Accepted"}``.
                job should finish.
                """

    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    volume_id = gluster.GlusterVolume(VOLUME_NAME).get_volume_id()
    volume_data = {
        "Volume.volname": VOLUME_NAME,
        "Volume.vol_id": volume_id
    }

    job_id = api.delete_volume(cluster_reuse["cluster_id"], volume_data)["job_id"]
    api.wait_for_job_status(
        job_id)
    """@pylatest api/gluster.create_volume
        API-gluster: create_volume
        ******************************

        .. test_metadata:: author [email protected]

        Description
        ===========

        Check if there is deleted volume on gluster nodes via CLI.

        .. test_step:: 1

            Connect to gluster node machine via ssh and run
            ``gluster volume info command``

        .. test_result:: 1

            There should not be listed gluster volume named ``Vol_test``.

            """
    storage = gluster.GlusterCommon()
    storage.find_volume_name(VOLUME_NAME, False)
    """@pylatest api/gluster.create_volume
        API-gluster: create_volume
        ******************************

        .. test_metadata:: author [email protected]

        Description
        ===========

        Check if there is not deleted volume on gluster nodes via CLI.

        .. test_step:: 3

            Get response from ``hostname/api/1.0/:cluster_id:/GetVolumeList``
            API call.

        .. test_result:: 3

            In response should not be listed gluster volume with ``valid_volume_id``

            """
    volumes = api.get_volume_list(cluster_reuse["cluster_id"])
    pytest.check(
        volume_id not in list(volumes),
        "volume id {} should not be among volume ids in tendrl: {}".format(
            volume_id, list(volumes)))
Example #20
def test_create_volume_valid(
        cluster_reuse,
        valid_session_credentials,
        valid_bricks_for_crud_volume,
        volume_conf_2rep):
    """@pylatest api/gluster.create_volume_valid
        API-gluster: create_volume
        ******************************

        .. test_metadata:: author [email protected]
        .. test_metadata:: author [email protected]

        Description
        ===========

        Get list of attributes needed to use in cluster volume creation with given cluster_id.

        .. test_step:: 1

                Connect to Tendrl API via POST request to ``APIURL/:cluster_id/GlusterCreateVolume``
                Where cluster_id is set to predefined value.

        .. test_result:: 1

                Server should return response in JSON format:

                Return code should be **202** with data ``{"message": "Accepted"}``.
                job should finish.
                """
    volume_conf_2rep["Volume.volname"] = VOLUME_NAME
    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)

    job_id = api.create_volume(
        cluster_reuse["cluster_id"],
        volume_conf_2rep)["job_id"]
    if api.wait_for_job_status(job_id) == "finished":
        """@pylatest api/gluster.create_volume
            API-gluster: create_volume
            ******************************

            .. test_metadata:: author [email protected]
            .. test_metadata:: author [email protected]

            Description
            ===========

            Check if there is created volume on gluster nodes via CLI.

            .. test_step:: 2

                Connect to gluster node machine via ssh and run
                ``gluster volume info command``

            .. test_result:: 2

                There should be listed gluster volume named ``Vol_test``.

                """
        storage = gluster.GlusterCommon()
        storage.find_volume_name(VOLUME_NAME)

        volume = gluster.GlusterVolume(VOLUME_NAME)
        volume_id = volume.get_volume_id()
        volumes = api.get_volume_list(cluster_reuse["cluster_id"])
        volume_tendrl = [volume_t[volume_id] for volume_t in volumes
                         if volume_id in volume_t]
        pytest.check(
            len(volume_tendrl) == 1,
            """There should be only one volume
            with id == {}""".format(volume_id))

        if len(volume_tendrl) == 1:
            volume_tendrl = volume_tendrl[0]
            for (x, y, z) in [
                 ("name", volume.name, volume_tendrl["name"]),
                 ("id", volume.id, volume_tendrl["vol_id"]),
                 ("status", volume.status, volume_tendrl["status"]),
                 ("stripe_count", volume.stripe_count, volume_tendrl["stripe_count"]),
                 ("replica_count", volume.replica_count, volume_tendrl["replica_count"]),
                 ("brick_count", volume.brick_count, volume_tendrl["brick_count"]),
                 ("snapshot_count", volume.snap_count, volume_tendrl["snap_count"])]:
                pytest.check(
                    y == z,
                    """Volume {} in storage {} and in Tendrl {} are the same.""".format(x, y, z))
Example #21
def test_create_brick_valid(valid_cluster_id, valid_brick_path, valid_devices,
                            valid_session_credentials):
    """@pylatest api/gluster.create_brick_valid
        API-gluster: create_brick
        ******************************

        .. test_metadata:: author [email protected]

        Description
        ===========

        Create ``GlusterCreateBrick`` job with a ``brick_name`` on specified
        ``valid_device`` with nodes from cluster with ``valid_cluster_id``

        .. test_step:: 1

                Connect to Tendrl API via POST request to ``APIURL/:cluster_id/GlusterCreateBrick``
                Where cluster_id is set to predefined value.

        .. test_result:: 1

                Server should return response in JSON format:

                Return code should be **202** with data ``{"message": "Accepted"}``.
                job should finish.
                """
    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    brick_name = re.search("(.*)_mount",
                           os.path.split(valid_brick_path)[1]).group(1)

    cluster_info = [
        x for x in api.get_cluster_list()
        if x["integration_id"] == valid_cluster_id
    ]
    nodes = cluster_info[0]["nodes"]

    job_id = api.create_bricks(valid_cluster_id, nodes, valid_devices,
                               brick_name)["job_id"]
    api.wait_for_job_status(job_id)
    """@pylatest api/gluster.create_brick_valid
        API-gluster: create_brick
        ******************************

        .. test_metadata:: author [email protected]

        Description
        ===========

        Check if the bricks were created on hosts of cluster with ``valid_cluster_id``.

        .. test_step:: 2

                Via ssh check on cluster nodes that there exists a directory called
                ``brick_name`` in `/tendrl_gluster_bricks/brick_mount`:
                    [ -d /tendrl_gluster_bricks/brick_mount/``brick_name`` ] && echo "exists"

        .. test_result:: 2

                There should be string ``exists`` in output of ssh.
                """
    SSH = usmssh.get_ssh()
    pytest.check(
        len(nodes) > 0,
        "In cluster have to be at least one node. There are {}".format(
            len(nodes)))
    cmd_exists = "[ -d {} ] && echo 'exists'".format(valid_brick_path)
    cmd_fs = 'mount | grep $(df  --output=source {} | tail -1)'.format(
        valid_brick_path)
    expected_output = '/dev/mapper/tendrl{0}_vg-tendrl{0}_lv on {1} type xfs'\
        .format(brick_name, valid_brick_path)
    for node in nodes:
        _, output, _ = SSH[nodes[node]["fqdn"]].run(cmd_exists)
        output = str(output).strip("'b\\n")
        pytest.check(
            output == "exists",
            "Output of command `{}` should be `exists`. Output is: `{}`".
            format(cmd_exists, output))
        """@pylatest api/gluster.create_brick_valid
            API-gluster: create_brick
            ******************************

            .. test_metadata:: author [email protected]

            Description
            ===========

            Check if the bricks have ``xfs`` filesystem and set correct device.

            .. test_step:: 3

                    Via ssh check the filesystem and device of the directory with
                    ``valid_brick_path``:
                        mount | grep $(df  --output=source ``valid_brick_path`` | tail -1)

            .. test_result:: 3

                    Output of the command should be:
                ``/dev/mapper/tendrl`brick_name`_vg-tendrl`brick_name`_lv on `brick_path` type xfs``
                    """
        _, output, _ = SSH[nodes[node]["fqdn"]].run(cmd_fs)
        output = str(output).strip("'b\\n")
        output = re.sub(r"\s*\(.*\)$", "", output)
        pytest.check(
            output == expected_output,
            "Output of command {} should be `{}`. Output is: `{}`".format(
                cmd_fs, expected_output, output))
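
The ``str(output).strip("'b\\n")`` calls above work around the ssh helper apparently returning command output as ``bytes`` (so ``str()`` yields something like ``b'exists\n'``). Assuming the output really is ``bytes``, decoding it is the cleaner equivalent:

raw_output = b"exists\n"  # what the ssh run() call is assumed to return
text = raw_output.decode("utf-8").strip()
assert text == "exists"
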
def test_cluster_import_fail_with_one_nodeagent_down(
        valid_session_credentials, unmanaged_cluster,
        importfail_setup_nodeagent_stopped_on_one_node):
    """
    Negative import of a gluster cluster when the node agent is not running on
    one storage machine. Import should fail in such a case.
    """
    tendrl = glusterapi.TendrlApiGluster(auth=valid_session_credentials)

    # this test can't go on if we don't have proper cluster id at this point
    assert unmanaged_cluster["cluster_id"] is not None

    # TODO: this comes from test_cluster_import_valid, move this into cluster reuse fixture?
    """
    :step:
      Check that fqdns of nodes in tendrl correspond with fqdns
      from ``gluster`` command.
    :result:
      Sets of fqdns of nodes in tendrl and from ``gluster`` command
      should be the same.
    """
    # get nodes from gluster interface
    gl = gluster.GlusterCommon()
    gl_nodes = gl.get_hosts_from_trusted_pool(
        CONF.config["usmqe"]["cluster_member"])

    retry_num = 12
    for i in range(retry_num):
        cluster = tendrl.get_cluster(unmanaged_cluster["cluster_id"])
        if len(cluster["nodes"]) == len(gl_nodes):
            LOGGER.debug(
                "cluster (via tendrl API) has expected number of nodes")
            break
        if i != retry_num - 1:
            msg = "cluster (via tendrl API) has unexpected number of nodes, retrying API query"
            LOGGER.info(msg)
            time.sleep(10)
    else:
        assert len(cluster["nodes"]) == len(gl_nodes)
    node_fqdn_list = [
        node["fqdn"] for node in cluster["nodes"] if node["fqdn"]
    ]
    assert set(gl_nodes) == set(node_fqdn_list)
    """
    :step:
      Start import job for the cluster.
    :result:
      The job starts and finishes with failed status after some time.
    """
    LOGGER.info("starting import cluster job")
    import_job = tendrl.import_cluster(unmanaged_cluster["cluster_id"])
    LOGGER.info("import (job id {}) submited, waiting for completion".format(
        import_job["job_id"]))
    tendrl.wait_for_job_status(import_job["job_id"], status="failed")
    """
    :step:
      Using integration id from the import job, find cluster we tried to import
      in a cluster list.
    :result:
      There is exactly one such cluster, and it's not managed (aka not imported).
    """
    integration_id = tendrl.get_job_attribute(
        job_id=import_job["job_id"],
        attribute="TendrlContext.integration_id",
        section="parameters")
    LOGGER.debug("integration_id: %s" % integration_id)
    clusters = [
        x for x in tendrl.get_cluster_list()
        if x["integration_id"] == integration_id
    ]
    pytest.check(
        len(clusters) == 1, "Job list integration_id '{}' should be "
        "present in cluster list.".format(integration_id))
    pytest.check(clusters[0]['is_managed'] == 'no',
                 'cluster we tried to import should be in unmanaged state')
Example #23
def test_cluster_import_valid(valid_session_credentials, valid_trusted_pool):
    """@pylatest api/gluster.cluster_import
        .. test_step:: 1

        Get list of ids of available nodes.

        .. test_result:: 1

                Server should return response in JSON format:

                {
                  ...
                  {
                    "fqdn": hostname,
                    "machine_id": some_id,
                    "node_id": node_id
                  },
                  ...
                }

                Return code should be **200** with data ``{"message": "OK"}``.

        """
    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    """@pylatest api/gluster.cluster_import
        .. test_step:: 2

            Send POST request to Tendrl API ``APIURL/GlusterImportCluster``

        .. test_result:: 2

            Server should return response in JSON format:

                {
                  "job_id": job_id
                }

            Return code should be **202**
                with data ``{"message": "Accepted"}``.

        """
    nodes = api.get_nodes()
    node_ids = None
    for cluster in nodes["clusters"]:
        if cluster["sds_name"] == "gluster":
            node_ids = cluster["node_ids"]
            break
    node_fqdns = []
    msg = "`sds_pkg_name` of node {} should be `gluster`, it is {}"
    for node in nodes["nodes"]:
        if node["node_id"] in node_ids:
            pytest.check(
                node["detectedcluster"]["sds_pkg_name"] == "gluster",
                msg.format(node["fqdn"],
                           node["detectedcluster"]["sds_pkg_name"]))
            node_fqdns.append(node["fqdn"])
    node_ids = [
        x["node_id"] for x in nodes["nodes"] if x["fqdn"] in valid_trusted_pool
    ]
    pytest.check(
        len(valid_trusted_pool) == len(node_ids),
        "number of nodes in trusted pool ({}) should correspond "
        "with number of imported nodes ({})".format(len(valid_trusted_pool),
                                                    len(node_ids)))

    job_id = api.import_cluster(node_ids)["job_id"]

    api.wait_for_job_status(job_id)

    integration_id = api.get_job_attribute(
        job_id=job_id,
        attribute="TendrlContext.integration_id",
        section="parameters")
    LOGGER.debug("integration_id: %s" % integration_id)

    imported_clusters = [
        x for x in api.get_cluster_list()
        if x["integration_id"] == integration_id
    ]
    pytest.check(
        len(imported_clusters) == 1, "Job list integration_id '{}' should be "
        "present in cluster list.".format(integration_id))
    # TODO add test case for checking imported machines
    msg = "In tendrl should be a same machines "\
          "as from `gluster peer status` command ({})"
    LOGGER.debug("debug imported clusters: %s" % imported_clusters)
    pytest.check([
        x["fqdn"] in valid_trusted_pool
        for x in imported_clusters[0]["nodes"].values()
    ], msg.format(valid_trusted_pool))
Example #24
def test_cluster_create_valid(valid_session_credentials, valid_nodes,
                              cluster_name):
    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    """@pylatest api/gluster.cluster_import
        .. test_step:: 1

            Send POST request to Tendrl API ``APIURL/GlusterCreateCluster``

        .. test_result:: 1

            Server should return response in JSON format:

                {
                  "job_id": job_id
                }

            Return code should be **202**
                with data ``{"message": "Accepted"}``.

        """
    nodes = []
    provisioner_ip = None
    network = pytest.config.getini("usm_network_subnet")
    node_ids = []
    ips = None
    for x in valid_nodes:
        if "tendrl/server" in x["tags"]:
            continue
        for y in x["networks"]:
            if y["subnet"] == network:
                ips = y["ipv4"]
                break
        pytest.check(
            type(ips) == list,
            "type of ip addresses returned from api have to be list,"
            " it is: {}".format(type(ips)))
        pytest.check(
            len(ips) == 1,
            "length of ipv4 addresses list have to be 1, otherwise it is not valid"
            " configuration for this test, it is: {}".format(len(ips)))
        nodes.append({"role": "glusterfs/node", "ip": ips[0]})
        node_ids.append(x["node_id"])
        if "provisioner/gluster" in x["tags"]:
            provisioner_ip = ips[0]
    LOGGER.debug("node_ips: %s" % nodes)
    LOGGER.debug("provisioner: %s" % provisioner_ip)
    """@pylatest api/gluster.cluster_create
        .. test_step:: 2

        Check if there is at least one gluster node for cluster creation.

        .. test_result:: 2

        Test passes if there is at least one gluster node.
        """
    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    pytest.check(
        len(nodes) > 0, "There have to be at least one gluster node."
        "There are {}".format(len(valid_nodes)))
    job_id = api.create_cluster(cluster_name, str(uuid.uuid4()), nodes,
                                provisioner_ip, network)["job_id"]

    api.wait_for_job_status(job_id)

    integration_id = api.get_job_attribute(
        job_id=job_id,
        attribute="TendrlContext.integration_id",
        section="parameters")
    LOGGER.debug("integration_id: %s" % integration_id)

    api.get_cluster_list()
    # TODO(fbalak) remove this sleep after
    #              https://github.com/Tendrl/api/issues/159 is resolved.
    import time
    time.sleep(30)

    imported_clusters = [
        x for x in api.get_cluster_list()
        if x["integration_id"] == integration_id
    ]
    pytest.check(
        len(imported_clusters) == 1, "Job list integration_id '{}' should be "
        "present in cluster list.".format(integration_id))

    imported_nodes = imported_clusters[0]["nodes"]
    pytest.check(
        len(imported_nodes) == len(nodes),
        "In cluster should be the same amount of hosts"
        "(is {}) as is in API call for cluster creation."
        "(is {})".format(len(imported_nodes), len(nodes)))

    pytest.check(
        set(node_ids) == set(imported_nodes.keys()),
        "There should be imported these nodes: {}"
        "There are: {}".format(node_ids, imported_nodes.keys()))