Example #1
def valid_trusted_pool():
    """
    Return a list of node hostnames from the created trusted pool.
    """
    storage = gluster.GlusterCommon()
    host = inventory.role2hosts(pytest.config.getini("usm_gluster_role"))[0]
    return storage.get_hosts_from_trusted_pool(host)
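
The decorator is not shown in this snippet, but the helper reads like a pytest fixture; a minimal usage sketch under that assumption (the wrapper fixture and the test below are illustrative only, not part of the original code):

import pytest

@pytest.fixture
def trusted_pool_hosts():
    # hypothetical wrapper exposing the trusted pool hostnames as a fixture
    return valid_trusted_pool()

def test_trusted_pool_is_not_empty(trusted_pool_hosts):
    # the created trusted pool should report at least one peer
    assert len(trusted_pool_hosts) > 0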
Example #2
def test_nodes_list(
        valid_session_credentials,
        managed_cluster):
    """
    List nodes for a given cluster via API.

    :step:
      Connect to Tendrl API via GET request to ``APIURL/:cluster_id/nodes``
      where ``cluster_id`` is set to a predefined value.
    :result:
      Server should return a response in JSON format:
      Return code should be **200** with data ``{"nodes": [{...}, ...]}``.
    """

    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)

    # list of nodes from Tendrl api
    t_nodes = api.get_node_list(managed_cluster['cluster_id'])
    t_node_names = {node["fqdn"] for node in t_nodes["nodes"]}
    # list of nodes from Gluster command output
    gl = gluster.GlusterCommon()
    g_node_names = set(
        gl.get_hosts_from_trusted_pool(
            CONF.config["usmqe"]["cluster_member"]))

    LOGGER.info("list of nodes from Tendrl api: %s", str(t_node_names))
    LOGGER.info("list of nodes from gluster: %s", g_node_names)
    pytest.check(
        t_node_names == g_node_names,
        "List of nodes from Gluster should be the same as from Tendrl API.")
Example #3
def valid_trusted_pool_reuse():
    """
    Return a list of node hostnames from the trusted pool that contains the
    node specified by the usm_cluster_member option in usm.ini.
    """
    storage = gluster.GlusterCommon()
    return storage.get_hosts_from_trusted_pool(
        pytest.config.getini("usm_cluster_member"))
Example #4
    def import_cluster(self, hosts=None):
        """
        Import the SELECTED cluster.

        Parameters:
            hosts (list): list of dictionaries in the form
                          {'hostname': <hostname>, 'release': <release>, ...},
                          used only for the host check

        Returns:
            hosts list
        """
        import time
        if hosts is None:
            # get gluster hosts
            host = next(iter(self.hosts)).name
            storage = gluster.GlusterCommon()
            hosts = [{
                'hostname': hostname,
                'release': None,
                'role': 'Peer'
            } for hostname in storage.get_hosts_from_trusted_pool(host)]


# ceph variant if needed, cluster name is required
#                # get ceph hosts
#                # TODO get the cluster name from somewhere
#                #       - configuration, cluster_id param or ...
#                cluster_name = cluster_name or 'test_name'
#                # NOTE there are no full hostnames available in ceph
#                monitors = []
#                for host in self.hosts:
#                    if host.role.lower() == 'monitor':
#                        monitors.append(host.name)
#                pytest.check(
#                    monitors != [],
#                    'There has to be a host with Monitor role '
#                    'in ceph cluster')
#                storage = ceph_cluster.CephCluster(cluster_name, monitors)
#                ceph_mons = storage.mon.stat()['mons'].keys()
#                ceph_osds = []
#                ceph_all_osds = storage.osd.tree()['nodes']
#                for ceph_osd in ceph_all_osds:
#                    if ceph_osd['type'] == 'host':
#                        ceph_osds.append(ceph_osd['name'])
#                ceph_mon_osd = set(ceph_mons).intersection(ceph_osds)
#                # remove intersection
#                ceph_mons = set(ceph_mons) - ceph_mon_osd
#                ceph_osds = set(ceph_osds) - ceph_mon_osd
#                # TODO make sure how the role should look like on UI
#                mon_osd_hosts = [
#                    {'hostname': hostname,
#                     'release': release,
#                     'role': ['Monitor', 'OSD Hosts']}
#                    for hostname in ceph_mon_osd]
#                mon_hosts = [
#                    {'hostname': hostname,
#                     'release': release,
#                     'role': 'Monitor'}
#                    for hostname in ceph_mons]
#                osds_hosts = [
#                    {'hostname': hostname,
#                     'release': release,
#                     'role': 'OSD Host'}
#                    for hostname in ceph_osds]
#                hosts = mon_hosts + osds_hosts + mon_osd_hosts

        # check hosts
        check_hosts(hosts, self.hosts)

        self.import_click()
        # the page is not loaded completely, better to wait a little
        time.sleep(1)
        final_import_page = ImportClusterSummary(self.driver)
        final_import_page.view_task()
        return hosts
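
The check_hosts helper called above is not part of this snippet; a minimal sketch of what such a comparison might look like, assuming (as the earlier ``next(iter(self.hosts)).name`` call suggests) that the page's host rows expose a ``name`` attribute:

def check_hosts(expected_hosts, page_hosts):
    # hypothetical helper: compare hostnames expected from the trusted pool
    # with the host rows shown on the import page
    expected_names = {host['hostname'] for host in expected_hosts}
    shown_names = {row.name for row in page_hosts}
    pytest.check(
        expected_names == shown_names,
        "Hosts on the import page should match the trusted pool: "
        "expected {}, shown {}".format(sorted(expected_names), sorted(shown_names)))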
Example #5
def test_cluster_import_valid(valid_session_credentials, unmanaged_cluster):
    """
    Positive test: import a gluster cluster.
    """
    """
    :step:
      Check that fqdns of nodes in tendrl correspond with fqdns
      from ``gluster`` command.
    :result:
      Sets of fqdns of nodes in tendrl and from ``gluster`` command
      should be the same.
    """
    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    cluster_id = unmanaged_cluster["cluster_id"]
    pytest.check(cluster_id is not None,
                 "Cluster id should not be None (cluster id: {})".format(cluster_id))
    # get nodes from gluster interface
    gl = gluster.GlusterCommon()
    gl_nodes = gl.get_hosts_from_trusted_pool(
        CONF.config["usmqe"]["cluster_member"])

    for _ in range(12):
        cluster = api.get_cluster(cluster_id)
        nodes = [node for node in cluster["nodes"] if node["fqdn"]]
        if len(nodes) == len(gl_nodes):
            break
        time.sleep(10)
    else:
        pytest.check(
            len(gl_nodes) == len(cluster["nodes"]),
            "Number of nodes from gluster trusted pool ({}) should be "
            "the same as number of nodes in tendrl ({})".format(
                len(gl_nodes), len(cluster["nodes"])))
    node_fqdns = [x["fqdn"] for x in nodes]
    pytest.check(
        set(gl_nodes) == set(node_fqdns),
        "fqdns get from gluster trusted pool ({}) should correspond "
        "with fqdns of nodes in tendrl ({})".format(gl_nodes, node_fqdns))
    """
    :step:
      Send POST request to Tendrl API ``APIURL/clusters/:cluster_id/import``
    :result:
      Server should return a response in JSON format:

          {
            "job_id": job_id
          }

      Return code should be **202** with data ``{"message": "Accepted"}``.
    """
    job_id = api.import_cluster(cluster_id)["job_id"]

    api.wait_for_job_status(job_id)

    integration_id = api.get_job_attribute(
        job_id=job_id,
        attribute="TendrlContext.integration_id",
        section="parameters")
    LOGGER.debug("integration_id: %s" % integration_id)

    imported_clusters = [
        x for x in api.get_cluster_list()
        if x["integration_id"] == integration_id
    ]
    pytest.check(
        len(imported_clusters) == 1,
        "integration_id '{}' from the import job should be "
        "present in the cluster list.".format(integration_id))
    # TODO add test case for checking imported machines
    msg = "In tendrl should be a same machines "\
          "as from `gluster peer status` command ({})"
    LOGGER.debug("debug imported clusters: %s" % imported_clusters)
    # all() is needed here, a bare non-empty list would always be truthy
    pytest.check(
        all(x["fqdn"] in gl_nodes for x in imported_clusters[0]["nodes"]),
        msg.format(gl_nodes))
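
The 12 x 10 second polling loop above (and in the next example) could be factored into a small retry helper; this is an illustrative sketch only, not part of the original module:

import time

def wait_until(predicate, attempts=12, delay=10):
    # call predicate() until it returns True or the attempts are exhausted
    for _ in range(attempts):
        if predicate():
            return True
        time.sleep(delay)
    return False

# usage sketch for the node-count wait above:
# wait_until(lambda: len(api.get_cluster(cluster_id)["nodes"]) == len(gl_nodes))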
Example #6
def test_cluster_import_fail_with_one_nodeagent_down(
        valid_session_credentials, unmanaged_cluster,
        importfail_setup_nodeagent_stopped_on_one_node):
    """
    Negative test: import a gluster cluster when the node agent is not running
    on one storage machine. The import should fail in such a case.
    """
    tendrl = glusterapi.TendrlApiGluster(auth=valid_session_credentials)

    # this test can't go on if we don't have proper cluster id at this point
    assert unmanaged_cluster["cluster_id"] is not None

    # TODO: this comes from test_cluster_import_valid, move this into cluster reuse fixture?
    """
    :step:
      Check that fqdns of nodes in tendrl correspond with fqdns
      from ``gluster`` command.
    :result:
      Sets of fqdns of nodes in tendrl and from ``gluster`` command
      should be the same.
    """
    # get nodes from gluster interface
    gl = gluster.GlusterCommon()
    gl_nodes = gl.get_hosts_from_trusted_pool(
        CONF.config["usmqe"]["cluster_member"])

    retry_num = 12
    for i in range(retry_num):
        cluster = tendrl.get_cluster(unmanaged_cluster["cluster_id"])
        if len(cluster["nodes"]) == len(gl_nodes):
            LOGGER.debug(
                "cluster (via tendrl API) has expected number of nodes")
            break
        if i != retry_num - 1:
            msg = "cluster (via tendrl API) has unexpected number of nodes, retrying API query"
            LOGGER.info(msg)
            time.sleep(10)
    else:
        assert len(cluster["nodes"]) == len(gl_nodes)
    node_fqdn_list = [
        node["fqdn"] for node in cluster["nodes"] if node["fqdn"]
    ]
    assert set(gl_nodes) == set(node_fqdn_list)
    """
    :step:
      Start import job for the cluster.
    :result:
      The job starts and finishes with failed status after some time.
    """
    LOGGER.info("starting import cluster job")
    import_job = tendrl.import_cluster(unmanaged_cluster["cluster_id"])
    LOGGER.info("import (job id {}) submited, waiting for completion".format(
        import_job["job_id"]))
    tendrl.wait_for_job_status(import_job["job_id"], status="failed")
    """
    :step:
      Using integration id from the import job, find cluster we tried to import
      in a cluster list.
    :result:
      There is exactly one such cluster, and it's not managed (aka not imported).
    """
    integration_id = tendrl.get_job_attribute(
        job_id=import_job["job_id"],
        attribute="TendrlContext.integration_id",
        section="parameters")
    LOGGER.debug("integration_id: %s" % integration_id)
    clusters = [
        x for x in tendrl.get_cluster_list()
        if x["integration_id"] == integration_id
    ]
    pytest.check(
        len(clusters) == 1,
        "integration_id '{}' from the import job should be "
        "present in the cluster list.".format(integration_id))
    pytest.check(clusters[0]['is_managed'] == 'no',
                 'the cluster we tried to import should be in an unmanaged state')
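
Both import tests look a cluster up by its integration_id in the cluster list; an illustrative shared helper (not part of the original module) could express that lookup:

def find_cluster_by_integration_id(api, integration_id):
    # return the single cluster whose integration_id matches, or None
    matches = [
        cluster for cluster in api.get_cluster_list()
        if cluster["integration_id"] == integration_id]
    pytest.check(
        len(matches) == 1,
        "integration_id '{}' should identify exactly one cluster "
        "in the cluster list".format(integration_id))
    return matches[0] if matches else None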
Example #7
def test_create_volume_valid(
        cluster_reuse,
        valid_session_credentials,
        valid_bricks_for_crud_volume,
        volume_conf_2rep):
    """@pylatest api/gluster.create_volume_valid
        API-gluster: create_volume
        ******************************

        .. test_metadata:: author [email protected]
        .. test_metadata:: author [email protected]

        Description
        ===========

        Get the list of attributes needed for cluster volume creation with a given cluster_id.

        .. test_step:: 1

                Connect to Tendrl API via POST request to ``APIURL/:cluster_id/GlusterCreateVolume``
                where ``cluster_id`` is set to a predefined value.

        .. test_result:: 1

                Server should return a response in JSON format:

                Return code should be **202** with data ``{"message": "Accepted"}``.
                Job should finish.
                """
    volume_conf_2rep["Volume.volname"] = VOLUME_NAME
    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)

    job_id = api.create_volume(
        cluster_reuse["cluster_id"],
        volume_conf_2rep)["job_id"]
    if api.wait_for_job_status(job_id) == "finished":
        """@pylatest api/gluster.create_volume
            API-gluster: create_volume
            ******************************

            .. test_metadata:: author [email protected]
            .. test_metadata:: author [email protected]

            Description
            ===========

            Check if there is created volume on gluster nodes via CLI.

            .. test_step:: 2

                Connect to gluster node machine via ssh and run
                ``gluster volume info`` command

            .. test_result:: 2

                A gluster volume named ``Vol_test`` should be listed.

                """
        storage = gluster.GlusterCommon()
        storage.find_volume_name(VOLUME_NAME)

        volume = gluster.GlusterVolume(VOLUME_NAME)
        volume_id = volume.get_volume_id()
        volumes = api.get_volume_list(cluster_reuse["cluster_id"])
        volume_tendrl = [volume_t[volume_id] for volume_t in volumes
                         if volume_id in volume_t]
        pytest.check(
            len(volume_tendrl) == 1,
            """There should be only one volume
            with id == {}""".format(volume_id))

        if len(volume_tendrl) == 1:
            volume_tendrl = volume_tendrl[0]
            for (x, y, z) in [
                 ("name", volume.name, volume_tendrl["name"]),
                 ("id", volume.id, volume_tendrl["vol_id"]),
                 ("status", volume.status, volume_tendrl["status"]),
                 ("stripe_count", volume.stripe_count, volume_tendrl["stripe_count"]),
                 ("replica_count", volume.replica_count, volume_tendrl["replica_count"]),
                 ("brick_count", volume.brick_count, volume_tendrl["brick_count"]),
                 ("snapshot_count", volume.snap_count, volume_tendrl["snap_count"])]:
                pytest.check(
                    y == z,
                    """Volume {} in storage {} and in Tendrl {} are the same.""".format(x, y, z))
Example #8
def test_delete_volume_valid(
        cluster_reuse,
        valid_session_credentials):
    """@pylatest api/gluster.delete_volume
        API-gluster: delete_volume
        ******************************

        .. test_metadata:: author [email protected]

        Description
        ===========

        Delete gluster volume ``Vol_test`` via API.

        .. test_step:: 1

                Connect to Tendrl API via POST request to ``APIURL/:cluster_id/GlusterDeleteVolume``
                where ``cluster_id`` is set to a predefined value.

        .. test_result:: 1

                Server should return a response in JSON format:

                Return code should be **202** with data ``{"message": "Accepted"}``.
                Job should finish.
                """

    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    volume_id = gluster.GlusterVolume(VOLUME_NAME).get_volume_id()
    volume_data = {
        "Volume.volname": VOLUME_NAME,
        "Volume.vol_id": volume_id
    }

    job_id = api.delete_volume(cluster_reuse["cluster_id"], volume_data)["job_id"]
    api.wait_for_job_status(job_id)
    """@pylatest api/gluster.create_volume
        API-gluster: create_volume
        ******************************

        .. test_metadata:: author [email protected]

        Description
        ===========

        Check that the deleted volume is no longer present on gluster nodes via CLI.

        .. test_step:: 2

            Connect to gluster node machine via ssh and run
            ``gluster volume info`` command

        .. test_result:: 2

            No gluster volume named ``Vol_test`` should be listed.

            """
    storage = gluster.GlusterCommon()
    storage.find_volume_name(VOLUME_NAME, False)
    """@pylatest api/gluster.create_volume
        API-gluster: create_volume
        ******************************

        .. test_metadata:: author [email protected]

        Description
        ===========

        Check that the deleted volume is no longer listed via the API.

        .. test_step:: 3

            Get response from ``hostname/api/1.0/:cluster_id:/GetVolumeList``
            API call.

        .. test_result:: 3

            The response should not list a gluster volume with ``valid_volume_id``.

            """
    volumes = api.get_volume_list(cluster_reuse["cluster_id"])
    pytest.check(
        volume_id not in list(volumes),
        "volume id {} should not be among volume ids in tendrl: {}".format(
            volume_id, list(volumes)))