def test_volumes_list(valid_session_credentials, managed_cluster):
    """
    List volumes for given cluster via API.

    :step:
      Connect to Tendrl API via GET request to
      ``APIURL/:cluster_id/volumes``
      Where cluster_id is set to predefined value.
    :result:
      Server should return response in JSON format:
      Return code should be **200** with data ``{"volumes": [{...}, ...]}``.
    """
    tendrl = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    volume_cmd = gluster.GlusterVolume()
    # volume names as reported by the Tendrl API, sorted for comparison
    api_response = tendrl.get_volume_list(managed_cluster['cluster_id'])
    api_names = sorted(item["name"] for item in api_response["volumes"])
    # volume names as reported by the gluster command, sorted for comparison
    cli_names = sorted(volume_cmd.get_volume_names())
    LOGGER.info("list of volumes from Tendrl api: %s", str(api_names))
    LOGGER.info("list of volumes from gluster: %s", cli_names)
    pytest.check(
        api_names == cli_names,
        "List of volumes from Gluster should be the same as from Tendrl API.")
    # the expected volume count comes from the usmqe configuration
    expected_count = int(CONF.config["usmqe"]["volume_count"])
    pytest.check(
        len(api_names) == expected_count,
        "Number of volumes from Tendrl API: {}. "
        "Expected number of volumes: {}.".format(
            len(api_names), expected_count))
def test_cluster_disable_profiling(application, managed_cluster, gluster_volume):
    """
    Disable cluster profiling in Web UI
    """
    """
    :step:
      Log in to Web UI and get the first cluster from the cluster list.
      Check that its profiling is enabled
    :result:
      Cluster is in the correct state to disable profiling
    """
    all_clusters = application.collections.clusters.get_clusters()
    ui_cluster = tools.choose_cluster(all_clusters,
                                      managed_cluster["cluster_id"],
                                      managed_cluster["short_name"])
    # make sure profiling is on before we try to disable it
    if ui_cluster.profiling != "Enabled":
        ui_cluster.enable_profiling()
    glv = gluster.GlusterVolume()
    pytest.check(
        glv.get_clusterwide_profiling() == "enabled",
        "Check that all volumes have profiling enabled according to gluster command"
    )
    """
    :step:
      Disable profiling in Web UI and check its state has changed
      in both Web UI and API
    :result:
      Cluster profiling has been disabled
    """
    ui_cluster.disable_profiling()
    pytest.check(
        glv.get_clusterwide_profiling() == "disabled",
        "Check that all profiling has been disabled according to gluster command"
    )
def test_volume_attributes(application, valid_session_credentials,
                           managed_cluster, gluster_volume):
    """
    Test that all volumes are listed on cluster's Volumes page.
    Check all common volume attributes
    """
    """
    :step:
      Log in to Web UI and get the cluster identified by cluster_member.
      Get the list of its volumes.
    :result:
      Volume objects are initiated and their attributes are read from the page
    """
    all_clusters = application.collections.clusters.get_clusters()
    ui_cluster = tools.choose_cluster(all_clusters,
                                      managed_cluster["cluster_id"],
                                      managed_cluster["short_name"])
    assert ui_cluster.managed == "Yes"
    ui_volumes = ui_cluster.volumes.get_volumes()
    """
    :step:
      Get the list of volumes using Gluster command and check it's the same
      as in UI
    :result:
      The list of volumes in UI and in Gluster command are equal
    """
    cli_names = gluster.GlusterVolume().get_volume_names()
    ui_names = [vol.volname for vol in ui_volumes]
    pytest.check(
        set(ui_names) == set(cli_names),
        "Check that UI volumes list is the same as in gluster volume info")
    LOGGER.debug("UI volume names: {}".format(ui_names))
    LOGGER.debug("Gluster command volume names: {}".format(cli_names))
    """
    :step:
      Check common volume attributes
    :result:
      Common volume attributes have expected values
    """
    for vol in ui_volumes:
        # names are expected to look like ``?olume_...`` (substring at index 1)
        pytest.check(
            vol.volname.find("olume_") == 1,
            "Check that volume name contains ``olume_``")
        pytest.check(
            vol.running == "Yes",
            "Check that volume ``Running`` attribute has value ``Yes``")
        pytest.check(
            vol.rebalance == "Not Started",
            "Check that volume ``Rebalance`` attribute has value ``Not Started``"
        )
        pytest.check(
            int(vol.alerts) >= 0,
            "Check that volume's number of alerts is a non-negative integer")
def test_volume_profiling_switch(application, managed_cluster, gluster_volume):
    """
    Test disabling and enabling volume profiling in UI
    """
    """
    :step:
      Log in to Web UI and get the cluster identified by cluster_member.
      Get the list of its volumes.
    :result:
      Volume objects are initiated and their attributes are read from the page.
    """
    clusters = application.collections.clusters.get_clusters()
    test_cluster = tools.choose_cluster(clusters,
                                        managed_cluster["cluster_id"],
                                        managed_cluster["short_name"])
    assert test_cluster.managed == "Yes"
    volumes = test_cluster.volumes.get_volumes()
    for volume in volumes:
        """
        :step:
          For each volume in the volume list, disable profiling and check
          its profiling status both in UI and using Gluster command.
        :result:
          Volume profiling is disabled.
        """
        glv_cmd = gluster.GlusterVolume(volume_name=volume.volname)
        volume.disable_profiling()
        pytest.check(
            not glv_cmd.is_profiling_enabled(),
            # message typo fixed: "usig" -> "using"
            "Check that profiling status has changed to disabled using gluster command"
        )
        pytest.check(volume.profiling == "Disabled",
                     "Check that profiling attribute in UI is ``Disabled``")
        """
        :step:
          For each volume in the volume list, enable profiling and check
          its profiling status both in UI and using Gluster command.
        :result:
          Volume profiling is enabled.
        """
        volume.enable_profiling()
        pytest.check(
            glv_cmd.is_profiling_enabled(),
            # message typo fixed: "usig" -> "using"
            "Check that profiling status has changed to enabled using gluster command"
        )
        pytest.check(volume.profiling == "Enabled",
                     "Check that profiling attribute in UI is ``Enabled``")
def test_volumes_list(valid_session_credentials, cluster_reuse, valid_trusted_pool_reuse):
    """@pylatest api/gluster.volumes_list
        API-gluster: volumes_list
        ******************************

        .. test_metadata:: author [email protected]

        Description
        ===========

        List volumes for given cluster via API.

        .. test_step:: 1

            Connect to Tendrl API via GET request to ``APIURL/:cluster_id/volumes``
            Where cluster_id is set to predefined value.

        .. test_result:: 1

            Server should return response in JSON format:

            Return code should be **200** with data ``{"volumes": [{...}, ...]}``.
        """
    tendrl = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    cli = gluster.GlusterVolume()
    # volume names reported by the Tendrl API, sorted for comparison
    response = tendrl.get_volume_list(cluster_reuse['cluster_id'])
    api_names = sorted(item["name"] for item in response["volumes"])
    # volume names reported by the gluster command, sorted for comparison
    cli_names = sorted(cli.get_volume_names())
    LOGGER.info("list of volumes from Tendrl api: %s", str(api_names))
    LOGGER.info("list of volumes from gluster: %s", cli_names)
    pytest.check(
        api_names == cli_names,
        "List of volumes from Gluster should be the same as from Tendrl API.")
def test_volume_brick_list(valid_session_credentials, managed_cluster):
    """
    List bricks for given volume via API.

    :step:
      Connect to Tendrl API via GET request to
      ``APIURL/:cluster_id/volumes/:volume_id/bricks``
      Where cluster_id is set to predefined value.
    :result:
      Server should return response in JSON format:
      Return code should be **200** with data ``{"bricks": [{...}, ...]}``.
    """
    # get list of volumes from Tendrl
    tendrl = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    cluster_id = managed_cluster['cluster_id']
    volumes = tendrl.get_volume_list(cluster_id)
    # perform brick list test for each volume
    for vol in volumes["volumes"]:
        LOGGER.info("Compare bricks for volume: %s", vol["name"])
        cli_volume = gluster.GlusterVolume(volume_name=vol["name"])
        cli_volume.info()
        api_bricks = tendrl.get_brick_list(cluster_id, vol["vol_id"])
        api_paths = {brick["brick_path"] for brick in api_bricks["bricks"]}
        cli_paths = set(cli_volume.bricks)
        LOGGER.info("list of bricks for '%s' from Tendrl api: %s",
                    vol["name"], str(api_paths))
        LOGGER.info("list of bricks for '%s' from gluster: %s",
                    vol["name"], cli_paths)
        pytest.check(
            api_paths == cli_paths,
            "List of bricks for '{}' from Tendrl API should be the same "
            "as from Gluster.".format(vol["name"]))
def wait():
    """Pause for three minutes while volumes are stopped, then return the
    gluster volume list."""
    volumes = gluster.GlusterVolume()
    LOGGER.info("Measure time when volumes are stopped.")
    time.sleep(180)
    return volumes.list()
def test_create_volume_valid(
        cluster_reuse,
        valid_session_credentials,
        valid_bricks_for_crud_volume,
        volume_conf_2rep):
    """@pylatest api/gluster.create_volume_valid
        API-gluster: create_volume
        ******************************

        .. test_metadata:: author [email protected]

        .. test_metadata:: author [email protected]

        Description
        ===========

        Get list of attributes needed to use in cluster volume creation with given cluster_id.

        .. test_step:: 1

                Connect to Tendrl API via POST request to ``APIURL/:cluster_id/GlusterCreateVolume``
                Where cluster_id is set to predefined value.

        .. test_result:: 1

                Server should return response in JSON format:

                        Return code should be **202**
                        with data ``{"message": "Accepted"}``.

                job should finish.
        """
    # create a volume with the predefined name via the Tendrl API
    volume_conf_2rep["Volume.volname"] = VOLUME_NAME
    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    job_id = api.create_volume(
        cluster_reuse["cluster_id"],
        volume_conf_2rep)["job_id"]
    # NOTE(review): when the job does not reach "finished", the whole
    # verification below is silently skipped and the test passes without
    # checking anything — confirm this is intentional
    if api.wait_for_job_status(job_id) == "finished":
        """@pylatest api/gluster.create_volume
        API-gluster: create_volume
        ******************************

        .. test_metadata:: author [email protected]

        .. test_metadata:: author [email protected]

        Description
        ===========

        Check if there is created volume on gluster nodes via CLI.

        .. test_step:: 2

            Connect to gluster node machine via ssh and run
            ``gluster volume info command``

        .. test_result:: 2

            There should be listed gluster volume named ``Vol_test``.

        """
        # confirm via the gluster CLI that the volume exists on the nodes
        storage = gluster.GlusterCommon()
        storage.find_volume_name(VOLUME_NAME)
        volume = gluster.GlusterVolume(VOLUME_NAME)
        volume_id = volume.get_volume_id()
        # the API volume list is expected to contain dict entries keyed by
        # volume id; pick the entry for the freshly created volume
        volumes = api.get_volume_list(cluster_reuse["cluster_id"])
        volume_tendrl = [volume_t[volume_id] for volume_t in volumes
                         if volume_id in volume_t]
        pytest.check(
            len(volume_tendrl) == 1,
            """There should be only one volume
            with id == {}""".format(volume_id))
        if len(volume_tendrl) == 1:
            volume_tendrl = volume_tendrl[0]
            # compare selected attributes between the gluster CLI view (y)
            # and the Tendrl API view (z); x is the attribute label
            for (x, y, z) in [
                    ("name", volume.name, volume_tendrl["name"]),
                    ("id", volume.id, volume_tendrl["vol_id"]),
                    ("status", volume.status, volume_tendrl["status"]),
                    ("stripe_count", volume.stripe_count,
                     volume_tendrl["stripe_count"]),
                    ("replica_count", volume.replica_count,
                     volume_tendrl["replica_count"]),
                    ("brick_count", volume.brick_count,
                     volume_tendrl["brick_count"]),
                    ("snapshot_count", volume.snap_count,
                     volume_tendrl["snap_count"])]:
                pytest.check(
                    y == z,
                    """Volume {} in storage {} and in Tendrl {}
                    are the same.""".format(x, y, z))
def test_delete_volume_valid(
        cluster_reuse,
        valid_session_credentials):
    """@pylatest api/gluster.delete_volume
        API-gluster: delete_volume
        ******************************

        .. test_metadata:: author [email protected]

        Description
        ===========

        Delete gluster volume ``Vol_test`` via API.

        .. test_step:: 1

            Connect to Tendrl API via POST request to ``APIURL/:cluster_id/GlusterDeleteVolume``
            Where cluster_id is set to predefined value.

        .. test_result:: 1

            Server should return response in JSON format:

                Return code should be **202**
                with data ``{"message": "Accepted"}``.

            job should finish.
        """
    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    # resolve the volume id of the predefined volume before deleting it,
    # since the delete request needs both name and id
    volume_id = gluster.GlusterVolume(VOLUME_NAME).get_volume_id()
    volume_data = {
        "Volume.volname": VOLUME_NAME,
        "Volume.vol_id": volume_id
        }
    job_id = api.delete_volume(cluster_reuse["cluster_id"],
                               volume_data)["job_id"]
    # block until the delete job reaches a terminal status
    api.wait_for_job_status(
        job_id)
    """@pylatest api/gluster.create_volume
        API-gluster: create_volume
        ******************************

        .. test_metadata:: author [email protected]

        Description
        ===========

        Check if there is deleted volume on gluster nodes via CLI.

        .. test_step:: 1

            Connect to gluster node machine via ssh and run
            ``gluster volume info command``

        .. test_result:: 1

            There should not be listed gluster volume named ``Vol_test``.

        """
    # CLI check: the volume must no longer be reported by gluster
    # (second argument False inverts the expectation of find_volume_name)
    storage = gluster.GlusterCommon()
    storage.find_volume_name(VOLUME_NAME, False)
    """@pylatest api/gluster.create_volume
        API-gluster: create_volume
        ******************************

        .. test_metadata:: author [email protected]

        Description
        ===========

        Check if there is not deleted volume on gluster nodes via CLI.

        .. test_step:: 3

            Get response from ``hostname/api/1.0/:cluster_id:/GetVolumeList``
            API call.

        .. test_result:: 3

            In response should not be listed gluster volume with
            ``valid_volume_id``

        """
    # API check: the deleted volume id must be gone from the volume list
    volumes = api.get_volume_list(cluster_reuse["cluster_id"])
    pytest.check(
        volume_id not in list(volumes),
        "volume id {} should not be among volume ids in tendrl: {}".format(
            volume_id, list(volumes)))
def test_volume_bricks(application, managed_cluster, gluster_volume):
    """
    Test volume brick attributes and their division into replica
    sets/subvolumes.
    """
    """
    :step:
      Log in to Web UI and get the cluster identified by cluster_member.
      Get the list of its volumes.
    :result:
      Volume objects are initiated and their attributes are read from the page.
    """
    clusters = application.collections.clusters.get_clusters()
    test_cluster = tools.choose_cluster(clusters,
                                        managed_cluster["cluster_id"],
                                        managed_cluster["short_name"])
    assert test_cluster.managed == "Yes"
    volumes = test_cluster.volumes.get_volumes()
    pytest.check(
        volumes != [],
        "Check that there is at least one volume in the UI volumes list")
    for volume in volumes:
        """
        :step:
          For each volume calculate the expected subvolume/replica set size
          from the volume name depending on volume type.
        :result:
          Subvolume/replica set size is calculated.
        """
        # the volume type is encoded as the third ``_``-separated field of
        # the volume name, the sizing numbers in the following fields
        if volume.volname.split("_")[2] in {"arbiter", "disperse"}:
            part_size = int(volume.volname.split("_")[3]) + \
                int(volume.volname.split("_")[5].split('x')[0])
        elif volume.volname.split("_")[2] == "distrep":
            part_size = int(volume.volname.split("_")[3].split('x')[1])
        else:
            pytest.check(
                False,
                "Volume type isn't ``arbiter``, ``distrep`` or ``disperse``")
            LOGGER.debug("Unexpected volume type")
            part_size = 0
        """
        :step:
          For each volume part get its bricks.
          Check that actual brick count per subvolume/replica set
          is as expected.
        :result:
          Each subvolume/replica set has the expected number of bricks.
        """
        volume_parts = volume.parts.get_parts()
        pytest.check(volume_parts != [],
                     "Check that there is at least one subvolume/replica set")
        all_bricks = []
        for part in volume_parts:
            bricks = part.bricks.get_bricks()
            pytest.check(
                len(bricks) == part_size,
                "Check that the number of bricks in the table is as expected")
            for brick in bricks:
                """
                :step:
                  Check each brick's attributes.
                :result:
                  All brick's attributes are as expected.
                """
                pytest.check(
                    brick.brick_path.find('/mnt/brick') == 0,
                    "Check that Brick Path starts with ``/mnt/brick``")
                pytest.check(
                    brick.utilization.find('% U') > 0,
                    # message typo fixed: "inclusde" -> "include"
                    "Check that Utilization column values include ``%``")
                pytest.check(
                    brick.disk_device_path.find('/dev/') == 0,
                    "Check that Disk Device Path starts with ``/dev/``")
                pytest.check(
                    int(brick.port) > 1000,
                    "Check that Port number is an integer greater than 1000")
            all_bricks = all_bricks + bricks
        """
        :step:
          Check that the list of all bricks of the volume in UI is the same
          as the result of gluster volume info command.
        :result:
          The list of all volume's bricks in the UI is correct.
        """
        glv_cmd = gluster.GlusterVolume(volume_name=volume.volname)
        glv_cmd.info()
        LOGGER.debug("Gluster bricks: {}".format(glv_cmd.bricks))
        ui_brick_names = [b.hostname + ":" + b.brick_path for b in all_bricks]
        LOGGER.debug("UI bricks: {}".format(ui_brick_names))
        pytest.check(
            glv_cmd.bricks == ui_brick_names,
            "Check that volume bricks in UI are the same as in gluster CLI")