def wait_for_job_status(self, job_id, max_count=30, status="finished", issue=None):
    """
    Repeatedly check if status of job with provided id is in required state.

    Args:
        job_id: id provided by api request
        max_count: maximum of iterations
        status: expected status of job that is checked
        issue: pytest issue message (usually github issue link)

    Returns:
        the last job status observed (equals ``status`` on success)
    """
    count = 0
    current_status = ""
    while current_status != status and count < max_count:
        current_status = self.get_job_attribute(job_id, "status")
        count += 1
        if current_status == status:
            # job already reached the expected state; skip the final sleep
            break
        time.sleep(1)
    # lazy %-style args so formatting only happens when debug is enabled
    LOGGER.debug("status: %s", current_status)
    pytest.check(
        current_status == status,
        msg="Job status is {} and should be {}".format(current_status, status),
        issue=issue)
    return current_status
def add_user(self, user_in, asserts_in=None):
    """
    Add user through **users**.

    Name:       "POST_users",
    Method:     "POST",
    Pattern:    "users",

    Args:
        user_in: dictionary info about user;
            have to contain: name, username, email, role, password,
            password_confirmation
        asserts_in: assert values for this call and this method

    Returns:
        the user record stored in the database (fetched back via the API)
    """
    # default asserts for a successful user creation
    asserts_in = asserts_in or {
        "cookies": None,
        "ok": True,
        "reason": 'Created',
        "status": 201}
    pattern = "users"
    request = requests.post(
        pytest.config.getini("usm_api_url") + pattern,
        data=json.dumps(user_in),
        auth=self._auth)
    self.print_req_info(request)
    self.check_response(request, asserts_in)
    # compare only the keys the API is expected to persist
    sent_user = {k: user_in[k] for k in USERDATA_KEYS}
    stored_user = self.get_user(user_in["username"])
    pytest.check(
        sent_user == stored_user,
        """Information sent: {}, information stored in database: {}, These should match""".format(sent_user, stored_user))
    return stored_user
def test_cluster_import_unmanage_view_progress(application, unmanaged_cluster):
    """
    Import cluster and view import progress.
    Then unmanage the cluster and view unmanage progress.
    """
    """
    :step:
      Log in to Web UI.
      Import the first cluster from the clusters list viewing import progress.
    :result:
      Cluster is imported and its name is shown in the clusters list
    """
    clusters = application.collections.clusters.get_clusters()
    test_cluster = tools.choose_cluster(clusters,
                                        unmanaged_cluster["cluster_id"],
                                        unmanaged_cluster["short_name"])
    # make sure the cluster starts unmanaged before attempting the import
    if test_cluster.managed == "Yes":
        test_cluster.unmanage()
    import_success = test_cluster.cluster_import(view_progress=True)
    if not import_success:
        pytest.check(False, "Import failed")
    """
    :step:
      Unmanage the cluster and view unmanage progress.
    :result:
      Cluster is unmanaged
    """
    unmanage_success = test_cluster.unmanage(view_progress=True)
    if not unmanage_success:
        pytest.check(False, "Unmanage failed")
    # final unmanage leaves the cluster in the unmanaged state as cleanup
    # NOTE(review): presumably a no-op when the previous unmanage
    # succeeded — confirm against Cluster.unmanage semantics
    test_cluster.unmanage()
def test_cluster_disable_profiling(application, managed_cluster, gluster_volume):
    """
    Disable cluster profiling via the Web UI and verify the change
    through the gluster command line.
    """
    # Pick the cluster under test and make sure profiling starts enabled.
    all_clusters = application.collections.clusters.get_clusters()
    cluster_ui = tools.choose_cluster(
        all_clusters,
        managed_cluster["cluster_id"],
        managed_cluster["short_name"])
    if cluster_ui.profiling != "Enabled":
        cluster_ui.enable_profiling()
    gluster_cmd = gluster.GlusterVolume()
    pytest.check(
        gluster_cmd.get_clusterwide_profiling() == "enabled",
        "Check that all volumes have profiling enabled according to gluster command"
    )
    # Disable profiling in the Web UI, then confirm the new state
    # with the gluster command.
    cluster_ui.disable_profiling()
    pytest.check(
        gluster_cmd.get_clusterwide_profiling() == "disabled",
        "Check that all profiling has been disabled according to gluster command"
    )
def test_user_creation_password_invalid(application, valid_session_credentials,
                                        valid_normal_user_data, invalid_password):
    """
    Attempt to create a user with an invalid password and verify the
    user exists neither in the Web UI nor via the API.
    """
    # Try to create the user through the Web UI with the bad password.
    new_user = application.collections.users.create(
        user_id=valid_normal_user_data["username"],
        name=valid_normal_user_data["name"],
        email=valid_normal_user_data["email"],
        notifications_on=valid_normal_user_data["email_notifications"],
        password=invalid_password,
        role=valid_normal_user_data["role"])
    pytest.check(
        not new_user.exists,
        "Check that new user can't be found on UI Users page")
    # Confirm through the API that the user was never persisted.
    api = tendrlapi_user.ApiUser(auth=valid_session_credentials)
    bad_user_data = copy.deepcopy(valid_normal_user_data)
    bad_user_data["password"] = invalid_password
    expected_asserts = {"ok": False, "reason": 'Not Found', "status": 404}
    api_response = api.get_user(
        bad_user_data["username"], asserts_in=expected_asserts)
    pytest.check(
        "Not found" in str(api_response),
        "Check that new user can't be found using API")
def users(self, asserts_in=None):
    """
    Get users.

    Name:       "GET_users",
    Method:     "GET",
    Pattern:    "users",

    Args:
        asserts_in: assert values for this call and this method

    Returns:
        parsed json body of the response
    """
    asserts = ApiUser.default_asserts.copy()
    # by default only the "admin" user is expected to exist
    asserts.update({
        "users": ["admin"],
    })
    if asserts_in:
        asserts.update(asserts_in)
    req = requests.get(pytest.config.getini("usm_api_url") + "users/",
                       cookies=self.cookies, verify=self.verify)
    ApiUser.print_req_info(req)
    ApiUser.check_response(req, asserts)
    # build expected user records by filling the %s templates in
    # ApiUser.user_info (username/email) with each expected user name
    json_v = {}
    for user in asserts["users"]:
        temp = ApiUser.user_info.copy()
        temp["username"] %= user
        temp["email"] %= user
        json_v[user] = temp
    if req.ok:
        # every returned user must contain all expected key/value pairs
        for item in req.json(encoding='unicode'):
            user = item["username"]
            pytest.check(
                all(key in item and json_v[user][key] == item[key]
                    for key in json_v[user].keys()),
                "User {0} should contain: {1}\n\tUser {0} contains: {2}".
                format(user, json_v[user], item))
    return req.json(encoding='unicode')
def test_nodes_list(valid_session_credentials, managed_cluster):
    """
    List nodes for given cluster via API.

    :step:
      Connect to Tendrl API via GET request to ``APIURL/:cluster_id/nodes``
      Where cluster_id is set to predefined value.
    :result:
      Server should return response in JSON format:
      Return code should be **200** with data ``{"nodes": [{...}, ...]}``.
    """
    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    # node names reported by the Tendrl API
    api_nodes = api.get_node_list(managed_cluster['cluster_id'])
    names_from_api = {entry["fqdn"] for entry in api_nodes["nodes"]}
    # node names reported by the gluster command line
    gluster_cmd = gluster.GlusterCommon()
    names_from_gluster = set(gluster_cmd.get_hosts_from_trusted_pool(
        CONF.config["usmqe"]["cluster_member"]))
    LOGGER.info("list of nodes from Tendrl api: %s", str(names_from_api))
    LOGGER.info("list of nodes from gluster: %s", names_from_gluster)
    pytest.check(
        names_from_api == names_from_gluster,
        "List of nodes from Gluster should be the same as from Tendrl API.")
def choose_cluster(driver, cluster_type=None):
    """
    Select a cluster which should be imported.

    NOTE: always chooses the first cluster in the list.

    Parameters:
        driver: selenium driver
        cluster_type (str): which type of cluster should be imported
            'ceph' or 'gluster'; None means that cluster type doesn't
            matter (NOTE: not used for now)

    Returns:
        ClustersRow instance
    """
    available_clusters = ClustersList(driver)
    pytest.check(
        len(available_clusters) > 0,
        'There should be some cluster available',
        hard=True)
    if cluster_type is None:
        # take whatever comes first in the list
        cluster = next(iter(available_clusters))
    # TODO use cluster type
    # elif cluster_type.lower() == 'gluster':
    #     # Select first gluster cluster in the list of available clusters
    # elif cluster_type.lower() == 'ceph':
    #     # Select first ceph cluster in the list of available clusters
    # else:
    #     pytest.check(False, 'Unknown cluster type - {}'.format(cluster_type),
    #                  hard=True)
    return cluster
def user(self, user_in, asserts_in=None):
    """
    Get user info.

    Name:       "GET_user",
    Method:     "GET",
    Pattern:    "users/{username}",

    Args:
        user_in: user name
        asserts_in: assert values for this call and this method

    Returns:
        parsed json body of the response
    """
    asserts = ApiUser.default_asserts.copy()
    if asserts_in:
        asserts.update(asserts_in)
    req = requests.get(
        pytest.config.getini("usm_api_url") + "users/%s" % user_in,
        cookies=self.cookies,
        verify=self.verify)
    ApiUser.print_req_info(req)
    ApiUser.check_response(req, asserts)
    if "json" not in asserts:
        # build the expected record by filling the %s templates in
        # ApiUser.user_info (email/username) with the requested user name
        asserts["json"] = ApiUser.user_info.copy()
        asserts["json"]["email"] %= user_in
        asserts["json"]["username"] %= user_in
    real_vals = req.json(encoding='unicode')
    # response must contain every expected key with the expected value
    pytest.check(
        all(key in real_vals and asserts["json"][key] == real_vals[key]
            for key in asserts["json"].keys()),
        "Json of added user should contain: {0}\n\tIt contains: {1}".
        format(asserts["json"], real_vals))
    return req.json(encoding='unicode')
def get_users(self, asserts_in=None):
    """
    Get users.

    Name:       "GET_users",
    Method:     "GET",
    Pattern:    "users",

    Args:
        asserts_in: assert values for this call and this method

    Returns:
        parsed json body of the response, or False when the
        request was not successful
    """
    response = requests.get(
        pytest.config.getini("usm_api_url") + "users",
        auth=self._auth)
    self.print_req_info(response)
    self.check_response(response, asserts_in)
    if not response.ok:
        return False
    msg = "User {0} should contain: {1}\n\tUser {0} contains: {2}"
    # every returned record must expose exactly the expected set of keys
    for record in response.json(encoding='unicode'):
        record_keys = set(record.keys())
        pytest.check(
            record_keys == USERDATA_KEYS,
            msg.format(record["username"], USERDATA_KEYS, record_keys))
    return response.json(encoding='unicode')
def test_repoclosure(tendrl_repos, centos_repos):
    """
    Run ``repoclosure`` against the tendrl-core repository and check
    that all package dependencies are resolvable.
    """
    cmd = ["repoclosure", "--newest"]
    # configure systemd default repositories
    for repo_name, repo_url in centos_repos.items():
        cmd.extend([
            "--repofrompath",
            "{},{}".format(repo_name, repo_url),
            "--lookaside={}".format(repo_name),
        ])
    # configure tendrl repository (passed via tendrl_repos fixture)
    for repo_name, repo_baseurl in tendrl_repos.items():
        cmd.extend(["--repofrompath", "{},{}".format(repo_name, repo_baseurl)])
        # we expect that other repositories are for dependencies
        if repo_name != "tendrl-core":
            cmd.append("--lookaside={}".format(repo_name))
    cmd.append("--repoid=tendrl-core")
    # running repoclosure
    LOGGER.info(" ".join(cmd))
    with tempfile.TemporaryDirectory() as tmpdirname:
        cp = subprocess.run(
            cmd,
            cwd=tmpdirname,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
    LOGGER.debug("STDOUT: %s", cp.stdout)
    LOGGER.debug("STDERR: %s", cp.stderr)
    check_msg = "repoclosure return code should be 0 indicating no errors"
    pytest.check(cp.returncode == 0, msg=check_msg)
    # when the check fails, report the error in readable way
    if cp.returncode != 0:
        for line in cp.stdout.splitlines():
            LOGGER.failed(line.decode())
        for line in cp.stderr.splitlines():
            LOGGER.failed(line.decode())
def logout(self, asserts_in=None):
    """
    Logout from REST API

    Name:       "logout",
    Method:     "POST",
    Pattern:    "auth/logout",

    Args:
        asserts_in: assert values for this call and this method

    Returns:
        parsed json body of the response
    """
    expected = Api.default_asserts.copy()
    expected.update({
        "cookies": None,
        "json": json.loads('''{"message": "Logged out"}'''),
    })
    if asserts_in:
        expected.update(asserts_in)
    response = requests.post(
        pytest.config.getini("usm_api_url") + "auth/logout",
        cookies=self.cookies,
        verify=self.verify)
    Api.print_req_info(response)
    Api.check_response(response, expected)
    # successful logout must clear the session cookie
    pytest.check(response.cookies.keys() == [],
                 "There should be empty logout cookie.")
    pytest.check(response.json(encoding='unicode') == expected["json"],
                 "There should be logout message.")
    return response.json(encoding='unicode')
def compare_structure(self, structure, slug):
    """Compare provided data structure with layout defined in Grafana.

    Args:
        structure (object): structure of grafana dashboard for comparison;
            expected shape is {row title: [panel titles]}
        slug (str): Slug of dashboard uri. Slug is the url friendly version
            of the dashboard title.
    """
    layout = self.get_dashboard(slug)
    pytest.check(
        len(layout) > 0,
        "{} dashboard should not be empty".format(slug))
    # rebuild the live dashboard layout as {row title: [panel titles]}
    structure_grafana = {}
    for row in layout["dashboard"]["rows"]:
        structure_grafana[row["title"]] = []
        for panel in row["panels"]:
            if panel["title"]:
                structure_grafana[row["title"]].append(panel["title"])
            elif "displayName" in panel.keys() and panel["displayName"]:
                # some panels carry their caption in "displayName" instead
                structure_grafana[row["title"]].append(
                    panel["displayName"])
    LOGGER.debug("defined layout structure = {}".format(structure))
    LOGGER.debug(
        "layout structure in grafana = {}".format(structure_grafana))
    # log a condensed character-level diff of the two serialized layouts
    # to make mismatches easier to spot in the logs
    d = Differ()
    LOGGER.debug("reduced diff between the layouts: {}".format("".join([
        x.strip()
        for x in d.compare(json.dumps(structure, sort_keys=True),
                           json.dumps(structure_grafana, sort_keys=True))
    ])))
    pytest.check(
        structure == structure_grafana,
        "defined structure of panels should " +
        "be equal to structure in grafana")
def test_volumes_list(valid_session_credentials, managed_cluster):
    """
    List volumes for given cluster via API.

    :step:
      Connect to Tendrl API via GET request to ``APIURL/:cluster_id/volumes``
      Where cluster_id is set to predefined value.
    :result:
      Server should return response in JSON format:
      Return code should be **200** with data ``{"volumes": [{...}, ...]}``.
    """
    api = glusterapi.TendrlApiGluster(auth=valid_session_credentials)
    glv_cmd = gluster.GlusterVolume()
    # volume names according to the Tendrl API, sorted for comparison
    api_volumes = api.get_volume_list(managed_cluster['cluster_id'])
    names_from_api = sorted(vol["name"] for vol in api_volumes["volumes"])
    # volume names according to the gluster command, sorted as well
    names_from_gluster = sorted(glv_cmd.get_volume_names())
    LOGGER.info("list of volumes from Tendrl api: %s", str(names_from_api))
    LOGGER.info("list of volumes from gluster: %s", names_from_gluster)
    pytest.check(
        names_from_api == names_from_gluster,
        "List of volumes from Gluster should be the same as from Tendrl API.")
    expected_count = int(CONF.config["usmqe"]["volume_count"])
    pytest.check(
        len(names_from_api) == expected_count,
        "Number of volumes from Tendrl API: {}. "
        "Expected number of volumes: {}.".format(
            len(names_from_api), expected_count))
def test_cluster_import_unmanage_naming(application, unmanaged_cluster):
    """
    Import cluster and give it a custom name. Then unmanage it.
    """
    """
    :step:
      Log in to Web UI and import the first cluster from the clusters list.
      Give it custom name 'TestClusterName'
    :result:
      Cluster is imported and its name is shown in the clusters list
    """
    clusters = application.collections.clusters.get_clusters()
    test_cluster = tools.choose_cluster(clusters,
                                        unmanaged_cluster["cluster_id"],
                                        unmanaged_cluster["short_name"])
    # make sure the cluster starts unmanaged before the import
    if test_cluster.managed == "Yes":
        test_cluster.unmanage()
    # remember the original identifier so it can be passed to unmanage
    # and checked after the cluster loses its custom name
    original_id = test_cluster.name
    import_success = test_cluster.cluster_import(
        cluster_name="TestClusterName")
    if not import_success:
        pytest.check(False, "Import failed")
    """
    :step:
      Unmanage the cluster
    :result:
      Cluster is unmanaged and its name is no longer shown in the clusters
      list. Its id is shown instead.
    """
    unmanage_success = test_cluster.unmanage(original_id=original_id)
    if not unmanage_success:
        pytest.check(False, "Unmanage failed")
    # final unmanage leaves the cluster unmanaged as cleanup
    # NOTE(review): presumably a no-op when the previous unmanage
    # succeeded — confirm against Cluster.unmanage semantics
    test_cluster.unmanage()
def test_hosts(ansible_playbook, workload_stop_nodes, managed_cluster):
    """
    Check that Grafana panel *Hosts* is showing correct values.
    """
    # prefer the short name when the cluster has one
    if managed_cluster["short_name"]:
        cluster_identifier = managed_cluster["short_name"]
    else:
        cluster_identifier = managed_cluster["integration_id"]
    grafana = grafanaapi.GrafanaApi()
    graphite = graphiteapi.GraphiteApi()
    hosts_panel = grafana.get_panel("Hosts",
                                    row_title="At-a-glance",
                                    dashboard="cluster-dashboard")
    """
    :step:
      Send **GET** request to
      ``GRAPHITE/render?target=[target]&format=json`` where [target] is
      part of uri obtained from previous GRAFANA call. There should be
      target for statuses of a hosts. Compare number of hosts from
      Graphite with value retrieved from ``workload_stop_nodes`` fixture.
    :result:
      JSON structure containing data related to hosts status is similar
      to values set by ``workload_stop_nodes`` fixture in given time.
    """
    # get graphite target pointing at data containing numbers of hosts
    targets = grafana.get_panel_chart_targets(hosts_panel, cluster_identifier)
    # the panel charts three series: total, up and down host counts
    targets_used = (targets[0][0], targets[1][0], targets[2][0])
    targets_expected = ('nodes_count.total',
                        'nodes_count.up',
                        'nodes_count.down')
    for idx, target in enumerate(targets_used):
        pytest.check(
            target.endswith(targets_expected[idx]),
            "There is used target that ends with `{}`".format(
                targets_expected[idx]))
    # make sure that all data in graphite are saved
    time.sleep(3)
    # check value *Total* of hosts
    graphite.compare_data_mean(
        workload_stop_nodes["result"],
        (targets_used[0], ),
        workload_stop_nodes["start"],
        workload_stop_nodes["end"],
        divergence=1,
        issue="https://bugzilla.redhat.com/show_bug.cgi?id=1687333")
    # check value *Up* of hosts; all nodes are stopped, so 0 are up
    graphite.compare_data_mean(
        0.0,
        (targets_used[1], ),
        workload_stop_nodes["start"],
        workload_stop_nodes["end"],
        divergence=1,
        issue="https://bugzilla.redhat.com/show_bug.cgi?id=1687333")
    # check value *Down* of hosts
    graphite.compare_data_mean(
        workload_stop_nodes["result"],
        (targets_used[2], ),
        workload_stop_nodes["start"],
        workload_stop_nodes["end"],
        divergence=1,
        issue="https://bugzilla.redhat.com/show_bug.cgi?id=1687333")
def check_status(self, status="Started"):
    """
    Check if volume status corresponds with specified status.

    Args:
        status (str): expected volume status
    """
    # refresh cached volume information before reading the status
    self.info()
    actual_status = self.status
    pytest.check(
        status == actual_status,
        "Volume status is {}, should be {}".format(actual_status, status))
def pre_check(init_object):
    """
    Next cluster workflow pre-check.

    Parameters:
        init_object: WebstrPage instance of page which is loaded after log in
    """
    # a hard check: abort the workflow when we are not on the main page
    pytest.check(
        init_object._label == 'main page - menu bar',
        'Tendrl should route to page with a menu',
        hard=True)
def test_memory_available( ansible_playbook, workload_memory_utilization, managed_cluster): """ Check that Grafana panel *Memory Available* is showing correct values. """ # TODO(fbalak): get this number dynamically # number of samples from graphite target per minute if managed_cluster["short_name"]: cluster_identifier = managed_cluster["short_name"] else: cluster_identifier = managed_cluster["integration_id"] grafana = grafanaapi.GrafanaApi() graphite = graphiteapi.GraphiteApi() memory_panel = grafana.get_panel( "Memory Available", row_title="At-a-Glance", dashboard="host-dashboard") """ :step: Send **GET** request to ``GRAPHITE/render?target=[target]&format=json`` where [target] is part of uri obtained from previous GRAFANA call. There should be target for memory utilization of a host. Compare number of hosts from Graphite with value retrieved from ``workload_memory_utilization`` fixture. :result: JSON structure containing data related to memory utilization is similar to values set by ``workload_memory_utilization`` fixture in given time. """ # get graphite target pointing at data containing number of host targets = grafana.get_panel_chart_targets(memory_panel, cluster_identifier) LOGGER.debug(targets) targets_used = (targets[0][0], targets[0][1]) for key, target_expected in enumerate(( "memory.memory-cached", "memory.memory-free")): pytest.check( targets_used[key].endswith(target_expected), "There is used target that ends with `{}`".format(target_expected)) # make sure that all data in graphite are saved time.sleep(2) expected_available_mem = ( 1 - workload_memory_utilization["result"]/100) * int( workload_memory_utilization['metadata']['total_memory']) graphite.compare_data_mean( expected_available_mem, targets_used, workload_memory_utilization["start"], workload_memory_utilization["end"], divergence=0.15*expected_available_mem)
def test_volume_attributes(application, valid_session_credentials,
                           managed_cluster, gluster_volume):
    """
    Test that all volumes are listed on cluster's Volumes page.
    Check all common volume attributes
    """
    """
    :step:
      Log in to Web UI and get the cluster identified by cluster_member.
      Get the list of its volumes.
    :result:
      Volume objects are initiated and their attributes are read from the page
    """
    clusters = application.collections.clusters.get_clusters()
    test_cluster = tools.choose_cluster(clusters,
                                        managed_cluster["cluster_id"],
                                        managed_cluster["short_name"])
    assert test_cluster.managed == "Yes"
    volumes = test_cluster.volumes.get_volumes()
    """
    :step:
      Get the list of volumes using Gluster command and check it's the same
      as in UI
    :result:
      The list of volumes in UI and in Gluster command are equal
    """
    glv_cmd = gluster.GlusterVolume()
    g_volume_names = glv_cmd.get_volume_names()
    pytest.check(
        set([volume.volname for volume in volumes]) == set(g_volume_names),
        "Check that UI volumes list is the same as in gluster volume info")
    LOGGER.debug("UI volume names: {}".format(
        [volume.volname for volume in volumes]))
    LOGGER.debug("Gluster command volume names: {}".format(g_volume_names))
    """
    :step:
      Check common volume attributes
    :result:
      Common volume attributes have expected values
    """
    for volume in volumes:
        # volume names are expected to look like "<x>olume_..." — the
        # substring "olume_" must start at index 1 of the name
        pytest.check(
            volume.volname.find("olume_") == 1,
            "Check that volume name contains ``olume_``")
        pytest.check(
            volume.running == "Yes",
            "Check that volume ``Running`` attribute has value ``Yes``")
        pytest.check(
            volume.rebalance == "Not Started",
            "Check that volume ``Rebalance`` attribute has value ``Not Started``"
        )
        pytest.check(
            int(volume.alerts) >= 0,
            "Check that volume's number of alerts is a non-negative integer")
def test_capacity_available(ansible_playbook, workload_capacity_utilization,
                            managed_cluster, gluster_volume):
    """
    Check that Grafana panel *Capacity Available* is showing correct values.
    """
    # prefer the short name when the cluster has one
    if managed_cluster["short_name"]:
        cluster_identifier = managed_cluster["short_name"]
    else:
        cluster_identifier = managed_cluster["integration_id"]
    grafana = grafanaapi.GrafanaApi()
    graphite = graphiteapi.GraphiteApi()
    capacity_panel = grafana.get_panel(
        "Capacity Available",
        row_title="Capacity",
        dashboard="volume-dashboard")
    """
    :step:
      Send **GET** request to
      ``GRAPHITE/render?target=[target]&format=json`` where [target] is
      part of uri obtained from previous GRAFANA call. There should be
      target for capacity utilization of a host. Compare number of hosts
      from Graphite with value retrieved from
      ``workload_capacity_utilization`` fixture.
    :result:
      JSON structure containing data related to capacity utilization is
      similar to values set by ``workload_capacity_utilization`` fixture
      in given time.
    """
    # get graphite target pointing at data containing number of host
    targets = grafana.get_panel_chart_targets(
        capacity_panel,
        cluster_identifier,
        workload_capacity_utilization["metadata"]["volume_name"])
    # the panel charts usable and used capacity series
    targets_used = (targets[0][0], targets[0][1])
    for key, target_expected in enumerate(
            ("usable_capacity", "used_capacity")):
        pytest.check(
            targets_used[key].endswith(target_expected),
            "There is used target that ends with `{}`".format(target_expected))
    # make sure that all data in graphite are saved
    time.sleep(2)
    # expected available capacity = total capacity * (1 - utilization %)
    expected_available = workload_capacity_utilization["metadata"][
        "total_capacity"] * (1 - workload_capacity_utilization["result"] * 0.01)
    # allow 5% of total capacity as divergence
    divergence = workload_capacity_utilization["metadata"][
        "total_capacity"] * 0.05
    graphite.compare_data_mean(
        expected_available,
        targets_used,
        workload_capacity_utilization["start"],
        workload_capacity_utilization["end"],
        divergence=divergence,
        operation='diff')
def check_user(self, user_data, asserts_in=None):
    """
    Check if there is stored user with given attributes.

    Args:
        user_data: user data that are going to be checked
        asserts_in: assert values for this call and this method
    """
    stored_data = self.get_user(user_data["username"], asserts_in=asserts_in)
    # restrict the comparison to the keys the API is expected to persist
    expected_data = {key: user_data[key] for key in USERDATA_KEYS}
    message = "Json of stored user: {0}\n\tAnd of checked one: {1}\n\tShould be equal."
    pytest.check(stored_data == expected_data,
                 message.format(stored_data, expected_data))
def check_response(resp, asserts_in=None, issue=None):
    """
    Check default asserts.

    It checks: *ok*, *status*, *reason*.

    Args:
        resp: response to check
        asserts_in: asserts that are compared with response
        issue: known issue, log WAIVE
    """
    expected = ApiBase.default_asserts.copy()
    if asserts_in:
        expected.update(asserts_in)
    # the response body must be valid json
    try:
        json.dumps(resp.json(encoding='unicode'))
    except ValueError as err:
        pytest.check(
            False,
            "Bad response '{}' json format: '{}'".format(resp, err))
    pytest.check(
        resp.ok == expected["ok"],
        "There should be ok == {}".format(str(expected["ok"])),
        issue=issue)
    pytest.check(
        resp.status_code == expected["status"],
        "Status code should equal to {}".format(expected["status"]),
        issue=issue)
    pytest.check(
        resp.reason == expected["reason"],
        "Reason should equal to {}".format(expected["reason"]),
        issue=issue)
def test_logout(log_in, testcase_end):
    """
    Test that user can be logged out from UI
    """
    # log the user out through the user menu
    menu = UpperMenu(log_in.driver)
    menu.open_user_menu().logout()
    # the login page must be shown again
    pytest.check(
        log_in.loginpage.is_present,
        "User should be logged out and login page should be displayed")
    # navigating to a sub-page must not bypass the login screen
    log_in.driver.get(pytest.config.getini("usm_web_url") + LOCATION)
    pytest.check(log_in.loginpage.is_present,
                 "Login page should be displayed")
def test_negative_login(testcase_set, testcase_end,
                        username, password, error_message):
    """
    Test negative login cases.

    Args:
        username: invalid user name, or True to use the configured one
        password: invalid password, or True to use the configured one
        error_message: expected error text, or None to skip the check
    """
    # True is a sentinel meaning "use the valid configured value"
    if username is True:
        username = pytest.config.getini("usm_username")
    if password is True:
        password = pytest.config.getini("usm_password")
    loginpage_inst = loginpage.LoginPage(
        testcase_set.driver, pytest.config.getini("usm_web_url"))
    LOGGER.debug("Fill invalid credentials")
    loginpage_inst.fill_form_values(username, password)
    LOGGER.debug("Click on Log In button")
    loginpage_inst.get_model_element("login_btn").click()
    # check error message
    pytest.check(loginpage_inst.is_present, "Still on the login page")
    if error_message is not None:
        tmp_obj = loginpage_inst.get_model_element("error_label")
        pytest.check(tmp_obj.is_displayed(), "Error message is visible.")
        pytest.check(tmp_obj.text == error_message,
                     "Error message should be: {}".format(error_message))
        # use `in` instead of str.index(): index() raises ValueError when
        # the class is missing, turning a failed check into a test error
        pytest.check(
            "alert-danger" in tmp_obj.get_attribute("class"),
            # fixed typo in check message ("shoul" -> "should")
            "Error message should be red.")
def test_positive_login(testcase_set, log_out):
    """@usmid web/login_positive

    Login as valid user.
    """
    login_page = loginpage.LoginPage(
        testcase_set.driver, pytest.config.getini("usm_web_url"))
    LOGGER.debug("Fill valid credentials and log in")
    login_page.login_user(
        pytest.config.getini("usm_username"),
        pytest.config.getini("usm_password"))
    landing_page = get_landing_page(testcase_set.driver)
    # Following check is extra, could be removed
    pytest.check(
        landing_page.is_present,
        "A user should be logged in and main page should be present.")
def log_in(testcase_set):
    """
    All tests which don't need to tweak login process and expects
    that the default user is already logged in for the test to work
    should extend this class.
    """
    web_url = pytest.config.getini("usm_web_url")
    testcase_set.loginpage = loginpage.LoginPage(testcase_set.driver, web_url)
    testcase_set.loginpage.login_user(
        pytest.config.getini("usm_username"),
        pytest.config.getini("usm_password"))
    testcase_set.init_object = get_landing_page(testcase_set.driver)
    msg = ("Initial element - Home page or navigation part of the main page "
           "should contain all required components.")
    pytest.check(testcase_set.init_object.is_present, msg)
    yield testcase_set
def test_login_positive_enter(testcase_set, log_out):
    """@usmid web/login_positive_enter

    Submit login form by "Enter" key
    """
    login_page = loginpage.LoginPage(
        testcase_set.driver, pytest.config.getini("usm_web_url"))
    LOGGER.debug("Fill valid credentials")
    login_page.fill_form_values(
        pytest.config.getini("usm_username"),
        pytest.config.getini("usm_password"))
    LOGGER.debug("Press Enter")
    # submit by pressing Enter in the password field instead of clicking
    login_page.get_model_element("password").send_keys(Keys.ENTER)
    WaitForWebstrPage(login_page, 10).to_disappear()
    landing_page = get_landing_page(testcase_set.driver)
    # Following check is extra, could be removed
    pytest.check(landing_page.is_present, "User should be logged in")
def disable_profiling(self):
    """
    Click Disable Profiling button and wait for Volume Profiling
    to change to Disabled.
    """
    view = self.application.web_ui.create_view(ClusterVolumesView)
    view.volumes(self.volname).disable_profiling.click()
    wait_for(
        lambda: self.update()[4] == "Disabled",
        timeout=300,
        delay=5,
        # fixed copy-pasted message: this waits for "Disabled", not "Enabled"
        message="Volume's profiling hasn't changed to Disabled in 300 seconds\n" +
        "Visible text: {}".format(view.browser.elements("*")[0].text))
    LOGGER.debug("Volume {} profiling value: {}".format(
        self.volname, self.profiling))
    # added an explicit message so a failure is self-explanatory
    pytest.check(self.profiling == "Disabled",
                 "Volume profiling should be Disabled")
def post_check(driver, cluster_name, hosts, login_page=None):
    """
    Cluster workflow post-check.

    Parameters:
        driver: selenium driver
        cluster_name (str): cluster name
        hosts (list): list of dictionaries
            {'hostname': <hostname>, 'release': <release>, ...
            for check only
        clusters_nr (int): previous number of clusters in clusters list
        login_page: LoginPage instance
    """
    try:
        NavMenuBars(driver).open_clusters(click_only=True)
        # TODO remove following sleep
        # sleep a while because of https://github.com/Tendrl/api/issues/159
        WebDriverUtils.wait_a_while(90, driver)
        cluster_list = ClustersList(driver)
        # Check that cluster is present in the list
        # open cluster details for the imported cluster
        present = False
        for cluster in cluster_list:
            # match by case-insensitive substring of the cluster name
            if cluster_name.lower() in cluster.name.lower():
                pytest.check(
                    cluster.managed,
                    'cluster {} should be managed'.format(cluster.name))
                cluster.open_details()
                present = True
                break
        pytest.check(
            present,
            'The imported cluster should be present in the cluster list')
        # TODO proper hosts list has to be added
        # # check hosts
        # if present:
        #     page_hosts_list = ClusterMenu(driver).open_hosts()
        #     check_hosts(hosts, page_hosts_list)
    except InitPageValidationError:
        # workaround for missing menu after cluster creation
        pass