def verify_injected_secretRef(context, secret_ref, cr_name, crd_name, json_path):
    """Poll until the custom resource's field at json_path equals secret_ref."""
    oc = Openshift()

    def _ref_matches():
        actual = oc.get_resource_info_by_jsonpath(
            crd_name, cr_name, context.namespace.name, json_path)
        return actual == secret_ref

    polling2.poll(_ref_matches, step=5, timeout=400)
def is_field_displayed(driver=None, field: dict = None, web_element: WebElement = None, avoid_move_to: bool = False, wait_until: int = 5):
    """
    Check whether a field is currently displayed (no interaction performed).

    :param driver: a selenium web driver
    :param field: a dictionary corresponding to the field to retrieve the text
    :param web_element: a webElement to search the field from
    :param avoid_move_to: Avoid to move into view the WebElement
    :param wait_until: The default wait field existence and display
    :return: Boolean. True if element is displayed, false otherwise.
    """
    target = is_field_exist(driver=driver, field=field, web_element=web_element,
                            avoid_move_to=avoid_move_to, until=wait_until)
    if target is None:
        log.info(f"Element '{field}' doesn't exist in the DOM")
        return False
    # Exceptions that only mean "not ready yet" while polling for visibility.
    transient = (NoSuchElementException,
                 StaleElementReferenceException,
                 ElementNotInteractableException)
    try:
        polling2.poll(target.is_displayed, step=0.2, timeout=wait_until,
                      ignore_exceptions=transient)
    except polling2.TimeoutException:
        log.info(f"Element '{field}' is not displayed")
        return False
    return True
def select_repo(driver, title):
    """Open the search page, filter the repo list by title, and select the
    single matching repo's checkbox.

    :param driver: selenium web driver
    :param title: repository title used as the filter criterion
    """
    lcc.log_info("Selecting repo::{}".format(title))
    utilities.click_element(driver, By.LINK_TEXT, locators.MENU_SEARCH_PAGE_LINK_TEXT)
    # Poll until all the repos are listed in the filter
    poll(lambda: len(driver.find_elements(
            By.CLASS_NAME, locators.SELECT_REPO_CHECKBOX_CLASS_NAME)) >= 1,
         ignore_exceptions=[NoSuchElementException], timeout=15, step=1)
    utilities.enter_text(driver, By.XPATH, locators.FILTER_BY_REPO_SEARCH_BAR_XPATH, title)
    # Poll until exactly one repo matching the search criteria is listed
    poll(lambda: len(driver.find_elements(
            By.CLASS_NAME, locators.SELECT_REPO_CHECKBOX_CLASS_NAME)) == 1,
         ignore_exceptions=[NoSuchElementException], timeout=15, step=1)
    # Removed leftover debug print of the checkbox count and the
    # commented-out utilities.wait(7) — both were development residue.
    utilities.click_element(driver, By.CLASS_NAME, locators.SELECT_REPO_CHECKBOX_CLASS_NAME)
    # Give the UI a moment to register the checkbox selection.
    utilities.wait(3)
def assert_file_not_exist(self, file_path):
    """Poll the application route until the given path answers with HTTP 404."""
    url = f"http://{self.route_url}{file_path}"
    polling2.poll(
        lambda: requests.get(url=url),
        check_success=lambda resp: resp.status_code == 404,
        ignore_exceptions=(requests.exceptions.ConnectionError,),
        step=5,
        timeout=400,
    )
def test_poll_forever_with_timeout_max_tries(self):
    """poll() must reject combining poll_forever with timeout/max_tries."""
    def always_true():
        return True

    with pytest.raises(AssertionError):
        polling2.poll(always_true, poll_forever=True,
                      step=1, timeout=1, max_tries=1)
def check_modules_api(self):
    """Poll the modules progress-update API until done, then validate payload.

    Validates status code, upload counters, per-module fields, and server
    health indicators against the expected module count.
    """
    url = fixture.git_import_server + "/api/progress-update/modules"
    poll(lambda: requests.post(url, data=self.status_key),
         check_success=check_current_status, step=5, timeout=20)
    modules_response = requests.post(url, data=self.status_key)
    check_that("modules API status code", modules_response.status_code, equal_to(200))
    # Parse the body once instead of re-parsing it for every assertion.
    body = modules_response.json()
    expected_count = int(number_of_modules_uploaded)
    check_that("current status", body["current_status"], equal_to("done"))
    check_that("number of modules uploaded", body["total_files_uploaded"], equal_to(expected_count))
    check_that("modules not uploaded", body["modules_not_uploaded"], equal_to([]))
    check_that("server message", body["server_message"], equal_to("Accepting requests"))
    check_that("server status", body["server_status"], equal_to("OK"))
    check_that("last_uploaded_file", body["last_uploaded_file"], is_not_none())
    modules_uploaded = body["modules_uploaded"]
    check_response_code(modules_uploaded, "response_code", expected_count)
    check_not_null(modules_uploaded, "path", expected_count)
    check_not_null(modules_uploaded, "response_details", expected_count)
def check_secret_key(context, key):
    """Wait for the binding secret, then assert the given key's value is empty."""
    openshift = Openshift()
    # First resolve the binding secret's name; retries until it exists.
    secret_name = polling2.poll(
        lambda: get_sbr_secret_name(context),
        check_success=lambda name: name is not None,
        ignore_exceptions=(ValueError,),
        step=100, timeout=1000)
    path = f'{{.data.{key}}}'

    def _value_is_empty():
        return openshift.get_resource_info_by_jsonpath(
            "secrets", secret_name, context.namespace.name, path) == ""

    polling2.poll(_value_is_empty, step=5, timeout=120,
                  ignore_exceptions=(binascii.Error,))
def validate_absent_sb(context, sb_name):
    """Poll until the named ServiceBinding is no longer found in the namespace."""
    openshift = Openshift()

    def _lookup():
        return openshift.search_resource_in_namespace(
            "servicebindings", sb_name, context.namespace.name)

    polling2.poll(_lookup,
                  check_success=lambda found: found is None,
                  step=5, timeout=400)
def perform_create(self, serializer):
    """Save the simulation model and kick off FMU creation for it.

    Defaults container_id when absent, persists the serializer with the
    requesting user, schedules the model-post task after the DB commit,
    then blocks until the result backend reports the FMU is ready.
    """
    # Fall back to the default simulator container when none was posted.
    if self.request.POST.get('container_id') is None:
        self.request.data['container_id'] = 'src_simulator_1'
    serializer.save(user=self.request.user, container_id=self.request.data['container_id'])
    # Payload forwarded to the async model-post task, including the caller's token.
    data = {
        'model_name': self.request.data['model_name'],
        'step_size': self.request.data['step_size'],
        'final_time': self.request.data['final_time'],
        'container_id': self.request.data['container_id'],
        'Authorization': 'Token ' + str(self.request.auth)
    }
    if 'model_count' in self.request.data:
        data['model_count'] = self.request.data['model_count']
    # TODO need to change poll_forever and perform check to see if FMU is created, also rework below
    # Only schedule the task when the model name does not already embed the
    # container id; fires after the surrounding transaction commits.
    if self.request.data['container_id'] not in self.request.data['model_name']:
        transaction.on_commit(lambda: tasks.post_model.apply_async((data,), queue='web', routing_key='web'))
    # NOTE(review): poll_forever=True blocks this request indefinitely if the
    # backend never reports the result — see TODO above.
    polling2.poll(
        lambda: check_result_backend(self.request.data['model_name']) is True,
        step=10, poll_forever=True)
    return Response("FMU Ready", status=200)
def is_running(self, wait=False):
    """Report whether a pod exists for every deployment of the operator's CSV.

    With wait=True, block (poll) for the CSV and for each deployment's pod;
    otherwise inspect the current state once. Returns True only when a pod
    was found for every expected deployment.
    """
    csv_name = self.openshift.get_current_csv(
        self.name, self.operator_catalog_source_name, self.operator_catalog_channel)
    ns = self.openshift.operators_namespace
    if wait:
        # Block until the CSV object shows up in the operators namespace.
        polling2.poll(
            lambda: self.openshift.search_resource_in_namespace("csvs", csv_name, ns),
            check_success=lambda v: v is not None, step=1, timeout=100)
    elif self.openshift.search_resource_in_namespace("csvs", csv_name, ns) is None:
        return False
    expected = self.openshift.get_resource_info_by_jsonpath(
        "csv", csv_name, ns, "{.spec.install.spec.deployments[*].name}").split()
    located = []
    for deployment in expected:
        pattern = self.pod_name_pattern.format(name=deployment)
        if wait:
            pod = self.openshift.wait_for_pod(pattern, ns)
        else:
            pod = self.openshift.search_pod_in_namespace(pattern, ns)
        if pod is not None:
            status = self.openshift.check_pod_status(pod, ns)
            print("The pod {} is running: {}".format(pod, status))
            located.append(pod)
    if len(located) == len(expected):
        return True
    print(f"Not all pods from expected deployments [{expected}] are running. Only following pods are: [{located}]")
    return False
def check_file_value(context, file_path):
    """Poll until the file in the application contains the expected value."""
    expected = Template(context.text.strip()).substitute(
        NAMESPACE=context.namespace.name)
    resolved_path = substitute_scenario_id(context, file_path)
    polling2.poll(
        lambda: context.application.get_file_value(resolved_path) == expected,
        step=5, timeout=400)
def sb_secret_is_not_present(context):
    """Poll until the service binding secret disappears from the namespace."""
    openshift = Openshift()

    def _find_secret():
        return openshift.search_resource_in_namespace(
            "secrets", context.sb_secret, context.namespace.name)

    polling2.poll(_find_secret,
                  check_success=lambda found: found is None,
                  ignore_exceptions=(ValueError,),
                  step=100, timeout=1000)
def sbo_jq_is(context, jq_expression, sbr_name=None, json_value=""):
    """Poll until the binding's jsonpath expression evaluates to json_value.

    Defaults to the first registered binding when sbr_name is not given.
    """
    if sbr_name is None:
        sbr_name = list(context.bindings.values())[0].name

    def _matches():
        raw = context.bindings[sbr_name].get_info_by_jsonpath(jq_expression)
        return json.loads(raw) == json_value

    polling2.poll(_matches, step=5, timeout=800,
                  ignore_exceptions=(json.JSONDecodeError,))
def resource_jsonpath_value(context, json_path, res_name, json_value=""):
    """Poll until the named resource's jsonpath value equals json_value.

    All three textual arguments go through scenario-id substitution first;
    res_name is expected in "<crd>/<name>" form.
    """
    openshift = Openshift()
    path = substitute_scenario_id(context, json_path)
    expected = substitute_scenario_id(context, json_value)
    crd_name, name = substitute_scenario_id(context, res_name).split("/")
    polling2.poll(
        lambda: openshift.get_resource_info_by_jsonpath(
            crd_name, name, context.namespace.name, path) == expected,
        step=5, timeout=800,
        ignore_exceptions=(json.JSONDecodeError,))
def poll_file_check():
    """Poll file_check until it reports the files are available (20s budget).

    Prints the immediate check result first, then polls and reports
    availability either way.
    """
    result = file_check()
    print(result)
    try:
        polling2.poll(file_check, timeout=20, step=5)
        print("files are available")
    except polling2.TimeoutException:
        # Only a poll timeout means "not available"; unexpected errors from
        # file_check now propagate instead of being silently swallowed.
        print("files are not available")
def test_default_is_not_log(self, caplog):
    """ Shouldn't log anything unless explicitly asked to do so.
    Except for Begin poll() """
    with caplog.at_level(logging.DEBUG):
        polling2.poll(target=lambda: True, step=0.1, max_tries=1)
    # Message fixed: was "Should ony be one log records".
    assert len(caplog.records) == 1, "Should only be one log record"
    assert 'Begin poll(' in caplog.records[0].msg
def check_secret_key(context, secret_name, key):
    """Poll until the secret's data entry for `key` is an empty value."""
    openshift = Openshift()
    path = f'{{.data.{key}}}'

    def _is_empty():
        return openshift.get_resource_info_by_jsonpath(
            "secrets", secret_name, context.namespace.name, path) == ""

    polling2.poll(_is_empty, step=5, timeout=120,
                  ignore_exceptions=(binascii.Error,))
def check_env_var_value_in_both_apps(context, name, value):
    """Poll until both applications expose the env var with the given value."""
    for app in (context.application1, context.application2):
        # Bind `app` as a default to avoid the late-binding closure pitfall.
        polling2.poll(lambda app=app: app.get_env_var_value(name) == value,
                      step=5, timeout=400)
def check_no_redeployment(context, time):
    """Assert the application does NOT redeploy within `time` minutes.

    The poll watches for the generation to grow; timing out is the success
    path, while observing a new generation fails the step.
    """
    try:
        polling2.poll(
            lambda: context.application.get_generation()
            > context.latest_application_generation,
            step=5, timeout=int(time) * 60)
    except polling2.TimeoutException:
        return
    assert False, "Application has redeployed again unexpectedly"
def validate_secret_empty(context, secret_name):
    """Best-effort check that the secret's .data field is empty/null.

    A poll timeout is deliberately swallowed, so this never fails the step.
    """
    openshift = Openshift()
    try:
        # NOTE(review): json.loads("null") yields None, not the string "null",
        # so this comparison looks like it can never be true unless the jq
        # output is the quoted string '"null"' — confirm against
        # get_resource_info_by_jq's actual output format.
        polling2.poll(lambda: json.loads(
            openshift.get_resource_info_by_jq("secrets", secret_name,
                                              context.namespace.name, ".data",
                                              wait=False)) == "null",
            step=5, timeout=20, ignore_exceptions=(json.JSONDecodeError,))
    except polling2.TimeoutException:
        pass
def check_secret_key_with_ip_value(context, secret_name, secret_key):
    """Poll until the secret key's value parses as a valid IP address."""
    openshift = Openshift()
    path = f'{{.data.{secret_key}}}'

    def _parse_ip():
        raw = openshift.get_resource_info_by_jsonpath(
            "secrets", secret_name, context.namespace.name, path)
        # ip_address raises ValueError for non-IP input; poll ignores it
        # and retries until the value parses.
        return ipaddress.ip_address(raw)

    polling2.poll(_parse_ip, step=5, timeout=120,
                  ignore_exceptions=(ValueError,))
def sbo_secret_name_has_been_set(context, sbr_name=None):
    """Poll until the binding reports a non-empty secret name."""
    resolved = (list(context.bindings.values())[0].name
                if sbr_name is None
                else Template(sbr_name).substitute(
                    scenario_id=scenario_id(context)))

    def _has_secret():
        return context.bindings[resolved].get_secret_name() != ""

    polling2.poll(_has_secret, step=5, timeout=800,
                  ignore_exceptions=(json.JSONDecodeError,))
def sbo_secret_name_has_been_set(context, sbr_name):
    """Poll until .status.secret of the ServiceBinding is set (non-empty)."""
    openshift = Openshift()

    def _secret_name_set():
        raw = openshift.get_resource_info_by_jq(
            "servicebinding", sbr_name, context.namespace.name,
            ".status.secret", wait=False)
        return json.loads(raw) != ""

    polling2.poll(_secret_name_set, step=5, timeout=800,
                  ignore_exceptions=(json.JSONDecodeError,))
def sbo_jq_is(context, jq_expression, sbr_name, json_value):
    """Poll until the ServiceBinding's jq expression equals json_value."""
    openshift = Openshift()

    def _matches():
        raw = openshift.get_resource_info_by_jq(
            "servicebinding", sbr_name, context.namespace.name,
            jq_expression, wait=False)
        return json.loads(raw) == json_value

    polling2.poll(_matches, step=5, timeout=800,
                  ignore_exceptions=(json.JSONDecodeError,))
def verify_injected_secretRef(context, cr_name, crd_name, json_path):
    """Wait for the binding secret, then verify the CR references it at json_path."""
    openshift = Openshift()
    # Resolve the binding secret's name first; retries until it exists.
    secret_name = polling2.poll(
        lambda: get_sbr_secret_name(context),
        check_success=lambda name: name is not None,
        ignore_exceptions=(ValueError,),
        step=100, timeout=1000)
    polling2.poll(
        lambda: openshift.get_resource_info_by_jsonpath(
            crd_name, cr_name, context.namespace.name, json_path) == secret_name,
        step=5, timeout=400)
def test_max_call_no_sleep(self):
    """ Test that a MaxCallException is raised without sleeping after the last call """
    max_tries, pause = 2, 0.1
    started = time.time()
    with pytest.raises(polling2.MaxCallException):
        polling2.poll(lambda: False, step=pause, max_tries=max_tries)
    elapsed = time.time() - started
    assert elapsed < max_tries * pause, 'Poll function slept before MaxCallException'
def test_logs_response_at_debug(self, caplog):
    """ Test that the log_value decorator will log values returned to a check_success function. """
    with caplog.at_level(logging.DEBUG):
        polling2.poll(target=lambda: True, step=0.1, max_tries=1,
                      log=logging.DEBUG)
    assert len(caplog.records) == 2, "Should only be two log records."
    last = caplog.records[1]
    assert last.levelname == 'DEBUG'
    assert last.message == "poll() calls check_success(True)"
def validate_secret_empty(context):
    """Resolve the binding secret from context, then best-effort check that
    its .data field is empty/null; fails the step only when sbr_name is
    missing from the context.
    """
    openshift = Openshift()
    if "sbr_name" in context:
        # Wait for the binding secret's name to become available.
        secret = polling2.poll(lambda: get_sbr_secret_name(context),
                               step=100, timeout=1000,
                               ignore_exceptions=(ValueError,),
                               check_success=lambda v: v is not None)
        try:
            # NOTE(review): json.loads("null") yields None, not the string
            # "null", so this comparison looks like it can never be true —
            # confirm against get_resource_info_by_jq's actual output format.
            # The timeout is deliberately swallowed, making this best-effort.
            polling2.poll(lambda: json.loads(
                openshift.get_resource_info_by_jq("secrets", secret,
                                                  context.namespace.name,
                                                  ".data",
                                                  wait=False)) == "null",
                step=5, timeout=20, ignore_exceptions=(json.JSONDecodeError,))
        except polling2.TimeoutException:
            pass
    else:
        assert False, "sbr_name not in context"
def test_max_call_exception(self):
    """ Test that a MaxCallException will be raised """
    tries = 100
    try:
        polling2.poll(lambda: False, step=0, max_tries=tries)
    except polling2.MaxCallException as exc:
        # The exception carries every observed value plus the final one.
        assert exc.values.qsize() == tries, 'Poll function called the incorrect number of times'
        assert exc.last is False, 'The last value was incorrect'
    else:
        assert False, 'No MaxCallException raised'
def test_logs_response_change_level(self, caplog):
    """ Test that the log parameter controls the logging level in poll function """
    with caplog.at_level(logging.DEBUG):
        polling2.poll(target=lambda: True, step=0.1, max_tries=1,
                      log=logging.INFO)
    # Message fixed for grammar and consistency with the DEBUG-level sibling
    # test (was "Should only be two log record.").
    assert len(caplog.records) == 2, "Should only be two log records."
    record = caplog.records[1]
    assert record.levelname == 'INFO'
    assert record.message == "poll() calls check_success(True)"