def get_all_test_results_impl(self, should_raise_exception=True):
    # type: (bool) -> TestResultsSummary
    """Block until every running test reaches the "completed" state, then
    collect all results into a TestResultsSummary.

    :param should_raise_exception: when True, re-raise the first error
        (diffs found / new test / failed) instead of only recording it.
    :raises DiffsFoundError: a test is unresolved (and not new).
    :raises NewTestError: a test is new.
    :raises TestFailedError: a test failed or never finished.
    """
    # Poll until the only remaining state across all tests is "completed".
    while True:
        states = list(set(t.state for t in self.all_running_tests))
        if len(states) == 1 and states[0] == "completed":
            break
        datetime_utils.sleep(500)
    all_results = []
    for test, test_result in iteritems(self._all_test_result):
        exception = None
        if test.test_result is None:
            exception = TestFailedError("Test haven't finished correctly")
        # BUG FIX: the original read test_result.name / .app_name
        # unconditionally, raising AttributeError whenever test_result was
        # None; all uses are now guarded by `if test_result`.
        if test_result:
            scenario_id_or_name = test_result.name
            app_id_or_name = test_result.app_name
            if test_result.is_unresolved and not test_result.is_new:
                exception = DiffsFoundError(
                    test_result, scenario_id_or_name, app_id_or_name
                )
            if test_result.is_new:
                exception = NewTestError(
                    test_result, scenario_id_or_name, app_id_or_name
                )
            if test_result.is_failed:
                exception = TestFailedError(
                    test_result, scenario_id_or_name, app_id_or_name
                )
        all_results.append(
            TestResultContainer(test_result, test.browser_info, exception)
        )
        if exception and should_raise_exception:
            raise exception
    return TestResultsSummary(all_results)
def eyes(request, eyes_class, batch_info):
    """Pytest fixture yielding a configured Eyes instance; aborts it on teardown.

    Options may come from the ``@pytest.mark.eyes()`` marker and/or from
    indirect parametrization; overlapping keys are rejected.
    """
    # TODO: allow to setup logger level through pytest option
    # logger.set_logger(StdoutLogger())
    eyes = eyes_class()
    config = eyes.configuration
    config.batch = batch_info
    config.hide_scrollbars = True
    config.save_new_tests = False
    config.hide_caret = True
    eyes._debug_screenshot_provided = True
    # configure eyes options through @pytest.mark.eyes() marker
    marker = request.node.get_closest_marker("eyes")
    marker_opts = marker.kwargs if marker else {}
    # configure eyes through @pytest.mark.parametrize('eyes', [], indirect=True)
    parametrized_opts = getattr(request, "param", {})
    if set(marker_opts.keys()).intersection(parametrized_opts):
        raise ValueError(
            "Eyes options conflict. The values from .mark.eyes and .mark.parametrize shouldn't intersect."
        )
    marker_opts.update(parametrized_opts)
    for opt_name, opt_value in iteritems(marker_opts):
        setattr(eyes, opt_name, opt_value)
    eyes.add_property("Agent ID", eyes.full_agent_id)
    yield eyes
    eyes.abort()
def process_all(self):
    """Replace every entry that has no response with an empty placeholder
    resource, then return self for chaining."""
    for resource_url, resource in iteritems(self):
        if resource is not None:
            continue
        logger.debug("No response for {}".format(resource_url))
        self[resource_url] = VGResource.EMPTY(resource_url)
    return self
def collect_test_results(tests, should_raise_exception):
    # type: (Dict[RunningTest, TestResults], bool) -> List[TestResultContainer]
    """Convert finished tests into TestResultContainers, classifying each
    outcome as diffs-found / new / failed / passed.

    :param tests: mapping of running tests to their (possibly None) results.
    :param should_raise_exception: when True, raise the first classified
        error instead of only recording it in the container.
    :raises DiffsFoundError: a result is unresolved (and not new).
    :raises NewTestError: a result is new.
    :raises TestFailedError: a test failed, had no checks, or no result.
    """
    all_results = []
    for test, test_result in iteritems(tests):
        if test.pending_exceptions:
            # BUG FIX: the original called .join() *on the message template*,
            # which used the template as a separator between exception texts
            # and never substituted {:s}. Join the texts, then format them in.
            logger.error(
                "During test execution above exception raised. \n {:s}".format(
                    "\n".join(str(e) for e in test.pending_exceptions)
                )
            )
        if test.has_checks:
            exception = None
        else:
            exception = TestFailedError("Test has no checks")
        if test_result:
            scenario_id_or_name = test_result.name
            app_id_or_name = test_result.app_name
            if test_result.is_unresolved and not test_result.is_new:
                exception = DiffsFoundError(
                    test_result, scenario_id_or_name, app_id_or_name
                )
            if test_result.is_new:
                exception = NewTestError(
                    test_result, scenario_id_or_name, app_id_or_name
                )
            if test_result.is_failed:
                exception = TestFailedError(
                    test_result, scenario_id_or_name, app_id_or_name
                )
        else:
            exception = TestFailedError("Test haven't finished correctly")
        all_results.append(
            TestResultContainer(test_result, test.browser_info, exception)
        )
        if exception and should_raise_exception:
            raise exception
    return all_results
def get_skip_tests_list():
    """Merge the failed-tests and skip-duplicates suites into one
    ``{test_file: {test_name: value}}`` mapping."""
    skip_tests = defaultdict(dict)
    combined_suites = itertools.chain(
        get_failed_tests_from_file(), get_skip_duplicates_tests_from_file()
    )
    for test_file, tests in combined_suites:
        for test_name, value in iteritems(tests):
            skip_tests[test_file][test_name] = value
    return skip_tests
def post_locators(self, visual_locators_data):
    # type: (VisualLocatorsData) -> LOCATORS_TYPE
    """POST the locator data to the server and return the located regions,
    keyed by locator id.

    :raises requests.HTTPError: on a non-2xx server response.
    """
    payload = json_utils.to_json(visual_locators_data)
    response = self._com.long_request("post", "api/locators/locate", data=payload)
    response.raise_for_status()
    located = {}
    for locator_id, regions in iteritems(response.json()):
        located[locator_id] = json_utils.attr_from_dict(regions, Region)
    return located
def _get_all_running_tests(self):
    # type: ()-> List[RunningTest]
    """Flatten the test lists of all eyes into one list, logging a state
    summary on every 15th call."""
    tests = list(
        itertools.chain.from_iterable(e.test_list for e in self.all_eyes)
    )
    # `.calls` is an attribute attached to this method (presumably by a
    # call-counting decorator — defined outside this view).
    call_number = self._get_all_running_tests.calls
    if call_number % 15 == 0:  # print state every 15 call
        state_counts = Counter(t.state for t in tests)
        summary_lines = [
            "\t{} - {}".format(state, count)
            for state, count in iteritems(state_counts)
        ]
        logger.info("Current tests states: \n{}".format("\n".join(summary_lines)))
    return tests
def parse_frame_dom_resources(self, data):  # noqa
    # type: (Dict) -> RGridDom
    """Recursively parse a frame's DOM payload, fetching/registering every
    resource (blobs, linked URLs, sub-frames) into self.request_resources,
    and return the frame's RGridDom.

    ``data`` is expected to carry "url", "cdt" and optionally
    "resourceUrls" / "blobs" / "frames" keys (as read below).
    """
    base_url = data["url"]
    resource_urls = data.get("resourceUrls", [])
    blobs = data.get("blobs", [])
    frames = data.get("frames", [])
    # URLs discovered while parsing CSS/SVG contents; appended to from
    # resource-fetch callbacks, hence protected by a lock below.
    discovered_resources_urls = []

    def handle_resources(content_type, content):
        # Extract further resource URLs referenced from CSS/SVG bodies and
        # record them (absolutized against base_url) for the fetch pass.
        urls_from_css, urls_from_svg = [], []
        if content_type.startswith("text/css"):
            urls_from_css = parsers.get_urls_from_css_resource(content)
        if content_type.startswith("image/svg"):
            urls_from_svg = parsers.get_urls_from_svg_resource(content)
        for discovered_url in urls_from_css + urls_from_svg:
            target_url = _apply_base_url(discovered_url, base_url)
            # May run from fetch worker threads — guard the shared list.
            with self.discovered_resources_lock:
                discovered_resources_urls.append(target_url)

    def get_resource(link):
        # type: (Text) -> VGResource
        # Download one resource; data: URIs are already delivered as blobs,
        # so an empty placeholder is returned for them.
        if link.startswith("data:"):
            # resource already in blob
            return VGResource.EMPTY(link)
        response = self.eyes_connector.download_resource(link)
        return VGResource.from_response(link, response, on_created=handle_resources)

    # Recurse into sub-frames first and register each frame's DOM resource.
    for f_data in frames:
        f_data["url"] = _apply_base_url(f_data["url"], base_url)
        self.request_resources[f_data[
            "url"]] = self.parse_frame_dom_resources(f_data).resource
    # Register inline blobs, skipping the one that is the page itself.
    for blob in blobs:
        resource = VGResource.from_blob(blob, on_created=handle_resources)
        if resource.url.rstrip("#") == base_url:
            continue
        self.all_blobs.append(resource)
        self.request_resources[resource.url] = resource
    # Fetch every explicitly listed or discovered URL (deduplicated) via
    # the cache, then copy cache entries (None -> empty placeholder) into
    # this request's resource map.
    for r_url in set(resource_urls + discovered_resources_urls):
        self.resource_cache.fetch_and_store(r_url, get_resource)
    for r_url, val in iteritems(self.resource_cache):
        if val is None:
            val = VGResource.EMPTY(r_url)
        self.request_resources[r_url] = val
    return RGridDom(
        url=base_url, dom_nodes=data["cdt"], resources=self.request_resources
    )
def _get_all_running_tests(self):
    # type: ()-> List[RunningTest]
    """Flatten the test lists of all eyes into one list, logging a state
    summary at most once every 15 seconds."""
    all_tests = list(
        itertools.chain.from_iterable(e.test_list for e in self.all_eyes)
    )
    if time() - self._last_states_logging_time > 15:
        # print states every 15 seconds
        self._last_states_logging_time = time()
        state_counts = Counter(t.state for t in all_tests)
        summary = "\n".join(
            ["\t{} - {}".format(state, count) for state, count in iteritems(state_counts)]
        )
        logger.info("Current tests states: \n{}".format(summary))
    return all_tests
def eyes_setup(request, eyes_class, eyes_runner, batch_info, eyes_config=None):
    """Pytest fixture yielding a configured Eyes instance; aborts it on teardown.

    ``eyes_config`` may be overridden per-test via the
    ``@pytest.mark.eyes_config()`` marker. ``batch_info`` is accepted only as
    a fixture dependency (it is not read here — presumably ``eyes_config``
    already carries the batch; TODO confirm).
    """
    # TODO: allow to setup logger level through pytest option
    # logger.set_logger(StdoutLogger())
    # FIX: the original always built a runner-less instance and then, when a
    # runner was supplied, built a second instance and discarded the first.
    # Construct exactly one instance instead.
    if eyes_runner:
        eyes = eyes_class(eyes_runner)
    else:
        # in case eyes-images
        eyes = eyes_class()
    # configure eyes options through @pytest.mark.eyes_config() marker
    config_mark_opts = request.node.get_closest_marker("eyes_config")
    config_mark_opts = config_mark_opts.kwargs if config_mark_opts else {}
    for key, val in iteritems(config_mark_opts):
        setattr(eyes_config, key, val)
    eyes.set_configuration(eyes_config)
    eyes.add_property("Agent ID", eyes.full_agent_id)
    yield eyes
    eyes.abort()
def eyes(request, eyes_class):
    """Pytest fixture yielding an Eyes instance with scrollbars hidden;
    aborts it on teardown.

    Options may come from the ``@pytest.mark.eyes()`` marker and/or from
    parametrization; overlapping keys are rejected.
    """
    # TODO: allow to setup logger level through pytest option
    # logger.set_logger(StdoutLogger())
    eyes = eyes_class()
    eyes.hide_scrollbars = True
    # configure eyes options through @pytest.mark.eyes() marker
    marker = request.node.get_closest_marker("eyes")
    marker_opts = marker.kwargs if marker else {}
    # configure eyes through @pytest.mark.parametrize('eyes', [])
    parametrized_opts = getattr(request, "param", {})
    if set(marker_opts.keys()).intersection(parametrized_opts):
        raise ValueError(
            "Eyes options conflict. The values from .mark.eyes and .mark.parametrize shouldn't intersect."
        )
    marker_opts.update(parametrized_opts)
    for opt_name, opt_value in iteritems(marker_opts):
        setattr(eyes, opt_name, opt_value)
    yield eyes
    eyes.abort()
def get_text_regions_in_running_session_image(self, data):
    # type: (TextSettingsData) -> PATTERN_TEXT_REGIONS
    """POST the text-settings data to the server and return the extracted
    text regions keyed by pattern.

    :raises EyesError: when the server responds with a non-OK status.
    """
    logger.debug(
        "call",
        _class=self.__class__.__name__,
        _method="extract_text_regions",
        text_region_data=data,
    )
    response = self._com.long_request(
        "post",
        urljoin(self.API_SESSIONS_RUNNING, "images/textregions"),
        data=json_utils.to_json(data),
    )
    if not response.ok:
        raise EyesError(
            "ServerConnector.extract_text_regions - unexpected status {}".format(
                response.status_code
            )
        )
    return {
        pattern: json_utils.attr_from_dict(regions, TextRegion)
        for pattern, regions in iteritems(response.json())
    }
def _get_all_test_results_impl(self, should_raise_exception=True):
    # type: (bool) -> TestResultsSummary
    """Block until every running test reaches the "completed" state, then
    collect all results into a TestResultsSummary.

    :param should_raise_exception: when True, re-raise the first error
        (diffs found / new test / failed) instead of only recording it.
    :raises DiffsFoundError: a test is unresolved (and not new).
    :raises NewTestError: a test is new.
    :raises TestFailedError: a test failed or never finished.
    """
    # Poll until the only remaining state across all tests is "completed".
    while True:
        states = list(set(t.state for t in self._get_all_running_tests()))
        logger.debug("Current test states: \n {}".format(states))
        if len(states) == 1 and states[0] == "completed":
            break
        datetime_utils.sleep(
            1500,
            msg="Waiting for state completed in get_all_test_results_impl",
        )
    all_results = []
    for test, test_result in iteritems(self._all_test_result):
        if test.pending_exceptions:
            # BUG FIX: the original called .join() *on the message template*,
            # using it as a separator and never substituting {:s}. Join the
            # exception texts, then format them into the template.
            logger.error(
                "During test execution above exception raised. \n {:s}".format(
                    "\n".join(str(e) for e in test.pending_exceptions)
                )
            )
        exception = None
        if test.test_result is None:
            exception = TestFailedError("Test haven't finished correctly")
        # BUG FIX: the original read test_result.name / .app_name
        # unconditionally, raising AttributeError whenever test_result was
        # None; all uses are now guarded by `if test_result`.
        if test_result:
            scenario_id_or_name = test_result.name
            app_id_or_name = test_result.app_name
            if test_result.is_unresolved and not test_result.is_new:
                exception = DiffsFoundError(
                    test_result, scenario_id_or_name, app_id_or_name
                )
            if test_result.is_new:
                exception = NewTestError(
                    test_result, scenario_id_or_name, app_id_or_name
                )
            if test_result.is_failed:
                exception = TestFailedError(
                    test_result, scenario_id_or_name, app_id_or_name
                )
        all_results.append(
            TestResultContainer(test_result, test.browser_info, exception)
        )
        if exception and should_raise_exception:
            raise exception
    return TestResultsSummary(all_results)
def calls(self):
    """Map each recorded input key to the first result captured for it."""
    first_results = {}
    for call_key, call_results in iteritems(self.input_calls):
        first_results[call_key] = call_results[0]
    return first_results
def get_skip_duplicates_tests_from_file():
    """Load the generated-tests suite YAML (trusted local fixture) and
    return its (test_file, tests) pairs."""
    suite_path = path.join(here, "generatedTestsSuite.yaml")
    with open(suite_path) as stream:
        generated_tests = yaml.load(stream, Loader=yaml.Loader)
    return iteritems(generated_tests)
def get_failed_tests_from_file():
    """Load the failed-tests suite YAML (trusted local fixture) and return
    its (test_file, tests) pairs."""
    suite_path = path.join(here, "failedTestsSuite.yaml")
    with open(suite_path) as stream:
        failed_tests = yaml.load(stream, Loader=yaml.Loader)
    return iteritems(failed_tests)
def __init__(self, position, **kwargs):
    """Store the position and attach every extra keyword argument as an
    attribute of the instance."""
    self.position = position
    for attr_name, attr_value in iteritems(kwargs):
        setattr(self, attr_name, attr_value)
def __init__(self, position, **kwargs):
    # type: (Point, **Any) -> None
    """Store the position and attach every extra keyword argument as an
    attribute of the instance."""
    self.position = position
    for attr_name, attr_value in iteritems(kwargs):
        setattr(self, attr_name, attr_value)