def make_profile(profile_type: Profiles = None, preferences: dict = None):
    """Internal-only method used to create profiles on disk.

    :param profile_type: Profiles.BRAND_NEW, Profiles.LIKE_NEW,
        Profiles.TEN_BOOKMARKS, Profiles.DEFAULT
    :param preferences: A dictionary containing profile preferences
    """
    if profile_type is None:
        profile_type = Profiles.DEFAULT

    if preferences is None:
        if profile_type is Profiles.BRAND_NEW:
            preferences = FirefoxSettings.DEFAULT_FX_PREFS
        else:
            preferences = {}

    test_root = PathManager.get_current_tests_directory()
    current_test = os.environ.get('CURRENT_TEST')
    test_path = current_test.split(test_root)[1].split('.py')[0][1:]
    profile_path = os.path.join(PathManager.get_current_run_dir(), test_path, 'profile')

    if profile_type is Profiles.BRAND_NEW:
        logger.debug('Creating brand new profile: %s' % profile_path)
    elif profile_type in (Profiles.LIKE_NEW, Profiles.TEN_BOOKMARKS):
        logger.debug('Creating new profile from %s staged profile.' % profile_type.value.upper())
        profile_path = FirefoxProfile._get_staged_profile(profile_type, profile_path)
    else:
        raise ValueError('No profile found: %s' % profile_type.value)

    return MozProfile(profile=profile_path, preferences=preferences)


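# Illustrative sketch only (not part of the original module): a hypothetical helper
# showing how make_profile() might be called from a test's setup. It assumes the
# runner has already exported the CURRENT_TEST environment variable, as the code
# above requires, and that the staged TEN_BOOKMARKS profile exists on disk.
def _example_make_staged_profile():
    # Start from the staged TEN_BOOKMARKS profile and supply an explicit
    # preference override instead of the empty default dict.
    return make_profile(
        Profiles.TEN_BOOKMARKS,
        preferences={'browser.startup.homepage': 'about:blank'},
    )

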
def send_json_report(self):
    report_s = validate_section("Report_URL")
    if len(report_s) > 0:
        logger.warning(
            "{}. \nJSON report cannot be sent - no report URL found in config file.".format(
                report_s
            )
        )
    else:
        run_file = os.path.join(PathManager.get_current_run_dir(), "run.json")
        url = get_config_property("Report_URL", "url")
        if url is not None:
            try:
                with open(run_file, "rb") as file:
                    r = requests.post(url=url, files={"file": file})
                if not r.ok:
                    logger.error(
                        "Report was not sent to URL: %s \nResponse text: %s",
                        url,
                        r.text,
                    )
                logger.debug("Sent JSON report status: %s" % r.text)
            except requests.RequestException as ex:
                logger.error(
                    "Failed to send run report to URL: %s \nException data: %s",
                    url,
                    ex,
                )
        else:
            logger.error("Bad URL for JSON report.")


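# Configuration sketch (hypothetical values; the section and key names are the ones
# queried by validate_section() and get_config_property() above). send_json_report()
# expects an INI-style section like this and POSTs run.json from the current run
# directory to that URL as a multipart upload under the form field name "file".
_EXAMPLE_REPORT_URL_SECTION = """
[Report_URL]
url = https://example.com/iris/report
"""

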
def get_file_attachment():
    test_report_file = os.path.join(PathManager.get_current_run_dir(), "iris_log.log")
    if os.path.exists(test_report_file):
        # Read the run log and wrap it as a plain-text MIME attachment.
        with open(test_report_file) as file_log:
            attachment = MIMEText(file_log.read(), "plain")
        attachment.add_header(
            "Content-Disposition",
            "attachment",
            filename=os.path.basename(test_report_file),
        )
        return attachment
    else:
        raise Exception("File %s is not present in path" % test_report_file)


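# Illustrative sketch (not part of the original module): how the attachment returned
# by get_file_attachment() could be added to an outgoing email message. The subject,
# sender, and recipient below are placeholders, not values used by the harness.
def _example_build_report_email():
    from email.mime.multipart import MIMEMultipart

    message = MIMEMultipart()
    message["Subject"] = "Iris test run log"
    message["From"] = "sender@example.com"
    message["To"] = "recipient@example.com"
    # Attach the iris_log.log contents produced by the current run.
    message.attach(get_file_attachment())
    return message

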
def convert_test_list(test_list, only_failures=False):
    """Takes a flat list of test objects and paths and converts to an
    object that can be serialized as JSON.

    :param test_list: List of completed tests
    :param only_failures: If True, only return failed tests
    :return: Nested list of test result objects, grouped by directory
    """
    test_root = os.path.join(PathManager.get_module_dir(), "tests")
    tests = []
    for test in test_list:
        test_failed = "FAILED" in test.outcome or "ERROR" in test.outcome
        original_path = str(test.item.__dict__.get("fspath"))
        try:
            target_root = original_path.split(test_root)[1]
        except IndexError:
            logger.error("Error parsing test list.")
            logger.error("Try resetting your PYTHONPATH before your next run, i.e.:")
            if OSHelper.get_os().value == "win":
                logger.error("\tsetx PYTHONPATH %CD%")
            else:
                logger.error("\texport PYTHONPATH=$PWD")
            return tests

        target = target_root.split(os.sep)[1]
        test_path = target_root.split("%s%s%s" % (os.sep, target, os.sep))[1]
        parent = tests
        details = get_test_markers(test.item)
        for module in test_path.split(os.sep):
            test_obj = {"name": module.split(".py")[0]}
            if "py" not in module:
                # Directory node: reuse it if it already exists in the tree,
                # otherwise create it and descend into its children.
                module_exists = False
                for objects in parent:
                    if objects["name"] == module:
                        parent = objects["children"]
                        module_exists = True
                        break
                if not module_exists:
                    new_parent = test_obj["children"] = []
                    if only_failures and test_failed:
                        parent.append(test_obj)
                    elif not only_failures:
                        parent.append(test_obj)
                    parent = new_parent
            else:
                # Leaf node: the test file itself.
                if test_failed:
                    test_assert = {
                        "error": test.error.lstrip(),
                        "message": test.message.lstrip(),
                        "call_stack": test.traceback + "\n\n ",
                        "code": get_failing_code(test.node_name, int(test.line)),
                    }
                    test_obj["assert"] = test_assert
                test_obj["result"] = test.outcome
                test_obj["time"] = test.test_duration
                debug_image_directory = os.path.join(
                    PathManager.get_current_run_dir(),
                    test_path.split(".py")[0],
                    "debug_images",
                )
                test_obj["debug_image_directory"] = debug_image_directory
                test_obj["debug_images"] = get_image_names(debug_image_directory)
                test_obj["description"] = details.get("description")

                values = {}
                for i in details:
                    if i != "description":
                        values[i] = details.get(i)
                test_obj["values"] = values

                if only_failures and test_failed:
                    parent.append(test_obj)
                elif not only_failures:
                    parent.append(test_obj)
        parent = tests
    return tests


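# Shape sketch (illustrative, derived from the code above; directory and test names
# are placeholders): convert_test_list() nests directory nodes, each with a
# "children" list, and stores the test file itself as a leaf object, roughly:
_EXAMPLE_CONVERTED_TESTS = [
    {
        "name": "example_directory",
        "children": [
            {
                "name": "example_test",
                "result": "PASSED",
                "time": 42,
                "debug_image_directory": "placeholder/path/debug_images",
                "debug_images": [],
                "description": "Placeholder test description.",
                "values": {},
            }
        ],
    }
]

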
def create_run_log(app):
    args = get_core_args()
    meta = {
        "run_id": PathManager.get_run_id(),
        "platform": OSHelper.get_os().value,
        "config": "%s, %s-bit, %s"
        % (OSHelper.get_os_version(), OSHelper.get_os_bits(), OSHelper.get_processor()),
        "locale": args.locale,
        "args": " ".join(sys.argv),
        "params": vars(args),
        "log": os.path.join(PathManager.get_current_run_dir(), "iris_log.log"),
    }
    values = {}
    for i in app.values:
        values[i] = app.values[i]
    meta["values"] = values

    meta["iris_version"] = 2.0
    try:
        repo = git.Repo(PathManager.get_module_dir())
        meta["iris_repo"] = repo.working_tree_dir
        try:
            meta["iris_branch"] = repo.active_branch.name
        except:
            # If we're on a detached head, the active_branch is
            # undefined and raises an exception. This at least
            # allows the test run to finish.
            meta["iris_branch"] = "detached"
        meta["iris_branch_head"] = repo.head.object.hexsha
    except:
        # Iris is not running in a Git repo, so don't try to
        # report on non-existent data.
        meta["iris_repo"] = "n/a"
        meta["iris_branch"] = "n/a"
        meta["iris_branch_head"] = "n/a"
    meta["python_version"] = get_python_version()

    failed = 0
    passed = 0
    skipped = 0
    errors = 0
    for test in app.completed_tests:
        if test.outcome == "FAILED":
            failed += 1
        elif test.outcome == "PASSED":
            passed += 1
        elif test.outcome == "SKIPPED":
            skipped += 1
        elif test.outcome == "ERROR":
            errors += 1

    logger.debug("Updating run.json with completed run data.")
    meta["total"] = len(app.completed_tests)
    meta["passed"] = passed
    meta["failed"] = failed
    meta["skipped"] = skipped
    meta["errors"] = errors
    meta["start_time"] = app.start_time
    meta["end_time"] = app.end_time
    meta["total_time"] = app.end_time - app.start_time

    tests = {
        "all_tests": convert_test_list(app.completed_tests),
        "failed_tests": convert_test_list(app.completed_tests, only_failures=True),
        "flaky_tests": app.flaky_tests,
    }

    run_file = os.path.join(PathManager.get_current_run_dir(), "run.json")
    run_file_data = {"meta": meta, "tests": tests}

    with open(run_file, "w") as f:
        json.dump(run_file_data, f, sort_keys=True, indent=True)


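# Illustrative sketch (not part of the original module): reading back the run.json
# written by create_run_log() and printing the pass/fail summary. The path comes
# from the same PathManager helper used above; the keys match those set in meta.
def _example_print_run_summary():
    run_file = os.path.join(PathManager.get_current_run_dir(), "run.json")
    with open(run_file) as f:
        data = json.load(f)
    meta = data["meta"]
    print(
        "%(passed)s passed, %(failed)s failed, %(skipped)s skipped, %(errors)s errors"
        % meta
    )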