def compare_results(issue_key, new_xray, master_xray, tests_json, first_xray):
    """Merge a freshly produced Xray report into the per-image master report.

    Updates two files on disk:
      * master_xray — the accumulated Xray report for this image
        (its 'testExecutionKey' is set to issue_key before writing).
      * tests_json  — bookkeeping dict: per-test-key status lists,
        the ordered list of test keys under "cases", and a "runs" counter.

    Args:
        issue_key:   Jira issue key of the test execution.
        new_xray:    path to the Xray report produced by the latest run.
        master_xray: path to the accumulated master Xray report.
        tests_json:  path to the bookkeeping JSON (created on first run).
        first_xray:  path where the very first report is archived.
    """
    print("Starting to compare autotest results")
    try:
        results = uploader_common.read_json(tests_json)
    except FileNotFoundError:
        # First run for this image: seed the bookkeeping structure from the
        # newly produced report. Each test key maps to a list of statuses,
        # one entry per run; "cases" preserves the test key order.
        new_res = uploader_common.read_json(new_xray)
        new_tests = new_res['tests']
        results = {}
        results["cases"] = []
        for new_test in new_tests:
            new_test_key = new_test["testKey"]
            results[new_test_key] = []
            results["cases"].append(new_test_key)
            results[new_test_key].append(new_test['status'])
        results["runs"] = 0
    # If there is no master xray_report, the current test result can be just copied
    if not os.path.isfile(master_xray):
        master_res = _compare_results_new_task(master_xray, new_xray, first_xray)
    # Otherwise the master_xray_report and newly created report have to be compared
    # and master_xray_report is modified according to specifications of which
    # results overwrite which.
    else:
        master_res = uploader_common.read_json(master_xray)
        new_res = uploader_common.read_json(new_xray)
        results = _compare_two_xrays(master_res, new_res, results)
        # NOTE(review): debug output of the merged bookkeeping dict — consider
        # removing or routing through logging.
        print(results)
    results['runs'] += 1
    master_res['testExecutionKey'] = issue_key
    uploader_common.dump_to_file(master_xray, master_res)
    uploader_common.dump_to_file(tests_json, results)
def get_image_measurement(project, conf_file):
    """Return the measurement info configured for *project*.

    Args:
        project:   project identifier, used as a key under "project".
        conf_file: path to the jira_upload_conf.json-style config file.

    Returns:
        The value stored at conf_data["project"][project].

    Raises:
        BootUploadError: if the config cannot be read or the key is missing.
    """
    try:
        conf_data = uploader_common.read_json(conf_file)
        return conf_data["project"][project]
    # Fixed: was a bare `except:`, which also caught SystemExit and
    # KeyboardInterrupt. Chain the cause so the real error stays visible.
    except Exception as e:
        raise BootUploadError("Unable to fetch measurement info.\n" \
            "Check jira_upload_conf.json") from e
def import_xray(jira, issue_key, testfolder_path):
    """Stamp *issue_key* into the test folder's xray_report.json and import it."""
    # Append the new issue_key into the test result xray_report.json
    report_path = os.path.join(testfolder_path, "xray_report.json")
    report = uploader_common.read_json(report_path)
    report["testExecutionKey"] = issue_key
    uploader_common.dump_to_file(report_path, report)
    jira.import_xray(issue_key, report_path)
def boot_time_upload(sut_name, test_id, test_type, skip, conf_file=DEFAULT_CONF_FILE):
    """Collect boot-time results for one SUT run, draw the trend graph and upload it.

    Copies the run's boots.csv into the per-image master folder, draws a
    boot-time graph spanning the previous images, and (unless *skip* is set
    or no Jira key exists) uploads the graph to Jira and to the DAV share.

    Args:
        sut_name:  name of the system under test (TestReports_<sut_name> dir).
        test_id:   identifier of this test run (subfolder of the SUT folder).
        test_type: key into the "test_types" config (see get_test_properties).
        skip:      when truthy, skip the Jira/DAV upload steps.
        conf_file: path to the uploader configuration JSON.

    Returns:
        0 on success, 1 when a BootUploadError was raised along the way.
        Exits the process (sys.exit(1)) on auth/config read errors.
    """
    print("Starting Boot Time Upload for", sut_name)
    try:
        auth = uploader_common.get_jira_auth()
        jira_url = global_config.get_conf("urls", "jira")
    except (IOError, PermissionError, ArgumentError, FileNotFoundError,
            json.decoder.JSONDecodeError, KeyError) as e:
        print("Error when reading auth from config:", file=sys.stderr)
        uploader_common.print_exc(e)
        sys.exit(1)
    test_props = get_test_properties(test_type)
    try:
        jira = jiralib.JIRA(jira_url, auth)
        argotestroot = uploader_common.get_path_from_conf("argotest", conf_file)
        sutfolder_path = os.path.join(argotestroot, "TestReports_" + sut_name)
        image_url = uploader_common.read_file(os.path.join(sutfolder_path,
                                                           "last_installed_zip.txt")).strip()
        image = os.path.basename(image_url)
        # Image names look like <a>-<b>-<project>-...; the third dash-separated
        # field selects the measurement config. TODO confirm naming scheme.
        project = image.split("-")[2]
        measurement = get_image_measurement(project, conf_file)
        try:
            issues_path = os.path.join(argotestroot, test_props["issues_dir"])
        except TypeError as e:
            print("Bad image_type:", e)
            sys.exit(1)
        except KeyError as e:
            print("No issues_dir key for image type " + test_type, e)
            sys.exit(1)
        master_folder = os.path.join(issues_path, image)
        os.makedirs(master_folder, exist_ok=True)
        shutil.copyfile(os.path.join(sutfolder_path, test_id, "boots.csv"),
                        os.path.join(master_folder, "boots.csv"))
        # Fetch information how many images are drawn into the graph
        images_to_draw = uploader_common.read_json(conf_file)["num_images_boot_time"]
        # Fetch information about the previous images
        images_to_draw = get_previous_images(image, issues_path, images_to_draw)
        key = get_jira_key(os.path.join(master_folder, image + "_key.json"))
        # Use sufu_csv2graph to draw some lines
        output_filename = test_type + "-" + project + "-BootTime.png"
        result_image_path = os.path.join(master_folder, output_filename)
        draw_graph(images_to_draw, issues_path, result_image_path, measurement)
        if key and not skip:
            upload_graph_to_jira(jira, result_image_path, key)
            dav_url = uploader_common.get_url_from_conf("dav", conf_file)
            upload_graph_to_dav(dav_url, result_image_path, auth)
        if test_props.get("release", False):
            # Fixed: used bare `path.join` here while the rest of the function
            # uses `os.path.join` — normalized for consistency (and to avoid a
            # NameError if `path` is not imported separately).
            shutil.copyfile(os.path.join(master_folder, output_filename),
                            os.path.join(sutfolder_path, test_id, output_filename))
    except BootUploadError as e:
        print("BootUploadError:", e)
        return 1
    return 0
def delete_attachment(jira, file):
    """Delete every Jira attachment listed in the JSON file *file*."""
    for entry in uploader_common.read_json(file):
        try:
            jira.delete_file(entry["id"])
        except JIRAException as e:
            print("Error deleting files:", e)
        else:
            print("Deleted attachment: name:", entry["filename"], "id:", \
                  entry["id"])
def get_jira_key(file):
    """Return the Jira issue key stored in the JSON file *file*.

    Returns:
        The value of the 'key' field, or None when the file is missing,
        unreadable, malformed, or lacks the field (upload is then skipped).
    """
    try:
        key_file = uploader_common.read_json(file)
        key = key_file['key']
    # Fixed: was a bare `except:`. json.JSONDecodeError is a ValueError
    # subclass; OSError covers missing/unreadable files; TypeError covers a
    # non-dict payload.
    except (OSError, ValueError, KeyError, TypeError):
        print("No Jira issue key for this image. Skipping Jira upload.")
        return None
    return key
def update_jira_attachments(jira, fm, master_path, issue_key, conf_file):
    """Replace the issue's graph attachments with the current ones from *master_path*."""
    attachments = os.path.join(master_path, "attachments.json")
    # Drop the previously uploaded attachments before re-uploading.
    if os.path.exists(attachments):
        delete_attachment(jira, attachments)
    graph_names = uploader_common.read_json(conf_file)["graphs"]
    for graph_name in graph_names:
        fm.add_file(os.path.join(master_path, graph_name))
    uploaded = jira.upload_files(issue_key, fm)
    # Remember what was uploaded so the next update can delete it again.
    uploader_common.dump_to_file(attachments, uploaded)
def upload_attachments(jira, issue_key, testfolder_path, sut_name, test_id, conf_file):
    """Upload this run's logs and configured graphs to the Jira issue."""
    attachments = os.path.join(testfolder_path, "attachments.json")
    with jiralib.MultipleFileManager() as fm:
        prefix = sut_name + "-" + test_id
        # The cron log and the test-run log always go up.
        fm.add_file(os.path.join(testfolder_path, prefix + "-cron.log"))
        fm.add_file(os.path.join(testfolder_path, prefix + "-log_TestRun.txt"))
        # Plus every graph the config asks for.
        for graph in uploader_common.read_json(conf_file)["graphs"]:
            fm.add_file(os.path.join(testfolder_path, graph))
        uploaded = jira.upload_files(issue_key, fm)
        # Record the uploaded attachment metadata for later cleanup.
        uploader_common.dump_to_file(attachments, uploaded)
def create_test_table(test_table, issue_key, test_id, sut_name):
    """Render the bookkeeping JSON *test_table* as an HTML results matrix.

    One column per test case, one row per completed run; each cell is
    colour-coded by status and links to the Xray execution page.
    Returns the HTML document as a string.
    """
    results = uploader_common.read_json(test_table)
    cases = results["cases"]
    parts = ['<html><body><table border="1"><tr><td>n</td>']
    # Create header row, which contains the test LM codes
    parts.extend("<td>{}</td>".format(case) for case in cases)
    parts.append("</tr>")
    for test_run in range(results['runs']):
        # First cell of each column is always the number of the test run
        parts.append("<tr>")
        parts.append("<td>{run}</td>".format(run = str(test_run + 1)))
        for test_case in cases:
            test_result = results[test_case][test_run]
            url = "https://jira.link-motion.com/secure/XrayExecuteTest!default.jspa?testExecIssueKey=" + issue_key + "&testIssueKey=" + test_case
            # color format for cell background
            color = TEST_COLORS[test_result]
            if test_result == TEST_NOT_RUN:
                test_result = ""
            parts.append("<td bgcolor=\"{color}\">".format(color = color))
            parts.append("<a href=\"{url}\">{test_result}</a>".format(url = url, test_result = test_result))
            parts.append("</td>")
        parts.append("</tr>")
    parts.append("</table>")
    # Extra information about SUT and latest test run
    parts.append("<br><span>Latest test run ID: {}</span><br>".format(test_id))
    parts.append("<span>SUT ID: {}</span><br>".format(sut_name))
    parts.append("<span>Last test finished at: {}</span><br>".format(str(datetime.datetime.now())))
    parts.append("</body></html>")
    return "".join(parts)
def _compare_results_new_task(master_xray, new_xray, first_xray):
    """Seed the master and first-run reports from *new_xray* and return the master data."""
    print("No master_xray_report to be found. First run for this image")
    # First run: the fresh report becomes both the master and the archived first report.
    for destination in (master_xray, first_xray):
        shutil.copyfile(new_xray, destination)
    return uploader_common.read_json(master_xray)
def get_test_properties(test_type):
    """Look up the properties dict for *test_type* from the global config."""
    config_path = global_config.get_path()
    return uploader_common.read_json(config_path)["test_types"][test_type]