def set_report_and_cleanup(job_id, report):
    """Attach an analyzer ``report`` to the job ``job_id`` and, when it is the
    last expected report, finalize the job status.

    Args:
        job_id: identifier passed to ``object_by_job_id`` to fetch the job.
        report: dict produced by an analyzer. Must contain "started_time";
            "name" and "success" are read with defaults. A "process_time"
            entry is added to it in place.

    Side effects:
        Appends ``report`` to ``job_object.analysis_reports`` and saves it.
        Once all analyzers have reported, sets the job status to one of
        "reported_without_fails", "reported_with_fails" or "failed" and
        records ``finished_analysis_time``. Any unexpected error marks the
        job as "failed". Nothing is returned.
    """
    analyzer_name = report.get("name", "")
    logger.info(
        f"start set_report_and_cleanup for job_id:{job_id},"
        f" analyzer:{analyzer_name}"
    )
    job_object = None
    try:
        # add process time
        finished_time = time.time()
        report["process_time"] = finished_time - report["started_time"]
        with transaction.atomic():
            job_object = object_by_job_id(job_id, transaction=True)
            job_object.analysis_reports.append(report)
            job_object.save(update_fields=["analysis_reports"])
            # raising here rolls back the append: a report for an
            # already-failed job must not be persisted
            if job_object.status == "failed":
                raise AlreadyFailedJobException()
        num_analysis_reports = len(job_object.analysis_reports)
        num_analyzers_to_execute = len(job_object.analyzers_to_execute)
        logger.info(
            f"job_id:{job_id}, analyzer {analyzer_name}, "
            f"num analysis reports:{num_analysis_reports}, "
            f"num analyzer to execute:{num_analyzers_to_execute}"
        )
        # check if it was the last analysis...
        # ..In case, set the analysis as "reported" or "failed"
        if num_analysis_reports == num_analyzers_to_execute:
            status_to_set = "reported_without_fails"
            # set status "failed" in case all analyzers failed
            failed_analyzers = sum(
                1
                for analysis_report in job_object.analysis_reports
                if not analysis_report.get("success", False)
            )
            if failed_analyzers == num_analysis_reports:
                status_to_set = "failed"
            elif failed_analyzers >= 1:
                status_to_set = "reported_with_fails"
            set_job_status(job_id, status_to_set)
            job_object.finished_analysis_time = get_now()
            job_object.save(update_fields=["finished_analysis_time"])
    except AlreadyFailedJobException:
        logger.error(
            f"job_id {job_id} status failed. Do not process the report {report}"
        )
    except Exception as e:
        logger.exception(f"job_id: {job_id}, Error: {e}")
        set_job_status(job_id, "failed", errors=[str(e)])
        # BUGFIX: job_object is still None if the failure happened before the
        # job was fetched (e.g. missing "started_time" in the report, or
        # object_by_job_id raising) — dereferencing it here would raise
        # AttributeError inside the error handler and mask the real error.
        if job_object is not None:
            job_object.finished_analysis_time = get_now()
            job_object.save(update_fields=["finished_analysis_time"])
def set_report_and_cleanup(job_id, report):
    """Attach an analyzer ``report`` to the job ``job_id`` and, when it is the
    last expected report, finalize the job status (non-transactional variant).

    NOTE(review): this re-definition shadows an earlier
    ``set_report_and_cleanup`` in this module — presumably a merge/paste
    artifact; confirm which version is intended and remove the other.

    Args:
        job_id: identifier passed to ``object_by_job_id`` to fetch the job.
        report: dict produced by an analyzer. Must contain "started_time";
            "name" and "success" are read with defaults. A "process_time"
            entry is added to it in place.

    Side effects:
        Appends ``report`` to ``job_object.analysis_reports`` and saves it.
        Once all analyzers have reported, sets the job status to one of
        "reported_without_fails", "reported_with_fails" or "failed" and
        records ``finished_analysis_time``. Any unexpected error marks the
        job as "failed". Nothing is returned.
    """
    logger.info("start set_report_and_cleanup for job_id:{}, analyzer:{}"
                "".format(job_id, report.get('name', '')))
    job_object = None
    try:
        job_object = object_by_job_id(job_id)
        # reports for an already-failed job are discarded
        if job_object.status == 'failed':
            raise AlreadyFailedJobException()
        # add process time
        finished_time = time.time()
        report['process_time'] = finished_time - report['started_time']
        job_object.analysis_reports.append(report)
        job_object.save(update_fields=['analysis_reports'])
        num_analysis_reports = len(job_object.analysis_reports)
        num_analyzers_to_execute = len(job_object.analyzers_to_execute)
        logger.info(
            "job_id:{} num analysis reports:{} num analyzer to execute:{}"
            "".format(job_id, num_analysis_reports, num_analyzers_to_execute))
        # check if it was the last analysis. In case, set the analysis as "reported" or "failed"
        if num_analysis_reports == num_analyzers_to_execute:
            status_to_set = "reported_without_fails"
            # set status "failed" in case all analyzers failed
            failed_analyzers = 0
            for analysis_report in job_object.analysis_reports:
                if not analysis_report.get('success', False):
                    failed_analyzers += 1
            if failed_analyzers == num_analysis_reports:
                status_to_set = "failed"
            elif failed_analyzers >= 1:
                status_to_set = "reported_with_fails"
            set_job_status(job_id, status_to_set)
            job_object.finished_analysis_time = get_now()
            job_object.save(update_fields=['finished_analysis_time'])
    except AlreadyFailedJobException:
        logger.error("job_id {} status failed. Do not process the report {}"
                     "".format(job_id, report))
    except Exception as e:
        logger.exception("job_id: {}, Error: {}".format(job_id, e))
        set_job_status(job_id, "failed", errors=[str(e)])
        # BUGFIX: job_object is still None if object_by_job_id itself failed —
        # dereferencing it here would raise AttributeError inside the error
        # handler and mask the real error.
        if job_object is not None:
            job_object.finished_analysis_time = get_now()
            job_object.save(update_fields=['finished_analysis_time'])