def get(self, request):
    metric = request.GET["metric"]
    # The field name is the first token of the metric, e.g. "Priority"
    # from "Priority Major".
    field = metric.split()[0]
    cache = redis_conn.get(f"analysis_and_training:{request.user.id}")
    filters = loads(cache)["filters"] if cache else None
    settings = get_training_settings(request.user)
    issues = get_issues(
        fields=[
            field,
            settings.get("mark_up_source"),
            "Description_tr",
            "Assignee",
            "Reporter",
        ],
        filters=filters,
    )
    df = pd.DataFrame.from_records(issues)
    # Resolution and Priority are regular issue fields; any other metric is
    # an area of testing that must be marked up before weighing terms.
    if field not in ("Resolution", "Priority"):
        if settings["mark_up_source"] and settings["mark_up_entities"]:
            for area in settings["mark_up_entities"]:
                if area["area_of_testing"] == field:
                    df = mark_up_series(
                        df,
                        settings["mark_up_source"],
                        field,
                        area["entities"],
                    )
    significant_terms = calculate_significance_weights(df, metric)
    context = {"significant_terms": significant_terms}
    return Response(context)
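# Usage sketch (not part of the module): the split()[0] calls above imply
# that the "metric" query parameter is a space-separated "<field> <value>"
# pair such as "Priority Major". A minimal client call via DRF's test
# client; the route below is hypothetical and must match the project's
# actual urlconf.
from rest_framework.test import APIClient

def fetch_significant_terms(user, metric="Priority Major"):
    client = APIClient()
    client.force_authenticate(user=user)
    response = client.get(
        "/analysis_and_training/significant_terms/",  # hypothetical route
        {"metric": metric},
    )
    return response.json()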
def post(self, request):
    instance = request.user
    cache = redis_conn.get(f"analysis_and_training:{request.user.id}")
    filters = loads(cache)["filters"] if cache else None
    fields = get_issues_fields(request.user)
    df = pd.DataFrame(get_issues(filters=filters, fields=fields))
    # New predictions will be appended after training.
    delete_old_predictions()
    settings = get_training_settings(request.user)
    # The configured mark-up source must be among the loaded fields.
    if settings["mark_up_source"] not in df.columns:
        raise InvalidMarkUpSource
    resolutions = [
        resolution["value"] for resolution in settings["bug_resolution"]
    ]
    areas_of_testing = []
    if settings["mark_up_source"]:
        areas_of_testing = [
            area["area_of_testing"] for area in settings["mark_up_entities"]
        ] + ["Other"]
        for area in settings["mark_up_entities"]:
            df = mark_up_series(
                df,
                settings["mark_up_source"],
                area["area_of_testing"],
                area["entities"],
            )
        # Everything not matched by an entity falls into "Other".
        df = mark_up_other_data(df, areas_of_testing)
    delete_training_data(get_archive_path(instance))
    train(instance, df, areas_of_testing, resolutions)
    context = {"result": "success"}
    # Rebuild predictions in the background so the response isn't blocked.
    process = Process(target=append_predictions, args=(request.user,))
    process.start()
    return Response(context, status=200)
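# Note on the Process call above: multiprocessing pickles the target's
# arguments, so request.user must survive a round-trip through pickle. A
# common alternative (a sketch under that assumption, not project code) is
# to pass only the primary key and reload the user in the child process:
from multiprocessing import Process
from django.contrib.auth import get_user_model

def append_predictions_for(user_id):
    user = get_user_model().objects.get(pk=user_id)
    append_predictions(user)  # same helper the view schedules above

# Scheduled from the view as:
# Process(target=append_predictions_for, args=(request.user.id,)).start()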
def get(self, request):
    archive_path = get_archive_path(request.user)
    if not is_file_in_archive(archive_path, TRAINING_PARAMETERS_FILENAME):
        raise DescriptionAssessmentUnavailableWarning
    settings = get_training_settings(request.user)
    resolutions = [
        resolution["value"] for resolution in settings["bug_resolution"]
    ]
    training_parameters = read_from_archive(
        archive_path, TRAINING_PARAMETERS_FILENAME
    )
    context = {
        "priority": training_parameters.get("Priority"),
        "resolution": resolutions,
        "areas_of_testing": training_parameters.get("areas_of_testing"),
    }
    return Response(context)
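# A hypothetical response for this endpoint, inferred from the context keys
# built above; the concrete values are illustrative only:
EXAMPLE_DESCRIPTION_ASSESSMENT_RESPONSE = {
    "priority": ["Major", "Minor"],
    "resolution": ["Duplicate", "Won't Fix"],
    "areas_of_testing": ["GUI", "Other"],
}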
def post(self, request):
    fields = get_issues_fields(request.user)
    filters = get_filters(
        request.user,
        issues=pd.DataFrame.from_records(get_issues(fields=fields)),
    )
    if request.data.get("action") == "apply":
        new_filters = request.data.get("filters")
        if new_filters:
            # Merge the submitted filter values into the defaults by name.
            for new_filter in new_filters:
                for filter_ in filters:
                    if new_filter["name"] == filter_["name"]:
                        filter_.update(
                            {
                                "current_value": new_filter["current_value"],
                                "filtration_type": new_filter["filtration_type"],
                                "exact_match": new_filter["exact_match"],
                            }
                        )
            issues = get_issues(filters=filters, fields=fields)
        else:
            issues = get_issues(fields=fields)
    else:
        issues = get_issues(fields=fields)

    if len(issues) == 0:
        context = {
            "records_count": {"total": get_issue_count(), "filtered": 0},
            "frequently_terms": [],
            "statistics": {},
            "submission_chart": {},
            "significant_terms": {},
            "filters": filters,
        }
        redis_conn.set(
            f"analysis_and_training:{request.user.id}", dumps(context)
        )
        # The empty context is cached for later requests, but the client
        # receives an empty payload when no issues match the filters.
        return Response({})

    issues = pd.DataFrame.from_records(issues)
    freq_terms = calculate_frequently_terms(issues)
    statistics = calculate_statistics(
        df=issues, series=["Comments", "Attachments", "Time to Resolve"]
    )
    submission_chart = calculate_defect_submission(df=issues, period="Month")
    significant_terms = get_significant_terms(
        issues, get_training_settings(request.user)
    )
    context = {
        "records_count": {
            "total": get_issue_count(),
            "filtered": len(issues),
        },
        "frequently_terms": freq_terms,
        "statistics": statistics,
        "submission_chart": submission_chart,
        "significant_terms": significant_terms,
        "filters": filters,
    }
    redis_conn.set(f"analysis_and_training:{request.user.id}", dumps(context))
    return Response(context)
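# A hypothetical request body for the "apply" action. The keys mirror the
# filter-merging loop above (name / current_value / filtration_type /
# exact_match); the concrete values are illustrative only:
EXAMPLE_APPLY_PAYLOAD = {
    "action": "apply",
    "filters": [
        {
            "name": "Priority",
            "current_value": ["Major", "Critical"],
            "filtration_type": "drop-down",
            "exact_match": True,
        }
    ],
}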
def get(self, request):
    training_settings = get_training_settings(request.user)
    return Response(training_settings)
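# The settings dict returned here is what the other views in this module
# consume. A hypothetical example of its shape, inferred from the keys they
# access (mark_up_source, mark_up_entities, bug_resolution); values are
# illustrative only:
EXAMPLE_TRAINING_SETTINGS = {
    "mark_up_source": "Description_tr",
    "mark_up_entities": [
        {"area_of_testing": "GUI", "entities": ["button", "layout"]},
    ],
    "bug_resolution": [{"value": "Duplicate"}, {"value": "Won't Fix"}],
}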