def test_get_significant_terms_with_filter_positive(self):
    """Issues filtered to Priority=Major yield Major-specific metrics.

    Expects "Resolution Done" to be chosen and "Priority Major" to appear
    in the metric list alongside the resolution metrics.
    """
    issues = get_issues_dataframe(
        fields=[
            "Priority",
            "Resolution",
            "Description_tr",
            "Assignee",
            "Reporter",
        ],
        filters=[{
            "name": "Priority",
            "filtration_type": "drop-down",
            "current_value": ["Major"],
            "exact_match": False,
        }],
    )
    significant_terms = get_significant_terms(issues)
    # Separate assertions so a failure pinpoints which expectation broke;
    # the original `assert all([...])` reported only a bare False with no
    # indication of which comparison failed.
    assert significant_terms["chosen_metric"] == "Resolution Done"
    assert significant_terms["metrics"] == [
        "Resolution Done",
        "Resolution Rejected",
        "Resolution Unresolved",
        "Priority Major",
    ]
def get(self, request):
    """Return significant terms for the user's currently cached filters.

    Reads the user's saved filter set from Redis (if any), loads the
    matching issues, and computes significant terms using the user's
    training settings. Responds with an empty payload when no issues
    match.
    """
    user = request.user
    cached = redis_conn.get(
        f"user:{user.id}:analysis_and_training:filters"
    )
    # A missing cache entry means "no filters" — load everything.
    filters = loads(cached) if cached else None

    issues = get_issues_dataframe(
        fields=[
            get_source_field(user),
            "Priority",
            "Resolution",
            "Description_tr",
            "Assignee",
            "Reporter",
        ],
        filters=filters,
    )
    if issues.empty:
        return Response({})

    settings = {
        "source_field": get_source_field(user),
        "bug_resolution": get_bug_resolutions(user),
        "mark_up_entities": get_mark_up_entities(user),
    }
    significant_terms = get_significant_terms(issues, settings)
    return Response({"significant_terms": significant_terms})
def test_get_significant_terms(self):
    """Unfiltered issues produce the expected significant-terms payload."""
    fields = [
        "Priority",
        "Resolution",
        "Description_tr",
        "Assignee",
        "Reporter",
    ]
    issues = get_issues_dataframe(fields=fields, filters=[])
    assert get_significant_terms(issues) == self.result_significant_terms
def test_get_significant_terms_with_aot(self):
    """AOT settings add the "AOT1" metric to the baseline result."""
    issues = get_issues_dataframe(
        fields=[
            "Priority",
            "Resolution",
            "Description_tr",
            "Assignee",
            "Reporter",
        ],
        filters=[],
    )
    significant_terms = get_significant_terms(issues, self.settings_for_aot)
    # Build the expectation without mutating the shared fixture.
    # dict.copy() is shallow, so the original
    # `result["metrics"].append("AOT1")` appended to
    # self.result_significant_terms["metrics"] as well, leaking "AOT1"
    # into every other test that compares against the fixture.
    expected = {
        **self.result_significant_terms,
        "metrics": self.result_significant_terms["metrics"] + ["AOT1"],
    }
    assert significant_terms == expected
def get(self, request):
    """Serve the analysis-and-training dashboard, rebuilding on cache miss.

    On a Redis hit the cached context is returned as-is; otherwise all
    dashboard sections are recomputed from the full issue set and cached
    for 30 minutes.
    """
    cache_key = f"analysis_and_training:{request.user.id}"
    cached = redis_conn.get(cache_key)
    if cached:
        return Response(loads(cached))

    fields = get_issues_fields(request.user)
    issues = get_issues(fields=fields)
    if not issues:
        # TODO: FE shows progress bar when data is empty
        return Response({})

    df = pd.DataFrame.from_records(issues)
    context = {
        "records_count": {
            "total": len(df),
            "filtered": len(df),
        },
        "frequently_terms": calculate_frequently_terms(df),
        "statistics": calculate_statistics(
            df=df, series=["Comments", "Attachments", "Time to Resolve"]
        ),
        "submission_chart": calculate_defect_submission(df=df, period="Month"),
        "significant_terms": get_significant_terms(df),
        "filters": get_filters(request.user, issues=df),
    }
    # Cache the assembled dashboard for 30 minutes.
    redis_conn.set(name=cache_key, value=dumps(context), ex=60 * 30)
    return Response(context)
def test_get_significant_terms_with_filter_negative(self):
    """A filter that matches no issues yields an empty terms payload."""
    priority_filter = {
        "name": "Priority",
        "filtration_type": "drop-down",
        "current_value": ["Critical"],
        "exact_match": False,
    }
    issues = get_issues_dataframe(
        fields=[
            "Priority",
            "Resolution",
            "Description_tr",
            "Assignee",
            "Reporter",
        ],
        filters=[priority_filter],
    )
    expected = {
        "metrics": [],
        "chosen_metric": "",
        "terms": {},
    }
    assert get_significant_terms(issues) == expected
def post(self, request):
    """Apply (or reset) dashboard filters and return recalculated metrics.

    With `action == "apply"`, incoming filter values are merged into the
    canonical filter list (matched by name) and issues are re-queried
    with them; any other action resets to the unfiltered issue set. The
    resulting dashboard context is cached per-user in Redis.
    """
    user = request.user
    fields = get_issues_fields(user)
    filters = get_filters(
        user,
        issues=pd.DataFrame.from_records(get_issues(fields=fields)),
    )

    if request.data.get("action") == "apply":
        new_filters = request.data.get("filters")
        if new_filters:
            # Merge incoming values into the canonical filters, matched
            # by name via a dict lookup — replaces the original O(n*m)
            # nested loops over both lists.
            by_name = {filter_["name"]: filter_ for filter_ in filters}
            for new_filter in new_filters:
                existing = by_name.get(new_filter["name"])
                if existing is not None:
                    existing.update({
                        "current_value": new_filter["current_value"],
                        "filtration_type": new_filter["filtration_type"],
                        "exact_match": new_filter["exact_match"],
                    })
            issues = get_issues(filters=filters, fields=fields)
        else:
            issues = get_issues(fields=fields)
    else:
        issues = get_issues(fields=fields)

    cache_key = f"analysis_and_training:{user.id}"

    if not issues:
        # Filters excluded everything: cache an empty dashboard so the
        # GET handler can serve it, and return an empty payload.
        context = {
            "records_count": {
                "total": get_issue_count(),
                "filtered": 0,
            },
            "frequently_terms": [],
            "statistics": {},
            "submission_chart": {},
            "significant_terms": {},
            "filters": filters,
        }
        # Expire after 30 minutes like the GET handler does; the
        # original set this entry without a TTL, so it could be served
        # stale indefinitely.
        redis_conn.set(cache_key, dumps(context), ex=60 * 30)
        return Response({})

    issues = pd.DataFrame.from_records(issues)
    freq_terms = calculate_frequently_terms(issues)
    statistics = calculate_statistics(
        df=issues, series=["Comments", "Attachments", "Time to Resolve"]
    )
    submission_chart = calculate_defect_submission(df=issues, period="Month")
    significant_terms = get_significant_terms(
        issues, get_training_settings(user)
    )
    context = {
        "records_count": {
            "total": get_issue_count(),
            "filtered": len(issues),
        },
        "frequently_terms": freq_terms,
        "statistics": statistics,
        "submission_chart": submission_chart,
        "significant_terms": significant_terms,
        "filters": filters,
    }
    # Same 30-minute TTL as the empty-result branch and the GET handler.
    redis_conn.set(cache_key, dumps(context), ex=60 * 30)
    return Response(context)