def test_get_significant_terms_with_filter_positive(self):
    issues = get_issues_dataframe(
        fields=[
            "Priority",
            "Resolution",
            "Description_tr",
            "Assignee",
            "Reporter",
        ],
        filters=[
            {
                "name": "Priority",
                "filtration_type": "drop-down",
                "current_value": ["Major"],
                "exact_match": False,
            }
        ],
    )
    significant_terms = get_significant_terms(issues)
    assert all(
        [
            significant_terms["chosen_metric"] == "Resolution Done",
            significant_terms["metrics"]
            == [
                "Resolution Done",
                "Resolution Rejected",
                "Resolution Unresolved",
                "Priority Major",
            ],
        ]
    )
def calculate_issues_predictions(
    user: User, fields: List[str], filters: List[dict]
) -> pd.DataFrame:
    """Appends predictions to issues.

    Parameters:
    ----------
    user:
        User instance.
    fields:
        Predictions table fields.
    filters:
        Filters.

    Returns:
    ----------
    Issues with appended predictions, or an empty frame if no
    unresolved bugs match the filters.
    """
    filters = [UNRESOLVED_BUGS_FILTER] + filters
    issues = get_issues_dataframe(fields=fields, filters=filters)
    if issues.empty:
        return pd.DataFrame()
    issues = get_predictions(user, issues)
    return issues
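# A minimal usage sketch for calculate_issues_predictions. The field list
# and the "Priority" filter below are illustrative assumptions, not taken
# from the codebase, and `some_user` stands in for an authenticated User
# instance. Note that UNRESOLVED_BUGS_FILTER is prepended inside the
# function, so callers pass only the additional filters they need.
predictions = calculate_issues_predictions(
    some_user,
    fields=["Key", "Priority", "Description_tr"],
    filters=[
        {
            "name": "Priority",
            "filtration_type": "drop-down",
            "current_value": ["Major"],
            "exact_match": False,
        }
    ],
)
if predictions.empty:
    print("No unresolved bugs matched the given filters.")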
def get(self, request):
    user = request.user
    cache = redis_conn.get(
        f"user:{user.id}:analysis_and_training:filters"
    )
    filters = loads(cache) if cache else None
    source_field = get_source_field(user)
    issues = get_issues_dataframe(
        fields=[
            source_field,
            "Priority",
            "Resolution",
            "Description_tr",
            "Assignee",
            "Reporter",
        ],
        filters=filters,
    )
    if issues.empty:
        return Response({})
    settings = {
        "source_field": source_field,
        "bug_resolution": get_bug_resolutions(user),
        "mark_up_entities": get_mark_up_entities(user),
    }
    significant_terms = get_significant_terms(issues, settings)
    context = {"significant_terms": significant_terms}
    return Response(context)
def post(self, request):
    period = request.GET["period"]
    cache = redis_conn.get(
        f"user:{request.user.id}:analysis_and_training:filters"
    )
    filters = loads(cache) if cache else None
    issues = get_issues_dataframe(
        fields=["Key", "Created", "Resolved"], filters=filters
    )
    if issues.empty:
        return Response({})
    coordinates = get_defect_submission(issues, period)
    context = {
        **coordinates,
        **get_max_amount(coordinates),
        "period": period,
    }
    redis_conn.set(
        f"user:{request.user.id}:analysis_and_training:defect_submission",
        dumps(context),
    )
    return Response(context)
def post(self, request):
    new_filters = request.data.get("filters", [])
    filters = get_qa_metrics_settings(request.user)
    fields = [field["name"] for field in filters]
    filters = update_drop_down_fields(
        filters,
        get_issues_dataframe(
            fields=fields, filters=[UNRESOLVED_BUGS_FILTER]
        ),
    )
    # Iterating an empty new_filters list is a no-op, so no guard is needed.
    for new_filter in new_filters:
        for filter_ in filters:
            if new_filter["name"] == filter_["name"]:
                filter_.update(
                    {
                        "current_value": new_filter["current_value"],
                        "filtration_type": new_filter["filtration_type"],
                        "exact_match": new_filter["exact_match"],
                    }
                )
    filters += [UNRESOLVED_BUGS_FILTER]
    cached_filters = redis_conn.get(
        f"user:{request.user.id}:qa_metrics:filters"
    )
    cached_filters = loads(cached_filters) if cached_filters else []
    context = {
        "records_count": {
            "total": get_issue_count(filters=[UNRESOLVED_BUGS_FILTER]),
            "filtered": get_issue_count(filters),
        },
        "filters": filters,
    }
    if not cached_filters or not check_filters_equality(
        filters, cached_filters
    ):
        clear_cache(
            [
                "qa_metrics:predictions_table",
                "qa_metrics:predictions_page",
            ],
            request.user.id,
        )
    for element in context:
        redis_conn.set(
            f"user:{request.user.id}:qa_metrics:{element}",
            dumps(context.get(element)),
        )
    return Response(context)
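# The new_filters merge loop above is repeated almost verbatim in the
# analysis-and-training filter view further down; a small helper could
# consolidate it. This is a sketch under the assumption that filters are
# plain dicts keyed by "name"; the name apply_filter_updates is
# hypothetical, not an existing function in the codebase.
def apply_filter_updates(
    filters: list[dict], new_filters: list[dict]
) -> None:
    """Copy the user-editable keys from matching filter updates in place."""
    updates = {f["name"]: f for f in new_filters}
    for filter_ in filters:
        update = updates.get(filter_["name"])
        if update:
            filter_.update(
                {
                    "current_value": update["current_value"],
                    "filtration_type": update["filtration_type"],
                    "exact_match": update["exact_match"],
                }
            )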
def post(self, request):
    user = request.user
    cache = redis_conn.get(
        f"user:{user.id}:analysis_and_training:filters"
    )
    filters = loads(cache) if cache else None
    fields = get_issues_fields(user)
    issues = get_issues_dataframe(filters=filters, fields=fields)
    if issues.empty:
        raise BugsNotFoundWarning
    source_field = get_source_field(user)
    if source_field not in issues.columns:
        raise InvalidSourceField
    # The comprehension already yields [] for an empty list,
    # so no explicit length check is needed.
    resolutions = [
        resolution["value"] for resolution in get_bug_resolutions(user)
    ]
    areas_of_testing = []
    mark_up_entities = get_mark_up_entities(user)
    if source_field:
        areas_of_testing = [
            area["area_of_testing"] for area in mark_up_entities
        ] + ["Other"]
        for area in mark_up_entities:
            issues = mark_up_series(
                issues,
                source_field,
                area["area_of_testing"],
                area["entities"],
            )
        issues = mark_up_other_data(issues, areas_of_testing)
    train(user, issues, areas_of_testing, resolutions)
    clear_cache(
        ["qa_metrics:predictions_page", "qa_metrics:predictions_table"],
        user.id,
    )
    context = {"result": "success"}
    return Response(context, status=200)
def test_frequently_terms(self):
    issues = get_issues_dataframe(
        fields=[
            "Priority",
            "Resolution",
            "Description_tr",
            "Assignee",
            "Reporter",
        ],
        filters=[],
    )
    frequently_terms = calculate_frequently_terms(issues)
    assert frequently_terms == ["description_tr"]
def test_get_significant_terms(self):
    issues = get_issues_dataframe(
        fields=[
            "Priority",
            "Resolution",
            "Description_tr",
            "Assignee",
            "Reporter",
        ],
        filters=[],
    )
    significant_terms = get_significant_terms(issues)
    assert significant_terms == self.result_significant_terms
def post(self, request):
    fields = get_issues_fields(request.user)
    issues = get_issues_dataframe(fields=fields)
    filters = get_filters(request.user, issues=issues)
    if request.data.get("action") == "apply":
        new_filters = request.data.get("filters")
        if new_filters:
            for new_filter in new_filters:
                for filter_ in filters:
                    if new_filter["name"] == filter_["name"]:
                        filter_.update(
                            {
                                "current_value": new_filter[
                                    "current_value"
                                ],
                                "filtration_type": new_filter[
                                    "filtration_type"
                                ],
                                "exact_match": new_filter["exact_match"],
                            }
                        )
            issues = get_issues(filters=filters, fields=fields)
    issues_count = len(issues)
    context = {
        "records_count": {
            "total": get_issue_count(),
            "filtered": issues_count,
        },
        "filters": filters,
    }
    for element in context:
        redis_conn.set(
            f"user:{request.user.id}:analysis_and_training:{element}",
            dumps(context.get(element)),
        )
    remove_cache_record(
        "analysis_and_training:defect_submission", request.user.id
    )
    return Response(context)
def get(self, request):
    cache = redis_conn.get(
        f"user:{request.user.id}:analysis_and_training:filters"
    )
    filters = loads(cache) if cache else None
    # Pass the user object, matching get_issues_fields calls elsewhere.
    fields = get_issues_fields(request.user)
    issues = get_issues_dataframe(fields=fields, filters=filters)
    if issues.empty:
        return Response({})
    freq_terms = calculate_frequently_terms(issues)
    context = {"frequently_terms": freq_terms}
    return Response(context)
def test_get_significant_terms_with_aot(self):
    issues = get_issues_dataframe(
        fields=[
            "Priority",
            "Resolution",
            "Description_tr",
            "Assignee",
            "Reporter",
        ],
        filters=[],
    )
    significant_terms = get_significant_terms(
        issues, self.settings_for_aot
    )
    result_significant_terms = self.result_significant_terms.copy()
    result_significant_terms["metrics"].append("AOT1")
    assert significant_terms == result_significant_terms
def get(self, request):
    cache = redis_conn.get(
        f"user:{request.user.id}:analysis_and_training:filters"
    )
    if cache:
        filters = loads(cache)
    else:
        fields = get_issues_fields(request.user)
        filters = get_filters(
            request.user, issues=get_issues_dataframe(fields=fields)
        )
        redis_conn.set(
            name=f"user:{request.user.id}:analysis_and_training:filters",
            value=dumps(filters),
            ex=60 * 30,
        )
    return Response(filters)
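# The read-through cache pattern above (redis_conn.get -> loads, falling
# back to a recompute plus redis_conn.set) recurs in most views in this
# module. A sketch of a consolidating helper, assuming the same redis_conn
# connection and the loads/dumps serializers used elsewhere here; the name
# get_or_set_cache is hypothetical.
def get_or_set_cache(key: str, compute, ttl: int = 60 * 30):
    """Return the cached value for key, computing and caching it on a miss."""
    cached = redis_conn.get(key)
    if cached:
        return loads(cached)
    value = compute()
    redis_conn.set(name=key, value=dumps(value), ex=ttl)
    return value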
def get(self, request):
    cache = redis_conn.get(
        f"user:{request.user.id}:analysis_and_training:filters"
    )
    filters = loads(cache) if cache else None
    # Pass the user object, matching get_issues_fields calls elsewhere.
    fields = get_issues_fields(request.user)
    issues = get_issues_dataframe(fields=fields, filters=filters)
    if issues.empty:
        return Response({})
    statistics = calculate_statistics(
        issues,
        ["Comments", "Attachments", "Time to Resolve"],
    )
    context = {"statistics": statistics}
    return Response(context)
def get(self, request):
    user = request.user
    check_training_files(user)
    cached_filters = redis_conn.get(
        f"user:{user.id}:qa_metrics:filters"
    )
    if cached_filters:
        filters = loads(cached_filters)
    else:
        filters = get_qa_metrics_settings(user)
        fields = [field["name"] for field in filters]
        issues = get_issues_dataframe(
            fields=fields, filters=[UNRESOLVED_BUGS_FILTER]
        )
        filters = update_drop_down_fields(filters, issues)
    return Response(filters)
def post(self, request):
    metric = request.GET["metric"]
    cache = redis_conn.get(
        f"user:{request.user.id}:analysis_and_training:filters"
    )
    filters = loads(cache) if cache else None
    source_field = get_source_field(request.user)
    # The leading token of the metric is either a field name
    # ("Resolution", "Priority") or an area of testing.
    metric_base = metric.split()[0]
    issues = get_issues_dataframe(
        fields=[
            metric_base,
            source_field,
            "Description_tr",
            "Assignee",
            "Reporter",
        ],
        filters=filters,
    )
    if issues.empty:
        return Response({})
    mark_up_entities = get_mark_up_entities(request.user)
    if metric_base not in ("Resolution", "Priority"):
        if source_field and mark_up_entities:
            for area in mark_up_entities:
                if area["area_of_testing"] == metric_base:
                    issues = mark_up_series(
                        issues,
                        source_field,
                        metric_base,
                        area["entities"],
                    )
    significant_terms = calculate_significance_weights(issues, metric)
    context = {"significant_terms": significant_terms}
    return Response(context)
def test_get_significant_terms_with_filter_negative(self):
    issues = get_issues_dataframe(
        fields=[
            "Priority",
            "Resolution",
            "Description_tr",
            "Assignee",
            "Reporter",
        ],
        filters=[
            {
                "name": "Priority",
                "filtration_type": "drop-down",
                "current_value": ["Critical"],
                "exact_match": False,
            }
        ],
    )
    significant_terms = get_significant_terms(issues)
    assert significant_terms == {
        "metrics": [],
        "chosen_metric": "",
        "terms": {},
    }