def get(self, request):
    # Reuse the filters the user applied earlier, if any are cached.
    cache = redis_conn.get(
        f"user:{request.user.id}:analysis_and_training:filters"
    )
    filters = loads(cache) if cache else None

    user = request.user
    issues = get_issues_dataframe(
        fields=[
            get_source_field(user),
            "Priority",
            "Resolution",
            "Description_tr",
            "Assignee",
            "Reporter",
        ],
        filters=filters,
    )
    if issues.empty:
        return Response({})

    settings = {
        "source_field": get_source_field(user),
        "bug_resolution": get_bug_resolutions(user),
        "mark_up_entities": get_mark_up_entities(user),
    }
    significant_terms = get_significant_terms(issues, settings)

    context = {"significant_terms": significant_terms}
    return Response(context)
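# Both this view and the POST handler below repeat the same "read cached
# filters from Redis" steps. A minimal self-contained sketch of that pattern,
# assuming redis_conn is a redis.Redis client and loads is json.loads;
# get_cached_filters is a hypothetical helper, not part of the project API.
import json

import redis

redis_conn = redis.Redis(host="localhost", port=6379, db=0)


def get_cached_filters(user_id):
    """Return the user's saved filters, or None when nothing is cached."""
    cache = redis_conn.get(f"user:{user_id}:analysis_and_training:filters")
    return json.loads(cache) if cache else None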
def post(self, request):
    user = request.user
    # Reuse the filters the user applied earlier, if any are cached.
    cache = redis_conn.get(
        f"user:{request.user.id}:analysis_and_training:filters"
    )
    filters = loads(cache) if cache else None

    fields = get_issues_fields(user)
    issues = get_issues_dataframe(filters=filters, fields=fields)
    if issues.empty:
        raise BugsNotFoundWarning

    source_field = get_source_field(user)
    if source_field not in issues.columns:
        raise InvalidSourceField

    resolutions = [
        resolution["value"] for resolution in get_bug_resolutions(user)
    ]

    areas_of_testing = []
    mark_up_entities = get_mark_up_entities(user)
    if source_field:
        # Label every issue with its areas of testing; issues matching
        # none of the configured entities go to the "Other" bucket.
        areas_of_testing = [
            area["area_of_testing"] for area in mark_up_entities
        ] + ["Other"]
        for area in mark_up_entities:
            issues = mark_up_series(
                issues,
                source_field,
                area["area_of_testing"],
                area["entities"],
            )
        issues = mark_up_other_data(issues, areas_of_testing)

    train(
        user,
        issues,
        areas_of_testing,
        resolutions,
    )
    # Retraining invalidates any predictions cached for the QA metrics pages.
    clear_cache(
        ["qa_metrics:predictions_page", "qa_metrics:predictions_table"],
        request.user.id,
    )

    context = {"result": "success"}
    return Response(context, status=200)
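# For illustration only: one plausible reading of mark_up_series and
# mark_up_other_data, inferred from how they are called above. These
# *_sketch functions are hypothetical stand-ins, not the project's code.
import pandas as pd


def mark_up_series_sketch(issues, source_field, area, entities):
    # Flag issues whose source field matches one of the area's entities.
    issues[area] = issues[source_field].isin(entities).astype(int)
    return issues


def mark_up_other_data_sketch(issues, areas_of_testing):
    # Issues that matched no explicit area fall into the "Other" bucket.
    explicit = [area for area in areas_of_testing if area != "Other"]
    issues["Other"] = (issues[explicit].sum(axis=1) == 0).astype(int)
    return issues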
def get(self, request):
    check_issues_exist()

    mark_up_entities = get_mark_up_entities(request.user)
    source_field = get_source_field(request.user)
    if not source_field:
        return Response({})

    unique_values = get_unique_values(source_field)
    unique_values = split_values(unique_values)

    result = {
        "mark_up_entities": mark_up_entities,
        "entity_names": sorted(unique_values),
    }
    return Response(result)
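# A hedged guess at split_values' contract, inferred from the call site: a
# single source-field value may bundle several comma-separated entities, and
# the settings screen needs each entity name individually. Hypothetical
# re-creation; the real delimiter handling may differ.
def split_values_sketch(unique_values):
    result = set()
    for value in unique_values:
        for part in str(value).split(","):
            part = part.strip()
            if part:
                result.add(part)
    return result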
def test_update_mark_up_entities(self):
    from apps.settings.serializers import (
        UserTrainingSerializer,
    )

    user = User.objects.get(name=TEST_USER["name"])
    mark_up_entities = {
        "mark_up_entities": [
            {"area_of_testing": "TestAOT", "entities": ["Minor"]}
        ]
    }
    mark_up_entities_serializer = UserTrainingSerializer(
        data=mark_up_entities
    )
    # Fail fast on an invalid payload instead of silently proceeding.
    assert mark_up_entities_serializer.is_valid()
    update_mark_up_entities(
        user, mark_up_entities_serializer.data["mark_up_entities"]
    )
    assert {
        "mark_up_entities": get_mark_up_entities(user)
    } == mark_up_entities
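# What the payload in the test implies about UserTrainingSerializer's shape,
# expressed with plain DRF fields. A hedged sketch; the real serializer in
# apps.settings.serializers may be defined differently.
from rest_framework import serializers


class MarkUpEntitySketchSerializer(serializers.Serializer):
    area_of_testing = serializers.CharField()
    entities = serializers.ListField(child=serializers.CharField())


class UserTrainingSketchSerializer(serializers.Serializer):
    mark_up_entities = MarkUpEntitySketchSerializer(many=True)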
def post(self, request):
    metric = request.GET["metric"]
    # The first whitespace-delimited token of the metric names the column
    # to analyse (e.g. "Resolution", "Priority", or an area of testing).
    metric_field = metric.split()[0]

    cache = redis_conn.get(
        f"user:{request.user.id}:analysis_and_training:filters"
    )
    filters = loads(cache) if cache else None

    source_field = get_source_field(request.user)
    issues = get_issues_dataframe(
        fields=[
            metric_field,
            source_field,
            "Description_tr",
            "Assignee",
            "Reporter",
        ],
        filters=filters,
    )
    if issues.empty:
        return Response({})

    mark_up_entities = get_mark_up_entities(request.user)
    if metric_field not in ("Resolution", "Priority"):
        # The metric refers to an area of testing, so the issues have to
        # be marked up before significance can be calculated.
        if source_field and mark_up_entities:
            for area in mark_up_entities:
                if area["area_of_testing"] == metric_field:
                    issues = mark_up_series(
                        issues,
                        source_field,
                        metric_field,
                        area["entities"],
                    )

    significant_terms = calculate_significance_weights(issues, metric)
    context = {"significant_terms": significant_terms}
    return Response(context)
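# Example call against this view, assuming (hypothetically) that it is
# routed at /analysis_and_training/significant_terms/ and that metrics are
# passed as "<field> <value>" strings such as "Resolution Rejected".
from rest_framework.test import APIClient

client = APIClient()
client.force_authenticate(user=test_user)  # test_user: placeholder User
response = client.post(
    "/analysis_and_training/significant_terms/?metric=Resolution%20Rejected"
)
print(response.json().get("significant_terms"))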