def get(self, request):
    """Build the QA-metrics filter settings for the requesting user.

    Fetches unresolved bugs for the user's configured fields and uses
    them to populate the drop-down options of the saved filter settings.
    """
    current_user = request.user
    check_training_files(current_user)
    check_predictions()
    field_names = get_qa_metrics_fields(current_user)
    unresolved_bugs = DataFrame.from_records(
        get_issues(filters=[UNRESOLVED_BUGS_FILTER], fields=field_names)
    )
    # Seed the saved settings with concrete drop-down values taken
    # from the issues just fetched.
    filter_settings = update_drop_down_fields(
        get_qa_metrics_settings(current_user), unresolved_bugs
    )
    return Response(filter_settings)
def post(self, request):
    """Return one page of bug predictions matching the posted filters.

    Reads ``filters``, ``offset`` and ``limit`` from the request body,
    falling back to the module defaults for pagination.
    """
    current_user = request.user
    body = request.data
    requested_filters = body.get("filters", [])
    page_offset = int(body.get("offset", DEFAULT_OFFSET))
    page_limit = int(body.get("limit", DEFAULT_LIMIT))
    check_training_files(current_user)
    check_predictions()
    table_settings = get_predictions_table_settings(current_user)
    page = get_predictions_table(
        table_settings, requested_filters, page_offset, page_limit
    )
    return Response(page.to_dict("records"))
def post(self, request):
    """Return a paginated slice of the user's predictions table.

    Serves the table from the per-user Redis cache when available;
    otherwise recomputes the predictions, caches the full table for
    30 minutes, and then paginates the result.
    """
    user = request.user
    page_offset = int(request.data.get("offset", DEFAULT_OFFSET))
    page_limit = int(request.data.get("limit", DEFAULT_LIMIT))
    # Filters fall back to the unresolved-bugs default when nothing
    # has been cached for this user yet.
    raw_filters = redis_conn.get(f"user:{user.id}:qa_metrics:filters")
    filters = loads(raw_filters) if raw_filters else [UNRESOLVED_BUGS_FILTER]
    check_training_files(user)
    raw_table = redis_conn.get(
        f"user:{request.user.id}:qa_metrics:predictions_table"
    )
    if raw_table:
        full_table = DataFrame.from_records(loads(raw_table))
    else:
        table_fields = get_predictions_table_fields(user)
        issues = calculate_issues_predictions(user, table_fields, filters)
        if issues.empty:
            return Response({})
        # These two fields feed prediction only and are not shown
        # in the resulting table.
        table_fields.remove("Description_tr")
        table_fields.remove("Key")
        full_table = get_predictions_table(
            issues=issues,
            fields_settings=table_fields,
            offset=None,
            limit=None,
        )
        redis_conn.set(
            name=f"user:{request.user.id}:qa_metrics:predictions_table",
            value=dumps(list(full_table.T.to_dict().values())),
            ex=60 * 30,
        )
    page = paginate_bugs(df=full_table, offset=page_offset, limit=page_limit)
    return Response(list(page.T.to_dict().values()))
def post(self, request):
    """Build the QA-metrics predictions page: first table page + charts.

    Computes the full predictions table for the posted ``filters``,
    then derives the chart percentages (areas of testing, priority,
    time to resolve, resolution) from it and returns the first page
    of the table alongside the charts.
    """
    user = request.user
    filters = request.data.get("filters", [])
    offset = DEFAULT_OFFSET
    limit = DEFAULT_LIMIT
    check_training_files(user)
    check_predictions()
    archive_path = get_archive_path(user)
    training_parameters = read_from_archive(
        archive_path, TRAINING_PARAMETERS_FILENAME
    )
    predictions_table_settings = get_predictions_table_settings(user)
    # offset/limit of None fetch the whole table; pagination happens below.
    predictions = get_predictions_table(
        predictions_table_settings, filters, None, None
    )
    if predictions.empty:
        return Response({})
    prediction_table = paginate_bugs(predictions, offset, limit)
    areas_of_testing_percentage = calculate_aot_percentage(
        predictions["Area of Testing"])
    priority_percentage = calculate_priority_percentage(
        predictions["Priority"], training_parameters["Priority"])
    ttr_percentage = calculate_ttr_percentage(
        predictions["Time to Resolve"],
        training_parameters["Time to Resolve"],
    )
    resolution_percentage = calculate_resolution_percentage(
        predictions, training_parameters["Resolution"])
    result = {
        # BUGFIX: wrap in list() — dict_values is not JSON-serializable
        # with a plain encoder; this also matches the sibling GET view,
        # which returns list(prediction_table.T.to_dict().values()).
        "predictions_table": list(prediction_table.T.to_dict().values()),
        "prediction_table_rows_count": len(predictions),
        "areas_of_testing_chart": areas_of_testing_percentage,
        "priority_chart": priority_percentage,
        "ttr_chart": ttr_percentage,
        "resolution_chart": resolution_percentage,
    }
    return Response(result)
def get(self, request):
    """Return the user's QA-metrics filters, preferring the Redis cache.

    On a cache miss the saved settings are loaded and their drop-down
    options are filled from the current unresolved bugs.
    """
    current_user = request.user
    check_training_files(current_user)
    cached_filters = redis_conn.get(
        f"user:{request.user.id}:qa_metrics:filters"
    )
    if cached_filters:
        return Response(loads(cached_filters))
    filter_settings = get_qa_metrics_settings(current_user)
    # Only the fields named in the settings need to be fetched.
    wanted_fields = [entry["name"] for entry in filter_settings]
    unresolved_bugs = get_issues_dataframe(
        fields=wanted_fields, filters=[UNRESOLVED_BUGS_FILTER]
    )
    filter_settings = update_drop_down_fields(
        filter_settings, unresolved_bugs
    )
    return Response(filter_settings)
def get(self, request):
    """Return the training context: priorities, resolutions and areas
    of testing, with the internal "Other" area hidden from the client.
    """
    user = request.user
    check_training_files(user)
    # BUGFIX: the original called get_bug_resolutions(user) twice (once
    # for the len() guard, once for the comprehension). A comprehension
    # over an empty sequence already yields [], so one call suffices.
    resolutions = [
        resolution["value"] for resolution in get_bug_resolutions(user)
    ]
    training_parameters = get_training_parameters(user)
    areas_of_testing = training_parameters.get("areas_of_testing")
    # Guard against a missing/empty key: the original raised TypeError
    # on `"Other" in None`.
    if areas_of_testing and "Other" in areas_of_testing:
        areas_of_testing.remove("Other")
    context = {
        "Priority": training_parameters.get("Priority"),
        "resolution": resolutions,
        "areas_of_testing": areas_of_testing,
    }
    return Response(context)
def get(self, request):
    """Return the QA-metrics predictions page (first table page + charts).

    Serves the whole page from the per-user Redis cache when present.
    On a miss, recomputes predictions from scratch and caches three
    keys for 30 minutes: the assembled page, the active filters, and
    the full (unpaginated) predictions table — the last presumably so
    the table-pagination endpoint can reuse it without recomputing.
    """
    user = request.user
    # This endpoint always serves the first page with default size.
    offset = DEFAULT_OFFSET
    limit = DEFAULT_LIMIT
    cached_predictions = redis_conn.get(
        f"user:{request.user.id}:qa_metrics:predictions_page"
    )
    cached_filters = redis_conn.get(
        f"user:{request.user.id}:qa_metrics:filters"
    )
    # Fall back to the unresolved-bugs filter when nothing is cached.
    filters = (
        loads(cached_filters) if cached_filters else [UNRESOLVED_BUGS_FILTER]
    )
    if cached_predictions:
        # Cache hit: the fully assembled response dict was stored as JSON.
        predictions = loads(cached_predictions)
    else:
        check_training_files(user)
        training_parameters = get_training_parameters(request.user)
        predictions_table_fields = get_predictions_table_fields(user)
        issues = calculate_issues_predictions(
            user, predictions_table_fields, filters
        )
        # No matching issues: return an empty page without caching,
        # so the next request recomputes.
        if issues.empty:
            return Response({})
        # These two fields are inputs to prediction only and are not
        # displayed in the resulting table.
        predictions_table_fields.remove("Description_tr")
        predictions_table_fields.remove("Key")
        # offset/limit of None fetch the whole table; pagination is
        # applied separately below so charts can use the full data.
        predictions_table = get_predictions_table(
            issues=issues,
            fields_settings=predictions_table_fields,
            offset=None,
            limit=None,
        )
        prediction_table = paginate_bugs(predictions_table, offset, limit)
        # Chart percentages are computed over the FULL table, not the
        # paginated slice.
        areas_of_testing_percentage = calculate_aot_percentage(
            predictions_table["Area of Testing"]
        )
        priority_percentage = calculate_priority_percentage(
            predictions_table["Priority"], training_parameters["Priority"]
        )
        ttr_percentage = calculate_ttr_percentage(
            predictions_table["Time to Resolve"],
            training_parameters["Time to Resolve"],
        )
        resolution_percentage = calculate_resolution_percentage(
            predictions_table, training_parameters["Resolution"]
        )
        predictions = {
            "predictions_table": list(
                prediction_table.T.to_dict().values()
            ),
            "prediction_table_rows_count": len(predictions_table),
            "areas_of_testing_chart": areas_of_testing_percentage,
            "priority_chart": priority_percentage,
            "ttr_chart": ttr_percentage,
            "resolution_chart": resolution_percentage,
        }
        # Cache the assembled page, the filters used, and the full
        # table under separate keys, each with a 30-minute TTL.
        redis_conn.set(
            name=f"user:{request.user.id}:qa_metrics:predictions_page",
            value=dumps(predictions),
            ex=60 * 30,
        )
        redis_conn.set(
            name=f"user:{request.user.id}:qa_metrics:filters",
            value=dumps(filters),
            ex=60 * 30,
        )
        redis_conn.set(
            name=f"user:{request.user.id}:qa_metrics:predictions_table",
            value=dumps(list(predictions_table.T.to_dict().values())),
            ex=60 * 30,
        )
    return Response(predictions)