def test_calculate_aot_percentage(self):
    """All rows with no specific area should be attributed to "Other" at 100%."""
    table = get_predictions_table(self.settings, None, None, None)
    percentages = calculate_aot_percentage(table["Area of Testing"])
    assert percentages == {"Other": 100}
def test_predictions_pagination(self):
    """Paginating the full table with a limit of 20 yields exactly 20 rows."""
    full_table = get_predictions_table(
        self.issues, self.predictions_table_fields, None, None
    )
    first_page = paginate_bugs(full_table, 0, 20)
    assert len(first_page) == 20
def test_calculate_resolution_percentage(self):
    """Resolution chart: every bug predicted "Done", none "not Done"."""
    table = get_predictions_table(self.settings, None, None, None)
    resolution_chart = calculate_resolution_percentage(
        table, self.training_parameters["Resolution"]
    )
    assert resolution_chart == {"Done": {"Done": 100, "not Done": 0}}
def test_calculate_priority_percentage(self):
    """Priority chart: all bugs land in "Major"; other priorities stay at 0."""
    table = get_predictions_table(self.settings, None, None, None)
    priority_chart = calculate_priority_percentage(
        table["Priority"], self.training_parameters["Priority"]
    )
    expected = {
        "Major": 100,
        "Blocker": 0,
        "Critical": 0,
        "Minor": 0,
    }
    assert priority_chart == expected
def post(self, request):
    """Return the user's predictions table, filtered and paginated per request.

    Reads ``filters``, ``offset`` and ``limit`` from the request body
    (falling back to the module defaults) and responds with the table
    serialized as a list of records.
    """
    user = request.user
    requested_filters = request.data.get("filters", [])
    page_offset = int(request.data.get("offset", DEFAULT_OFFSET))
    page_limit = int(request.data.get("limit", DEFAULT_LIMIT))

    # Both checks raise/short-circuit upstream if prerequisites are missing.
    check_training_files(user)
    check_predictions()

    table_settings = get_predictions_table_settings(user)
    table = get_predictions_table(
        table_settings, requested_filters, page_offset, page_limit
    )
    return Response(table.to_dict("records"))
def post(self, request):
    """Serve one page of the predictions table, computing it on a cache miss.

    Filters come from the per-user Redis cache (falling back to the
    unresolved-bugs filter). The full table is cached for 30 minutes;
    only the requested page is returned.
    """
    user = request.user
    page_offset = int(request.data.get("offset", DEFAULT_OFFSET))
    page_limit = int(request.data.get("limit", DEFAULT_LIMIT))

    raw_filters = redis_conn.get(f"user:{user.id}:qa_metrics:filters")
    filters = loads(raw_filters) if raw_filters else [UNRESOLVED_BUGS_FILTER]

    check_training_files(user)

    raw_predictions = redis_conn.get(
        f"user:{request.user.id}:qa_metrics:predictions_table"
    )
    if raw_predictions:
        predictions = DataFrame.from_records(loads(raw_predictions))
    else:
        table_fields = get_predictions_table_fields(user)
        issues = calculate_issues_predictions(user, table_fields, filters)
        if issues.empty:
            return Response({})
        # These fields are needed for prediction but not shown in the table.
        table_fields.remove("Description_tr")
        table_fields.remove("Key")
        predictions = get_predictions_table(
            issues=issues,
            fields_settings=table_fields,
            offset=None,
            limit=None,
        )
        # Cache the full (unpaginated) table for half an hour.
        redis_conn.set(
            name=f"user:{request.user.id}:qa_metrics:predictions_table",
            value=dumps(list(predictions.T.to_dict().values())),
            ex=60 * 30,
        )

    page = paginate_bugs(df=predictions, offset=page_offset, limit=page_limit)
    return Response(list(page.T.to_dict().values()))
def test_calculate_ttr_percentage(self):
    """TTR chart: everything falls in the "3" bucket; all other bins are 0."""
    table = get_predictions_table(self.settings, None, None, None)
    ttr_chart = calculate_ttr_percentage(
        table["Time to Resolve"],
        self.training_parameters["Time to Resolve"],
    )
    expected = {
        "3": 100,
        "0-1.0": 0,
        "0.999-1.0": 0,
        "2.0-19.0": 0,
        "20.0-127.0": 0,
        ">127.0": 0,
    }
    assert ttr_chart == expected
def post(self, request):
    """Return the first predictions page plus aggregate chart percentages.

    Responds with the default-size first page of the predictions table
    and the area-of-testing / priority / TTR / resolution breakdowns
    computed over the FULL (unpaginated) table.

    Fix: the original put ``prediction_table.T.to_dict().values()`` — a
    ``dict_values`` view — straight into the payload, which is not
    JSON-serializable; it is now wrapped in ``list(...)``, matching the
    cached GET view elsewhere in this module.
    """
    user = request.user
    filters = request.data.get("filters", [])
    offset = DEFAULT_OFFSET
    limit = DEFAULT_LIMIT

    # Both checks raise/short-circuit upstream if prerequisites are missing.
    check_training_files(user)
    check_predictions()

    archive_path = get_archive_path(user)
    training_parameters = read_from_archive(
        archive_path, TRAINING_PARAMETERS_FILENAME
    )
    predictions_table_settings = get_predictions_table_settings(user)
    # Full table (no offset/limit): the charts are computed over all rows.
    predictions = get_predictions_table(
        predictions_table_settings, filters, None, None
    )
    if predictions.empty:
        return Response({})

    prediction_table = paginate_bugs(predictions, offset, limit)
    areas_of_testing_percentage = calculate_aot_percentage(
        predictions["Area of Testing"]
    )
    priority_percentage = calculate_priority_percentage(
        predictions["Priority"], training_parameters["Priority"]
    )
    ttr_percentage = calculate_ttr_percentage(
        predictions["Time to Resolve"],
        training_parameters["Time to Resolve"],
    )
    resolution_percentage = calculate_resolution_percentage(
        predictions, training_parameters["Resolution"]
    )
    result = {
        # list(...) — dict_values is not JSON-serializable (the fix).
        "predictions_table": list(prediction_table.T.to_dict().values()),
        "prediction_table_rows_count": len(predictions),
        "areas_of_testing_chart": areas_of_testing_percentage,
        "priority_chart": priority_percentage,
        "ttr_chart": ttr_percentage,
        "resolution_chart": resolution_percentage,
    }
    return Response(result)
def get(self, request):
    """Return the qa-metrics predictions page, serving from cache when possible.

    On a cache hit the previously serialized page dict is returned as-is.
    On a miss the predictions table and all four chart breakdowns are
    computed, then three values are cached for 30 minutes each: the page
    payload, the active filters, and the full predictions table.
    """
    user = request.user
    offset = DEFAULT_OFFSET
    limit = DEFAULT_LIMIT
    cached_predictions = redis_conn.get(
        f"user:{request.user.id}:qa_metrics:predictions_page"
    )
    cached_filters = redis_conn.get(
        f"user:{request.user.id}:qa_metrics:filters"
    )
    # Fall back to the unresolved-bugs filter when no filters are cached.
    filters = (
        loads(cached_filters) if cached_filters else [UNRESOLVED_BUGS_FILTER]
    )
    if cached_predictions:
        predictions = loads(cached_predictions)
    else:
        check_training_files(user)
        training_parameters = get_training_parameters(request.user)
        predictions_table_fields = get_predictions_table_fields(user)
        issues = calculate_issues_predictions(
            user, predictions_table_fields, filters
        )
        if issues.empty:
            return Response({})
        # These fields feed the prediction step but are not table columns.
        predictions_table_fields.remove("Description_tr")
        predictions_table_fields.remove("Key")
        predictions_table = get_predictions_table(
            issues=issues,
            fields_settings=predictions_table_fields,
            offset=None,
            limit=None,
        )
        # Only the first page is returned; charts use the full table.
        prediction_table = paginate_bugs(predictions_table, offset, limit)
        areas_of_testing_percentage = calculate_aot_percentage(
            predictions_table["Area of Testing"]
        )
        priority_percentage = calculate_priority_percentage(
            predictions_table["Priority"], training_parameters["Priority"]
        )
        ttr_percentage = calculate_ttr_percentage(
            predictions_table["Time to Resolve"],
            training_parameters["Time to Resolve"],
        )
        resolution_percentage = calculate_resolution_percentage(
            predictions_table, training_parameters["Resolution"]
        )
        predictions = {
            "predictions_table": list(
                prediction_table.T.to_dict().values()
            ),
            "prediction_table_rows_count": len(predictions_table),
            "areas_of_testing_chart": areas_of_testing_percentage,
            "priority_chart": priority_percentage,
            "ttr_chart": ttr_percentage,
            "resolution_chart": resolution_percentage,
        }
        # Cache the page payload, the filters, and the full table (30 min each).
        redis_conn.set(
            name=f"user:{request.user.id}:qa_metrics:predictions_page",
            value=dumps(predictions),
            ex=60 * 30,
        )
        redis_conn.set(
            name=f"user:{request.user.id}:qa_metrics:filters",
            value=dumps(filters),
            ex=60 * 30,
        )
        redis_conn.set(
            name=f"user:{request.user.id}:qa_metrics:predictions_table",
            value=dumps(list(predictions_table.T.to_dict().values())),
            ex=60 * 30,
        )
    return Response(predictions)
def test_predictions_pagination(self):
    """The first page (offset 0, limit 20) contains exactly 20 rows."""
    table = get_predictions_table(self.settings, None, None, None)
    page = paginate_bugs(table, 0, 20)
    assert len(page) == 20
def test_get_predictions_table(self):
    """Without pagination the table contains every one of the 100 rows."""
    table = get_predictions_table(self.settings, None, None, None)
    assert len(table) == 100
def test_get_predictions_table_with_limit(self):
    """Passing offset 0 and limit 20 truncates the table to 20 rows."""
    limited_table = get_predictions_table(
        self.issues, self.predictions_table_fields, 0, 20
    )
    assert len(limited_table) == 20
def test_get_predictions_table_without_limit(self):
    """With offset/limit of None the full 100-row table is returned."""
    unlimited_table = get_predictions_table(
        self.issues, self.predictions_table_fields, None, None
    )
    assert len(unlimited_table) == 100