class TestMLModel(TestCase):
    """Unit tests for the MLModel model's validation and estimator loading."""

    def setUp(self):
        # Point the model at the pickled estimator fixture shipped with the tests.
        fixture_path = os.path.join(
            BASE_DIR, "server", "tests", "fixtures", "betting_model.pkl"
        )
        self.ml_model = MLModel(name="my_estimator", filepath=fixture_path)

    def test_validation(self):
        """full_clean rejects malformed data_class_path values and duplicate names."""
        with self.subTest("when the data_class_path isn't a module path"):
            self.ml_model.data_class_path = "some/bad/path"

            with self.assertRaises(ValidationError):
                self.ml_model.full_clean()

        with self.subTest("when the model name already exists"):
            self.ml_model.data_class_path = "some.perfectly.fine.path"
            self.ml_model.save()

            clone = MLModel(name="my_estimator")

            with self.assertRaises(ValidationError):
                clone.full_clean()

    def test_load_estimator(self):
        """The pickled fixture deserializes to a scikit-learn estimator."""
        estimator = self.ml_model.load_estimator()

        self.assertIsInstance(estimator, BaseEstimator)
def test_validation(self):
    """Saving one model then full_clean-ing a same-named one raises a uniqueness error."""
    with self.subTest("when the model name already exists"):
        self.ml_model.save()

        clone = MLModel(name="test_estimator")

        expected_error = "{'name': ['Ml model with this Name already exists.']}"
        with self.assertRaisesMessage(ValidationError, expected_error):
            clone.full_clean()
def __build_ml_model(
    estimator: ml_model.MLModel, data_class: Type[ml_model.MLModelData]
) -> MLModel:
    """Build and validate an MLModel DB record from an estimator and its data class.

    Runs full_clean() so invalid records raise before the caller persists them.
    """
    record = MLModel(
        name=estimator.name,
        filepath=estimator.pickle_filepath(),
        data_class_path=data_class.class_path(),
    )
    record.full_clean()

    return record
def test_validation(self):
    """full_clean rejects a non-module data_class_path and duplicate model names."""
    with self.subTest("when the data_class_path isn't a module path"):
        self.ml_model.data_class_path = "some/bad/path"

        with self.assertRaises(ValidationError):
            self.ml_model.full_clean()

    with self.subTest("when the model name already exists"):
        # Use a valid path so only the name uniqueness is under test.
        self.ml_model.data_class_path = "some.perfectly.fine.path"
        self.ml_model.save()

        duplicate = MLModel(name="my_estimator")

        with self.assertRaises(ValidationError):
            duplicate.full_clean()
def test_fetch_season_performance_chart_parameters(self):
    """The query exposes available seasons and only models that have predictions.

    Bug fix: the final subTest previously reused the query result captured
    BEFORE the prediction-less model was saved, so its assertNotIn could
    never fail. The query is now re-executed after the save.
    """
    expected_years = list(
        {match.start_date_time.year for match in self.matches}
    )

    # Keep the query in a local so it can be re-run inside the subTest.
    query = """
        query QueryType {
            fetchSeasonPerformanceChartParameters {
                availableSeasons
                availableMlModels {
                    name
                    predictionSeasons
                }
            }
        }
    """

    executed = self.client.execute(query)
    data = executed["data"]["fetchSeasonPerformanceChartParameters"]

    self.assertEqual(expected_years, data["availableSeasons"])

    db_ml_model_names = [model.name for model in self.ml_models]
    query_ml_model_names = [
        model["name"] for model in data["availableMlModels"]
    ]
    self.assertEqual(sorted(db_ml_model_names), sorted(query_ml_model_names))

    for model in data["availableMlModels"]:
        prediction_seasons = (
            MLModel.objects.prefetch_related("prediction_set")
            .get(name=model["name"])
            .prediction_set.distinct("match__start_date_time__year")
            .values_list("match__start_date_time__year", flat=True)
        )
        self.assertEqual(
            set(model["predictionSeasons"]), set(prediction_seasons)
        )

    with self.subTest("with an MLModel without any predictions"):
        predictionless_ml_model = MLModel(name="no_predictions")
        predictionless_ml_model.save()

        # Re-execute AFTER saving the new model; otherwise we'd be
        # asserting against stale data and the test would pass trivially.
        executed = self.client.execute(query)
        data = executed["data"]["fetchSeasonPerformanceChartParameters"]
        query_ml_model_names = [
            model["name"] for model in data["availableMlModels"]
        ]

        self.assertNotIn(predictionless_ml_model.name, query_ml_model_names)
def setUp(self):
    """Seed two teams, a recent and an old match, and one prediction for each."""
    self.maxDiff = None
    self.client = Client(schema)

    home_team = Team(name="Richmond")
    home_team.save()
    away_team = Team(name="Melbourne")
    away_team.save()

    new_match = Match(
        start_date_time=timezone.make_aware(datetime(2018, 5, 5)),
        round_number=5,
    )
    new_match.save()

    old_match = Match(
        start_date_time=timezone.make_aware(datetime(2014, 5, 5)),
        round_number=7,
    )
    old_match.save()

    # Home team wins both matches 150-100.
    for match in (new_match, old_match):
        TeamMatch(team=home_team, match=match, at_home=True, score=150).save()
        TeamMatch(team=away_team, match=match, at_home=False, score=100).save()

    ml_model = MLModel(name="test_model")
    ml_model.save()

    # One correct prediction (new match) and one incorrect (old match).
    Prediction(
        match=new_match,
        ml_model=ml_model,
        predicted_winner=home_team,
        predicted_margin=50,
    ).save()
    Prediction(
        match=old_match,
        ml_model=ml_model,
        predicted_winner=away_team,
        predicted_margin=50,
    ).save()
def test_unique_competition_prediction_type(self):
    """
    Test validation rule for having only unique prediction types for
    competitions.

    Runs in its own test method: raising DB-level errors inside a subtest
    tends to break things, because Django wraps each test method in an
    atomic transaction.
    """
    self.ml_model.used_in_competitions = True
    self.ml_model.save()

    competing_model = MLModel(
        name="another_estimator",
        used_in_competitions=True,
        prediction_type=self.ml_model.prediction_type,
    )

    expected_message = (
        "duplicate key value violates unique constraint "
        '"unique_prediction_type_for_competitions"\nDETAIL: '
        "Key (prediction_type)=(Margin) already exists.\n"
    )

    with self.assertRaisesMessage(utils.IntegrityError, expected_message):
        # Inner atomic block keeps the expected IntegrityError from
        # poisoning the outer per-test transaction.
        with transaction.atomic():
            competing_model.full_clean()
            competing_model.save()
def test_one_principal_model(self):
    """
    Test validation rule for having only one principal model.

    We run this in its own test method, because raising DB-level errors as
    part of a subtest tends to break things, because Django wraps each test
    method in an atomic transaction.
    """
    self.ml_model.is_principal = True
    self.ml_model.used_in_competitions = True
    self.ml_model.save()

    duplicate_model = MLModel(
        name="say_hello_to_the_new_boss",
        is_principal=True,
        used_in_competitions=True,
        prediction_type="Win Probability",
    )

    with self.assertRaisesMessage(
        utils.IntegrityError,
        'duplicate key value violates unique constraint "one_principal_model"\n'
        "DETAIL: Key (is_principal)=(t) already exists.\n",
    ):
        # Fix: wrap in an inner atomic block (matching
        # test_unique_competition_prediction_type) so the expected
        # IntegrityError doesn't break the outer per-test transaction.
        with transaction.atomic():
            duplicate_model.full_clean()
            duplicate_model.save()
def setUp(self):
    """Mock the fixture reader and Prediction.bulk_create, then seed teams and a model."""
    tomorrow = datetime.now() + timedelta(days=1)
    year = tomorrow.year
    # Copy so that .pop() below doesn't mutate the shared TEAM_NAMES constant.
    team_names = TEAM_NAMES[:]

    # Mock footywire fixture data
    self.fixture_data = [
        {
            "date": tomorrow,
            "season": year,
            "round": 1,
            "round_label": "Round 1",
            # NOTE(review): "crows" looks like a typo for "crowd" — confirm
            # against the data-reader schema before changing the key.
            "crows": 1234,
            "home_team": team_names.pop(),
            "away_team": team_names.pop(),
            "home_score": 50,
            "away_score": 100,
            "venue": FAKE.city(),
        }
        # Fix: the loop index was unused, so use the conventional `_`.
        for _ in range(ROW_COUNT)
    ]

    footywire = FootywireDataReader()
    footywire.get_fixture = Mock(return_value=pd.DataFrame(self.fixture_data))

    # Mock bulk_create to make assertions on calls, keeping a reference to
    # the real implementation so the side effect can delegate to it.
    pred_bulk_create = copy.copy(Prediction.objects.bulk_create)
    Prediction.objects.bulk_create = Mock(
        side_effect=self.__pred_bulk_create(pred_bulk_create)
    )

    # Save records in DB
    for match_data in self.fixture_data:
        Team(name=match_data["home_team"]).save()
        Team(name=match_data["away_team"]).save()

    betting_model = BettingModel(name="betting_data")
    pickle_filepath = os.path.abspath(
        os.path.join(
            BASE_DIR, "server", "tests", "fixtures", "betting_model.pkl"
        )
    )

    MLModel(
        name=betting_model.name,
        description="Betting data model",
        filepath=pickle_filepath,
        data_class_path=BettingModelData.class_path(),
    ).save()

    self.tip_command = tip.Command(data_reader=footywire)
def __build_ml_model(ml_model: MlModel) -> MLModel:
    """Construct and validate an MLModel record from a raw model mapping.

    full_clean() runs before returning, so invalid data raises here rather
    than at save time.
    """
    record = MLModel(
        name=ml_model["name"],
        filepath=ml_model["filepath"],
    )
    record.full_clean()

    return record
def setUp(self):
    """Build (without saving) an MLModel pointed at the pickled test fixture."""
    fixture_path = os.path.join(
        BASE_DIR, "server", "tests", "fixtures", "betting_model.pkl"
    )

    self.ml_model = MLModel(name="my_estimator", filepath=fixture_path)
def setUp(self):
    """Provide a fresh, unsaved MLModel with a placeholder pickle path."""
    self.ml_model = MLModel(
        filepath="path/to/test_estimator.pkl",
        name="test_estimator",
    )
def setUp(self):
    """Provide a fresh, unsaved MLModel instance for each test."""
    self.ml_model = MLModel(name="test_estimator")
class TestMLModel(TestCase):
    """Unit tests for MLModel's field validation and DB-level constraints."""

    def setUp(self):
        self.ml_model = MLModel(name="test_estimator")

    def test_validation(self):
        """Duplicate model names fail full_clean with a uniqueness error."""
        with self.subTest("when the model name already exists"):
            self.ml_model.save()

            duplicate_model = MLModel(name="test_estimator")

            with self.assertRaisesMessage(
                ValidationError,
                "{'name': ['Ml model with this Name already exists.']}",
            ):
                duplicate_model.full_clean()

    def test_clean(self):
        """A principal model must also be flagged for competition use."""
        with self.subTest("when a principal model isn't used in competitions"):
            self.ml_model.is_principal = True
            self.ml_model.used_in_competitions = False

            with self.assertRaisesMessage(
                ValidationError,
                "A principal model must be used for competitions.",
            ):
                self.ml_model.full_clean()

    def test_one_principal_model(self):
        """
        Test validation rule for having only one principal model.

        We run this in its own test method, because raising DB-level errors
        as part of a subtest tends to break things, because Django wraps
        each test method in an atomic transaction.
        """
        self.ml_model.is_principal = True
        self.ml_model.used_in_competitions = True
        self.ml_model.save()

        duplicate_model = MLModel(
            name="say_hello_to_the_new_boss",
            is_principal=True,
            used_in_competitions=True,
            prediction_type="Win Probability",
        )

        with self.assertRaisesMessage(
            utils.IntegrityError,
            'duplicate key value violates unique constraint "one_principal_model"\n'
            "DETAIL: Key (is_principal)=(t) already exists.\n",
        ):
            # Fix: wrap in an inner atomic block (matching
            # test_unique_competition_prediction_type below) so the expected
            # IntegrityError doesn't break the outer per-test transaction.
            with transaction.atomic():
                duplicate_model.full_clean()
                duplicate_model.save()

    def test_unique_competition_prediction_type(self):
        """
        Test validation rule for having only unique prediction types
        for competitions.

        We run this in its own test method, because raising DB-level errors
        as part of a subtest tends to break things, because Django wraps
        each test method in an atomic transaction.
        """
        self.ml_model.used_in_competitions = True
        self.ml_model.save()

        duplicate_model = MLModel(
            name="another_estimator",
            used_in_competitions=True,
            prediction_type=self.ml_model.prediction_type,
        )

        with self.assertRaisesMessage(
            utils.IntegrityError,
            "duplicate key value violates unique constraint "
            '"unique_prediction_type_for_competitions"\nDETAIL: '
            "Key (prediction_type)=(Margin) already exists.\n",
        ):
            with transaction.atomic():
                duplicate_model.full_clean()
                duplicate_model.save()