def test_advanced_model_benchmarks(self):
    """Tests that advanced models are only added with the flag."""
    model_benchmarks = benchmark.ModelBenchmarks(advanced_models=False)
    # Without the flag, no advanced models should be present.
    self.assertTrue(
        all(not model.advanced for model in model_benchmarks.models))
    # With the flag, the model list should grow.
    all_model_benchmarks = benchmark.ModelBenchmarks(advanced_models=True)
    self.assertGreater(len(all_model_benchmarks.models),
                       len(model_benchmarks.models))

def test_model_benchmarks_data(self):
    """Sanity-checks the length and columns of the model benchmarks."""
    model_benchmarks = benchmark.ModelBenchmarks()
    # Cap training at two epochs so the test runs quickly.
    for model in model_benchmarks.models:
        model.epochs = 2
    model_benchmarks.run()

    # Two epochs per model yields two result rows per model.
    expected_n_rows = 2 * len(model_benchmarks.models)
    n_rows = model_benchmarks.df.shape[0]
    self.assertEqual(
        n_rows,
        expected_n_rows,
        msg=f"model benchmarks have {n_rows} rows. Expected {expected_n_rows}",
    )
    self.assertTrue(
        all(model_benchmarks.df["seconds per epoch"] > 0),
        msg="seconds per epoch should be greater than 0",
    )
    self.assertTrue(
        all(model_benchmarks.df["inference time"] > 0),
        msg="inference time should be greater than 0",
    )
    self.assertTrue(
        all(model_benchmarks.df["accuracy"] > 0)
        and all(model_benchmarks.df["accuracy"] < 1.0),
        msg="accuracy should be between 0 and 1.0",
    )

def test_model_benchmarks_run(self):
    """Ensures the model benchmarks run without raising an exception."""
    model_benchmarks = benchmark.ModelBenchmarks()
    # Cap training at two epochs so the smoke test stays fast.
    for model in model_benchmarks.models:
        model.epochs = 2
    model_benchmarks.run()
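
# A minimal sketch of running these tests from the command line, assuming the
# methods above live in a unittest.TestCase subclass named ModelBenchmarksTest
# inside benchmark_test.py (both names are assumptions, not confirmed here):
#
#     python -m unittest benchmark_test.ModelBenchmarksTest -v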