def test_superior_models(self):
    """Smoke test StepM and SPA on models perturbed to straddle the benchmark.

    Runs the full compute pipeline on both ndarray and DataFrame/Series
    inputs and asserts that the post-compute result properties are usable.
    """
    # Shift model losses by an evenly spaced offset so some models beat
    # the benchmark and some do not.
    adj_models = self.models - linspace(-0.4, 0.4, self.k)
    stepm = StepM(self.benchmark, adj_models, reps=120)
    stepm.compute()
    superior_models = stepm.superior_models
    # Previously the result was assigned and discarded; assert it is
    # actually produced after compute().
    assert superior_models is not None
    spa = SPA(self.benchmark, adj_models, reps=120)
    spa.compute()
    # Exercise the post-compute API surface; none of these may raise.
    assert spa.pvalues is not None
    spa.critical_values(0.05)
    spa.better_models(0.05)
    # Repeat with pandas inputs and a larger perturbation.
    adj_models = self.models_df - linspace(-3.0, 3.0, self.k)
    stepm = StepM(self.benchmark_series, adj_models, reps=120)
    stepm.compute()
    superior_models = stepm.superior_models
    assert superior_models is not None
def test_errors(self):
    """Verify error handling: result access before compute() raises
    RuntimeError, and invalid arguments raise ValueError."""
    spa = SPA(self.benchmark, self.models, reps=100)
    # All result accessors are unavailable until compute() has run.
    with pytest.raises(RuntimeError):
        spa.pvalues
    with pytest.raises(RuntimeError):
        spa.critical_values()
    with pytest.raises(RuntimeError):
        spa.better_models()
    # An unrecognized bootstrap name is rejected at construction.
    with pytest.raises(ValueError):
        SPA(self.benchmark, self.models, bootstrap='unknown')
    spa.compute()
    # After compute(), invalid keyword values are still rejected.
    with pytest.raises(ValueError):
        spa.better_models(pvalue_type='unknown')
    with pytest.raises(ValueError):
        spa.critical_values(pvalue=1.0)
def test_pvalues_and_critvals(self):
    """Replicate SPA p-values and critical values by hand from the
    simulated test statistics and compare against the class results."""
    spa = SPA(self.benchmark, self.models, reps=100)
    # FIX: seed BEFORE compute() so the bootstrap draws are deterministic.
    # The original seeded after compute(), which had no effect on the
    # already-generated simulated values.
    spa.seed(23456)
    spa.compute()
    simulated_vals = spa._simulated_vals
    # Max over models for each bootstrap replication and variant
    # (lower / consistent / upper recentering).
    max_stats = np.max(simulated_vals, 0)
    # Observed test statistic: largest mean loss differential.
    max_loss_diff = np.max(spa._loss_diff.mean(0), 0)
    pvalues = np.mean(max_loss_diff <= max_stats, 0)
    pvalues = pd.Series(pvalues, index=['lower', 'consistent', 'upper'])
    assert_series_equal(pvalues, spa.pvalues)
    # 90th percentile of the simulated distribution == critical value at 10%.
    crit_vals = np.percentile(max_stats, 90.0, axis=0)
    crit_vals = pd.Series(crit_vals, index=['lower', 'consistent', 'upper'])
    assert_series_equal(spa.critical_values(0.10), crit_vals)