def test_superior_models(self):
    # StepM and SPA on NumPy array losses shifted by a linear adjustment
    adj_models = self.models - linspace(-0.4, 0.4, self.k)
    stepm = StepM(self.benchmark, adj_models, reps=120)
    stepm.compute()
    superior_models = stepm.superior_models
    spa = SPA(self.benchmark, adj_models, reps=120)
    spa.compute()
    spa.pvalues
    spa.critical_values(0.05)
    spa.better_models(0.05)
    # Repeat with pandas DataFrame/Series inputs
    adj_models = self.models_df - linspace(-3.0, 3.0, self.k)
    stepm = StepM(self.benchmark_series, adj_models, reps=120)
    stepm.compute()
    superior_models = stepm.superior_models
def test_errors(self):
    spa = SPA(self.benchmark, self.models, reps=100)
    # Accessing results before compute() raises RuntimeError
    with pytest.raises(RuntimeError):
        spa.pvalues
    with pytest.raises(RuntimeError):
        spa.critical_values()
    with pytest.raises(RuntimeError):
        spa.better_models()
    # Invalid constructor and method arguments raise ValueError
    with pytest.raises(ValueError):
        SPA(self.benchmark, self.models, bootstrap='unknown')
    spa.compute()
    with pytest.raises(ValueError):
        spa.better_models(pvalue_type='unknown')
    with pytest.raises(ValueError):
        spa.critical_values(pvalue=1.0)