def test_default_configuration(self):
    """Fit KNearestNeighborsRegressor with its default configuration and
    compare the R^2 score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(KNearestNeighborsRegressor)
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(0.068600456340847438, score)
def test_default_configuration_sparse(self):
    """Fit AdaboostRegressor (default config) on the sparse Boston data and
    compare the R^2 score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(
            AdaboostRegressor, sparse=True, dataset='boston')
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(0.2039634989252479, score)
def test_default_boston(self):
    """Fit the component under test (self.module) with its default
    configuration on Boston and compare the R^2 score to the stored fixture.

    Components listing ``default_boston_le_ge`` in ``self.res`` (Gaussian
    Process regression) are only required to land inside a score window
    instead of matching an exact fixture.
    """
    for _ in range(2):
        predictions, targets, n_calls = _test_regressor(
            dataset="boston", Regressor=self.module)
        if "default_boston_le_ge" in self.res:
            # Special treatment for Gaussian Process Regression: bound the
            # score from both sides rather than pinning an exact value.
            upper, lower = self.res["default_boston_le_ge"]
            score = sklearn.metrics.r2_score(y_true=targets,
                                             y_pred=predictions)
            self.assertLessEqual(score, upper)
            self.assertGreaterEqual(score, lower)
        else:
            score = sklearn.metrics.r2_score(targets, predictions)
            fixture = self.res["default_boston"]
            if score < -1e10:
                # Hugely negative scores are compared on a log scale so the
                # decimal-places tolerance stays meaningful.
                score = np.log(-score)
                fixture = np.log(-fixture)
            self.assertAlmostEqual(
                fixture,
                score,
                places=self.res.get("default_boston_places", 7),
            )
        if self.res.get("boston_n_calls"):
            self.assertEqual(self.res["boston_n_calls"], n_calls)
def test_default_configuration(self):
    """Fit ARDRegression (default config) on Boston and compare the R^2
    score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(ARDRegression,
                                               dataset='boston')
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(0.70316694175513961, score)
def test_default_configuration_sparse_data(self):
    """Fit KNearestNeighborsRegressor (default config) on sparse data and
    compare the R^2 score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(KNearestNeighborsRegressor,
                                               sparse=True)
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(-0.16321841460809972, score)
def test_default_configuration(self):
    """Fit LibLinear_SVR (default config) on Boston and compare the R^2
    score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(LibLinear_SVR,
                                               dataset='boston')
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.54372712745256768, score)
def test_default_configuration(self):
    """Check RidgeRegression with default hyperparameters, both on the raw
    diabetes data (expected to do poorly) and after a RandomKitchenSinks
    feature transformation (expected to do better)."""
    default = (RidgeRegression
               .get_hyperparameter_search_space()
               .get_default_configuration())
    default_preproc = (RandomKitchenSinks
                       .get_hyperparameter_search_space()
                       .get_default_configuration())

    for _ in range(10):
        # Raw features: this should be a bad result.
        predictions, targets = _test_regressor(RidgeRegression)
        self.assertAlmostEqual(
            0.32614416980439365,
            sklearn.metrics.r2_score(y_true=targets, y_pred=predictions))

        # With kitchen-sink features the score should improve.
        X_train, Y_train, X_test, Y_test = get_dataset(dataset='diabetes',
                                                       make_sparse=False)
        preproc_params = {name: default_preproc[name]
                          for name in default_preproc
                          if default_preproc[name] is not None}
        preprocessor = RandomKitchenSinks(random_state=1, **preproc_params)
        transformer = preprocessor.fit(X_train, Y_train)
        X_train_transformed = transformer.transform(X_train)
        X_test_transformed = transformer.transform(X_test)

        regressor_params = {name: default[name]
                            for name in default
                            if default[name] is not None}
        regressor = RidgeRegression(random_state=1, **regressor_params)
        predictor = regressor.fit(X_train_transformed, Y_train)
        predictions = predictor.predict(X_test_transformed)
        self.assertAlmostEqual(
            0.37183512452087852,
            sklearn.metrics.r2_score(y_true=Y_test, y_pred=predictions))
def test_default_configuration(self):
    """Fit AdaboostRegressor (default config) on Boston and compare the R^2
    score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(AdaboostRegressor,
                                               dataset='boston')
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(0.59461560848921158, score)
def test_default_configuration(self):
    """Fit GaussianProcess (default config) on Boston and compare the R^2
    score loosely (2 decimal places)."""
    for _ in range(10):
        # Float32 leads to numeric instabilities
        predictions, targets = _test_regressor(GaussianProcess,
                                               dataset='boston')
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.83362335184173442, score, places=2)
def test_default_configuration(self):
    """Fit GaussianProcess (default config) on Boston and compare the R^2
    score loosely (2 decimal places)."""
    # Only twice to reduce the number of warnings printed to the command line
    for _ in range(2):
        # Float32 leads to numeric instabilities
        predictions, targets = _test_regressor(GaussianProcess,
                                               dataset='boston')
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.83362335184173442, score, places=2)
def test_default_configuration(self):
    """Fit GaussianProcess (default config) on Boston and compare the R^2
    score loosely (2 decimal places)."""
    for _ in range(10):
        # Float32 leads to numeric instabilities
        predictions, targets = _test_regressor(GaussianProcess,
                                               dataset='boston')
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.83362335184173442, score, places=2)
def test_default_configuration(self):
    """Fit LibLinear_SVR (default config) on Boston and compare the R^2
    score loosely (2 decimal places)."""
    for _ in range(10):
        predictions, targets = _test_regressor(LibLinear_SVR,
                                               dataset='boston')
        # Lenient test because of travis-ci which gets quite different
        # results here!
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.68, score, places=2)
def test_default_diabetes(self):
    """Fit the component under test (self.module) with its default
    configuration on diabetes and compare the R^2 score to the stored
    fixture."""
    for _ in range(2):
        predictions, targets = _test_regressor(dataset="diabetes",
                                               Regressor=self.module)
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(
            self.res["default_diabetes"],
            score,
            places=self.res.get("default_diabetes_places", 7))
def test_default_configuration(self):
    """Fit LibLinear_SVR (default config) on Boston and compare the R^2
    score loosely (2 decimal places)."""
    for _ in range(2):
        predictions, targets = _test_regressor(LibLinear_SVR,
                                               dataset='boston')
        # Lenient test because of travis-ci which gets quite different
        # results here!
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.68, score, places=2)
def test_default_configuration(self):
    """Fit GaussianProcess (default config) on Boston and compare the R^2
    score loosely (2 decimal places)."""
    # Only twice to reduce the number of warnings printed to the command line
    for _ in range(2):
        # Float32 leads to numeric instabilities
        predictions, targets = _test_regressor(GaussianProcess,
                                               dataset='boston')
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.83362335184173442, score, places=2)
def test_default_boston_sparse(self):
    """Fit the component under test (self.module) on sparse Boston data and
    compare the R^2 score to the stored fixture; skipped when the component
    does not accept sparse input."""
    if SPARSE not in self.module.get_properties()["input"]:
        return
    for _ in range(2):
        predictions, targets, _unused = _test_regressor(
            dataset="boston", Regressor=self.module, sparse=True)
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(
            self.res["default_boston_sparse"],
            score,
            places=self.res.get("default_boston_sparse_places", 7))
def test_default_configuration(self):
    """Fit GaussianProcess (default config) on Boston and require the R^2
    score to fall inside a fixed window instead of matching an exact
    fixture."""
    # Only twice to reduce the number of warnings printed to the command line
    for _ in range(2):
        # Float32 leads to numeric instabilities
        predictions, targets = _test_regressor(GaussianProcess,
                                               dataset='boston')
        # My machine: 0.574913739659292
        # travis-ci: 0.49562471963524557
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertLessEqual(score, 0.6)
        self.assertGreaterEqual(score, 0.4)
def test_default_boston_sparse(self):
    """Fit the component under test (self.module) on sparse Boston data and
    compare the R^2 score to the stored fixture; skipped when the component
    does not accept sparse input."""
    if SPARSE not in self.module.get_properties()["input"]:
        return
    for _ in range(2):
        predictions, targets, _unused = _test_regressor(
            dataset="boston", Regressor=self.module, sparse=True)
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(
            self.res["default_boston_sparse"],
            score,
            places=self.res.get("default_boston_sparse_places", 7))
def test_default_diabetes(self):
    """Fit the component under test (self.module) on diabetes, compare the
    R^2 score to the stored fixture, and (when recorded) the number of
    fit calls."""
    for _ in range(2):
        predictions, targets, n_calls = _test_regressor(
            dataset="diabetes", Regressor=self.module)
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(
            self.res["default_diabetes"],
            score,
            places=self.res.get("default_diabetes_places", 7))
        if self.res.get("diabetes_n_calls"):
            self.assertEqual(self.res["diabetes_n_calls"], n_calls)
def test_default_diabetes(self):
    """Fit the component under test (self.module) on diabetes, compare the
    R^2 score to the stored fixture, and (when recorded) the number of
    fit calls. No-op on the abstract base class itself."""
    if self.__class__ == BaseRegressionComponentTest:
        return
    for _ in range(2):
        predictions, targets, n_calls = _test_regressor(
            dataset="diabetes", Regressor=self.module)
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(
            self.res["default_diabetes"],
            score,
            places=self.res.get("default_diabetes_places", 7))
        if self.res.get("diabetes_n_calls"):
            self.assertEqual(self.res["diabetes_n_calls"], n_calls)
def test_default_configuration(self):
    """Check RidgeRegression with default hyperparameters, both on the raw
    diabetes data (expected to do poorly) and after a RandomKitchenSinks
    feature transformation (expected to do better)."""
    default = (RidgeRegression
               .get_hyperparameter_search_space()
               .get_default_configuration())
    default_preproc = (RandomKitchenSinks
                       .get_hyperparameter_search_space()
                       .get_default_configuration())

    for _ in range(2):
        # Raw features: this should be a bad result.
        predictions, targets = _test_regressor(RidgeRegression)
        self.assertAlmostEqual(
            0.32614416980439365,
            sklearn.metrics.r2_score(y_true=targets, y_pred=predictions))

        # With kitchen-sink features the score should improve.
        X_train, Y_train, X_test, Y_test = get_dataset(dataset='diabetes',
                                                       make_sparse=False)
        preproc_params = {name: default_preproc[name]
                          for name in default_preproc
                          if default_preproc[name] is not None}
        preprocessor = RandomKitchenSinks(random_state=1, **preproc_params)
        transformer = preprocessor.fit(X_train, Y_train)
        X_train_transformed = transformer.transform(X_train)
        X_test_transformed = transformer.transform(X_test)

        regressor_params = {name: default[name]
                            for name in default
                            if default[name] is not None}
        regressor = RidgeRegression(random_state=1, **regressor_params)
        predictor = regressor.fit(X_train_transformed, Y_train)
        predictions = predictor.predict(X_test_transformed)
        self.assertAlmostEqual(
            0.37183512452087852,
            sklearn.metrics.r2_score(y_true=Y_test, y_pred=predictions))
def test_default_configuration_sparse(self):
    """Fit XGradientBoostingRegressor (default config) on sparse data and
    compare the R^2 score to a fixed reference value."""
    # Removed long-dead commented-out iterative-fit test code that had been
    # kept here as comments.
    for _ in range(2):
        predictions, targets = _test_regressor(XGradientBoostingRegressor,
                                               sparse=True)
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.20743694821393754, score)
def test_default_configuration_sparse(self):
    """Fit XGradientBoostingRegressor (default config) on sparse data and
    compare the R^2 score to a fixed reference value."""
    # Removed long-dead commented-out iterative-fit test code that had been
    # kept here as comments.
    for _ in range(10):
        predictions, targets = _test_regressor(XGradientBoostingRegressor,
                                               sparse=True)
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.20743694821393754, score)
def test_default_boston(self):
    """Fit the component under test (self.module) on Boston and compare the
    R^2 score to the stored fixture; components listing
    ``default_boston_le_ge`` (Gaussian Process regression) are only bounded
    inside a score window."""
    for _ in range(2):
        predictions, targets = _test_regressor(dataset="boston",
                                               Regressor=self.module)
        if "default_boston_le_ge" in self.res:
            # Special treatment for Gaussian Process Regression
            upper, lower = self.res["default_boston_le_ge"]
            score = sklearn.metrics.r2_score(y_true=targets,
                                             y_pred=predictions)
            self.assertLessEqual(score, upper)
            self.assertGreaterEqual(score, lower)
        else:
            score = sklearn.metrics.r2_score(targets, predictions)
            self.assertAlmostEqual(
                self.res["default_boston"],
                score,
                places=self.res.get("default_boston_places", 7))
def test_default_configuration(self):
    """Fit DecisionTree with its default configuration and compare the R^2
    score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(DecisionTree)
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(0.14886750572325669, score)
def test_default_configuration(self):
    """Fit SGD with its default configuration and compare the R^2 score to
    a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(SGD)
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.078043497701660636, score)
def test_default_configuration_sparse(self):
    """Fit DecisionTree (default config) on sparse data and compare the R^2
    score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(DecisionTree, sparse=True)
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(0.021778487309118133, score)
def test_default_configuration(self):
    """Fit LinReg with its default configuration and compare the R^2 score
    loosely (1 decimal place)."""
    # Removed a leftover debug print of the score.
    for _ in range(10):
        predictions, targets = _test_regressor(LinReg)
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.1212, score, places=1)
def test_default_configuration_iterative_fit(self):
    """Fit GradientBoosting with its default configuration and compare the
    R^2 score to a fixed reference value.

    NOTE(review): despite the name, this calls ``_test_regressor`` rather
    than ``_test_regressor_iterative_fit`` — confirm which was intended.
    """
    for _ in range(10):
        predictions, targets = _test_regressor(GradientBoosting)
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.35273007696557712, score)
def test_default_configuration_sparse(self):
    """Fit DecisionTree (default config) on sparse data and compare the R^2
    score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(DecisionTree, sparse=True)
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(-0.020818312539637507, score)
def test_default_configuration(self):
    """Fit LibSVM_SVR with its default configuration and compare the R^2
    score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(LibSVM_SVR)
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.12849591861430087, score)
def test_default_configuration_sparse(self):
    """Fit DecisionTree (default config) on sparse data and compare the R^2
    score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(DecisionTree, sparse=True)
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(0.021778487309118133, score)
def test_default_configuration(self):
    """Fit DecisionTree with its default configuration and compare the R^2
    score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(DecisionTree)
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(0.1564592449511697, score)
def test_default_configuration_sparse(self):
    """Fit RandomForest (default config) on sparse data and compare the R^2
    score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(RandomForest, sparse=True)
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.24225685933770469, score)
def test_default_configuration(self):
    """Fit SGD with its default configuration and compare the R^2 score to
    a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(SGD)
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.066576586105546731, score)
def test_default_configuration(self):
    """Fit XGradientBoostingRegressor with its default configuration and
    compare the R^2 score to a fixed reference value."""
    for _ in range(2):
        predictions, targets = _test_regressor(XGradientBoostingRegressor)
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.34009199992306871, score)
def test_default_configuration(self):
    """Fit SGD with its default configuration and compare the R^2 score to
    a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(SGD)
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.092460881802630235, score)
def test_default_configuration(self):
    """Fit DecisionTree with its default configuration and compare the R^2
    score to a fixed reference value."""
    for _ in range(2):
        predictions, targets = _test_regressor(DecisionTree)
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(0.1564592449511697, score)
def test_default_configuration(self):
    """Fit RegDeepNet with its default configuration and compare the R^2
    score to a fixed reference value."""
    # Removed a leftover debug print of the score.
    for _ in range(10):
        predictions, targets = _test_regressor(RegDeepNet)
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        # NOTE(review): the default tolerance (places=7) is extremely strict
        # for a network score pinned at 0.43 — confirm whether a looser
        # tolerance (e.g. places=2, as sibling tests use) was intended.
        self.assertAlmostEqual(0.43, score)
def test_default_configuration(self):
    """Fit XGradientBoostingRegressor with its default configuration and
    compare the R^2 score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(XGradientBoostingRegressor)
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.34009199992306871, score)
def test_default_configuration(self):
    """Fit ExtraTreesRegressor with its default configuration and compare
    the R^2 score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(ExtraTreesRegressor)
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(0.4269923975466271, score)
def test_default_configuration_sparse(self):
    """Fit ExtraTreesRegressor (default config) on sparse data and compare
    the R^2 score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(ExtraTreesRegressor,
                                               sparse=True)
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(0.26287621251507987, score)
def test_default_configuration(self):
    """Fit RandomForest with its default configuration and compare the R^2
    score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(RandomForest)
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.41795829411621988, score)
def test_default_configuration_sparse(self):
    """Fit LibSVM_SVR (default config) on sparse data and compare the R^2
    score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(LibSVM_SVR, sparse=True)
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.0098877566961463881, score)
def test_default_configuration_sparse(self):
    """Fit DecisionTree (default config) on sparse data and compare the R^2
    score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(DecisionTree, sparse=True)
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(-0.020818312539637507, score)
def test_default_configuration(self):
    """Fit GradientBoosting with its default configuration and compare the
    R^2 score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(GradientBoosting)
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.35273007696557712, score)
def test_default_configuration(self):
    """Fit DecisionTree with its default configuration and compare the R^2
    score to a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(DecisionTree)
        score = sklearn.metrics.r2_score(targets, predictions)
        self.assertAlmostEqual(0.14886750572325669, score)
def test_default_configuration(self):
    """Fit SGD with its default configuration and compare the R^2 score to
    a fixed reference value."""
    for _ in range(10):
        predictions, targets = _test_regressor(SGD)
        score = sklearn.metrics.r2_score(y_true=targets, y_pred=predictions)
        self.assertAlmostEqual(0.092460881802630235, score)