def test_default_configuration_sparse_data(self):
    for i in range(10):
        predictions, targets = _test_regressor(KNearestNeighborsRegressor,
                                                sparse=True)
        self.assertAlmostEqual(
            -0.16321841460809972,
            sklearn.metrics.r2_score(targets, predictions))
def test_default_configuration(self):
    for i in range(10):
        predictions, targets = _test_regressor(KNearestNeighborsRegressor)
        self.assertAlmostEqual(
            0.068600456340847438,
            sklearn.metrics.r2_score(targets, predictions))
def test_default_configuration(self):
    for i in range(10):
        predictions, targets = _test_regressor(AdaboostRegressor,
                                                dataset='boston')
        self.assertAlmostEqual(
            0.11053868761882502,
            sklearn.metrics.r2_score(targets, predictions))
def test_default_configuration(self):
    configuration_space = RidgeRegression.get_hyperparameter_search_space()
    default = configuration_space.get_default_configuration()

    configuration_space_preproc = \
        RandomKitchenSinks.get_hyperparameter_search_space()
    default_preproc = configuration_space_preproc.get_default_configuration()

    for i in range(10):
        # Plain RidgeRegression on the raw features should give a poor result
        predictions, targets = _test_regressor(RidgeRegression)
        self.assertAlmostEqual(
            0.32614416980439365,
            sklearn.metrics.r2_score(y_true=targets, y_pred=predictions))

        # Preprocessing with RandomKitchenSinks should do much better
        X_train, Y_train, X_test, Y_test = get_dataset(dataset='diabetes',
                                                       make_sparse=False)
        preprocessor = RandomKitchenSinks(
            random_state=1,
            **{hp_name: default_preproc[hp_name]
               for hp_name in default_preproc
               if default_preproc[hp_name] is not None})
        transformer = preprocessor.fit(X_train, Y_train)
        X_train_transformed = transformer.transform(X_train)
        X_test_transformed = transformer.transform(X_test)

        regressor = RidgeRegression(
            random_state=1,
            **{hp_name: default[hp_name] for hp_name in default
               if default[hp_name] is not None})
        predictor = regressor.fit(X_train_transformed, Y_train)
        predictions = predictor.predict(X_test_transformed)
        self.assertAlmostEqual(
            0.37183512452087852,
            sklearn.metrics.r2_score(y_true=Y_test, y_pred=predictions))
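# A minimal sketch of the `_test_regressor` helper shared by the tests in this
# file, assuming it mirrors the manual fit/predict pipeline shown in the
# RidgeRegression test above: build the component from its default
# configuration, fit on the requested dataset, and return the predictions
# together with the held-out targets. The default dataset name, exact
# signature, and random_state handling of the real helper are assumptions.
def _test_regressor(Regressor, dataset='diabetes', sparse=False):
    # Load the train/test split, optionally as a sparse matrix
    X_train, Y_train, X_test, Y_test = get_dataset(dataset=dataset,
                                                   make_sparse=sparse)
    # Instantiate the component with its default hyperparameter configuration
    configuration_space = Regressor.get_hyperparameter_search_space()
    default = configuration_space.get_default_configuration()
    regressor = Regressor(
        random_state=1,
        **{hp_name: default[hp_name] for hp_name in default
           if default[hp_name] is not None})
    # Fit, predict, and hand back (predictions, targets) for the R^2 check
    predictor = regressor.fit(X_train, Y_train)
    predictions = predictor.predict(X_test)
    return predictions, Y_test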
def test_default_configuration(self):
    for i in range(10):
        predictions, targets = _test_regressor(RandomForest)
        self.assertAlmostEqual(
            0.41224692924630502,
            sklearn.metrics.r2_score(y_true=targets, y_pred=predictions))
def test_default_configuration(self):
    for i in range(10):
        predictions, targets = _test_regressor(LibLinear_SVR,
                                               dataset='boston')
        self.assertAlmostEqual(
            0.54372712745256768,
            sklearn.metrics.r2_score(y_true=targets, y_pred=predictions))
def test_default_configuration(self):
    for i in range(10):
        predictions, targets = _test_regressor(GradientBoosting)
        self.assertAlmostEqual(
            0.35273007696557712,
            sklearn.metrics.r2_score(y_true=targets, y_pred=predictions))
def test_default_configuration_sparse(self):
    for i in range(10):
        predictions, targets = _test_regressor(AdaboostRegressor,
                                               sparse=True,
                                               dataset='boston')
        self.assertAlmostEqual(
            -0.077540100211211049,
            sklearn.metrics.r2_score(targets, predictions))
def test_default_configuration_sparse(self):
    for i in range(10):
        predictions, targets = _test_regressor(ExtraTreesRegressor,
                                               sparse=True)
        self.assertAlmostEqual(
            0.26287621251507987,
            sklearn.metrics.r2_score(targets, predictions))
def test_default_configuration(self):
    for i in range(10):
        predictions, targets = _test_regressor(ExtraTreesRegressor)
        self.assertAlmostEqual(
            0.4269923975466271,
            sklearn.metrics.r2_score(targets, predictions))
def test_default_configuration(self):
    for i in range(10):
        # Float32 leads to numeric instabilities
        predictions, targets = _test_regressor(GaussianProcess,
                                               dataset='boston')
        self.assertAlmostEqual(
            0.83362335184173442,
            sklearn.metrics.r2_score(y_true=targets, y_pred=predictions),
            places=2)
def test_default_configuration_sparse(self):
    for i in range(10):
        predictions, targets = _test_regressor(DecisionTree, sparse=True)
        self.assertAlmostEqual(
            0.021778487309118133,
            sklearn.metrics.r2_score(targets, predictions))
def test_default_configuration_boston(self):
    for i in range(10):
        predictions, targets = _test_regressor(SGD, dataset='boston')
        self.assertAlmostEqual(
            -2.9165866511775519e+31,
            sklearn.metrics.r2_score(y_true=targets, y_pred=predictions))
def test_default_configuration(self):
    for i in range(10):
        predictions, targets = _test_regressor(SGD)
        self.assertAlmostEqual(
            0.092460881802630235,
            sklearn.metrics.r2_score(y_true=targets, y_pred=predictions))
def test_default_configuration_sparse(self):
    for i in range(10):
        predictions, targets = _test_regressor(LibSVM_SVR, sparse=True)
        self.assertAlmostEqual(
            0.0098877566961463881,
            sklearn.metrics.r2_score(y_true=targets, y_pred=predictions))
def test_default_configuration(self):
    for i in range(10):
        predictions, targets = _test_regressor(LibSVM_SVR)
        self.assertAlmostEqual(
            0.12849591861430087,
            sklearn.metrics.r2_score(y_true=targets, y_pred=predictions))
def test_default_configuration_sparse(self):
    for i in range(10):
        predictions, targets = _test_regressor(RandomForest, sparse=True)
        self.assertAlmostEqual(
            0.24117530425422551,
            sklearn.metrics.r2_score(y_true=targets, y_pred=predictions))
def test_default_configuration(self):
    for i in range(10):
        predictions, targets = _test_regressor(DecisionTree)
        self.assertAlmostEqual(
            0.14886750572325669,
            sklearn.metrics.r2_score(targets, predictions))