def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space for a liblinear-style classifier with l1 penalty."""
    cs = ConfigurationSpace()

    penalty = cs.add_hyperparameter(Constant("penalty", "l1"))
    loss = cs.add_hyperparameter(CategoricalHyperparameter(
        "loss", ["hinge", "squared_hinge"], default="squared_hinge"))
    cs.add_hyperparameter(Constant("dual", "False"))
    # The tolerance range is set ad-hoc.
    cs.add_hyperparameter(UniformFloatHyperparameter(
        "tol", 1e-5, 1e-1, default=1e-4, log=True))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        "C", 0.03125, 32768, log=True, default=1.0))
    cs.add_hyperparameter(Constant("multi_class", "ovr"))
    # These are set ad-hoc.
    cs.add_hyperparameter(Constant("fit_intercept", "True"))
    cs.add_hyperparameter(Constant("intercept_scaling", 1))

    # The l1 penalty may not be combined with hinge loss.
    cs.add_forbidden_clause(ForbiddenAndConjunction(
        ForbiddenEqualsClause(penalty, "l1"),
        ForbiddenEqualsClause(loss, "hinge")))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space for an SGD-based classifier."""
    cs = ConfigurationSpace()

    loss = CategoricalHyperparameter(
        "loss",
        ["hinge", "log", "modified_huber", "squared_hinge", "perceptron"],
        default="hinge")
    penalty = CategoricalHyperparameter(
        "penalty", ["l1", "l2", "elasticnet"], default="l2")
    alpha = UniformFloatHyperparameter(
        "alpha", 10 ** -7, 10 ** -1, log=True, default=0.0001)
    l1_ratio = UniformFloatHyperparameter("l1_ratio", 0, 1, default=0.15)
    fit_intercept = UnParametrizedHyperparameter("fit_intercept", "True")
    n_iter = UniformIntegerHyperparameter("n_iter", 5, 1000, default=20)
    epsilon = UniformFloatHyperparameter(
        "epsilon", 1e-5, 1e-1, default=1e-4, log=True)
    learning_rate = CategoricalHyperparameter(
        "learning_rate", ["optimal", "invscaling", "constant"],
        default="optimal")
    eta0 = UniformFloatHyperparameter("eta0", 10 ** -7, 0.1, default=0.01)
    power_t = UniformFloatHyperparameter("power_t", 1e-5, 1, default=0.5)
    # This does not allow for other resampling methods!
    class_weight = CategoricalHyperparameter(
        "class_weight", ["None", "auto"], default="None")

    for hp in (loss, penalty, alpha, l1_ratio, fit_intercept, n_iter,
               epsilon, learning_rate, eta0, power_t, class_weight):
        cs.add_hyperparameter(hp)
    # TODO add passive/aggressive here, although not properly documented?

    # l1_ratio only matters for the elastic net penalty; epsilon only for
    # the modified Huber loss.
    cs.add_condition(EqualsCondition(l1_ratio, penalty, "elasticnet"))
    cs.add_condition(EqualsCondition(epsilon, loss, "modified_huber"))
    # eta0 seems to be always active according to the source code; when
    # learning_rate is set to optimal, eta0 is the starting value:
    # https://github.com/scikit-learn/scikit-learn/blob/0.15.X/sklearn/linear_model/sgd_fast.pyx
    # power_t only applies to the inverse-scaling schedule.
    cs.add_condition(EqualsCondition(power_t, learning_rate, "invscaling"))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space for a kernel-based dimensionality reduction."""
    n_components = UniformIntegerHyperparameter(
        "n_components", 10, 2000, default=100)
    kernel = CategoricalHyperparameter(
        'kernel', ['poly', 'rbf', 'sigmoid', 'cosine'], 'rbf')
    degree = UniformIntegerHyperparameter('degree', 2, 5, 3)
    gamma = UniformFloatHyperparameter(
        "gamma", 3.0517578125e-05, 8, log=True, default=1.0)
    coef0 = UniformFloatHyperparameter("coef0", -1, 1, default=0)

    cs = ConfigurationSpace()
    for hp in (n_components, kernel, degree, gamma, coef0):
        cs.add_hyperparameter(hp)

    # degree only matters for poly; coef0 for poly/sigmoid; gamma for
    # poly/rbf.
    cs.add_condition(EqualsCondition(degree, kernel, "poly"))
    cs.add_condition(InCondition(coef0, kernel, ["poly", "sigmoid"]))
    cs.add_condition(InCondition(gamma, kernel, ["poly", "rbf"]))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space for a liblinear-style regressor."""
    cs = ConfigurationSpace()

    cs.add_hyperparameter(UniformFloatHyperparameter(
        "C", 0.03125, 32768, log=True, default=1.0))
    loss = cs.add_hyperparameter(CategoricalHyperparameter(
        "loss", ["epsilon_insensitive", "squared_epsilon_insensitive"],
        default="squared_epsilon_insensitive"))
    # Random guess for the epsilon range.
    cs.add_hyperparameter(UniformFloatHyperparameter(
        name="epsilon", lower=0.001, upper=1, default=0.1, log=True))
    dual = cs.add_hyperparameter(Constant("dual", "False"))
    # These are set ad-hoc.
    cs.add_hyperparameter(UniformFloatHyperparameter(
        "tol", 1e-5, 1e-1, default=1e-4, log=True))
    cs.add_hyperparameter(Constant("fit_intercept", "True"))
    cs.add_hyperparameter(Constant("intercept_scaling", 1))

    # epsilon_insensitive loss may not be combined with the primal
    # (dual="False") formulation.
    cs.add_forbidden_clause(ForbiddenAndConjunction(
        ForbiddenEqualsClause(dual, "False"),
        ForbiddenEqualsClause(loss, "epsilon_insensitive")))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space for a ridge-style linear model."""
    cs = ConfigurationSpace()
    cs.add_hyperparameter(UniformFloatHyperparameter(
        "alpha", 10 ** -5, 10., log=True, default=1.))
    cs.add_hyperparameter(UnParametrizedHyperparameter(
        "fit_intercept", "True"))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        "tol", 1e-5, 1e-1, default=1e-4, log=True))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space for a Gaussian-process model."""
    cs = ConfigurationSpace()
    cs.add_hyperparameter(UniformFloatHyperparameter(
        name="nugget", lower=0.0001, upper=10, default=0.1, log=True))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        name="thetaL", lower=1e-6, upper=1e-3, default=1e-4, log=True))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        name="thetaU", lower=0.2, upper=10, default=1.0, log=True))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space for a discriminant-analysis style classifier."""
    cs = ConfigurationSpace()
    shrinkage = cs.add_hyperparameter(CategoricalHyperparameter(
        "shrinkage", ["None", "auto", "manual"], default="None"))
    shrinkage_factor = cs.add_hyperparameter(UniformFloatHyperparameter(
        "shrinkage_factor", 0., 1., 0.5))
    cs.add_hyperparameter(UniformIntegerHyperparameter(
        'n_components', 1, 250, default=10))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        "tol", 1e-5, 1e-1, default=1e-4, log=True))
    # An explicit factor is only meaningful when shrinkage is "manual".
    cs.add_condition(EqualsCondition(shrinkage_factor, shrinkage, "manual"))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space for gradient boosting with selectable loss."""
    cs = ConfigurationSpace()
    loss = cs.add_hyperparameter(CategoricalHyperparameter(
        "loss", ["ls", "lad", "huber", "quantile"], default="ls"))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        name="learning_rate", lower=0.0001, upper=1, default=0.1, log=True))
    cs.add_hyperparameter(Constant("n_estimators", 100))
    cs.add_hyperparameter(UniformIntegerHyperparameter(
        name="max_depth", lower=1, upper=10, default=3))
    cs.add_hyperparameter(UniformIntegerHyperparameter(
        name="min_samples_split", lower=2, upper=20, default=2, log=False))
    cs.add_hyperparameter(UniformIntegerHyperparameter(
        name="min_samples_leaf", lower=1, upper=20, default=1, log=False))
    cs.add_hyperparameter(UnParametrizedHyperparameter(
        "min_weight_fraction_leaf", 0.))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        name="subsample", lower=0.01, upper=1.0, default=1.0, log=False))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        "max_features", 0.5, 5, default=1))
    cs.add_hyperparameter(UnParametrizedHyperparameter(
        name="max_leaf_nodes", value="None"))
    alpha = cs.add_hyperparameter(UniformFloatHyperparameter(
        "alpha", lower=0.75, upper=0.99, default=0.9))
    # alpha is only used by the huber and quantile losses.
    cs.add_condition(InCondition(alpha, loss, ['huber', 'quantile']))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space for an AdaBoost-style ensemble."""
    cs = ConfigurationSpace()
    # base_estimator = Constant(name="base_estimator", value="None")
    cs.add_hyperparameter(UniformIntegerHyperparameter(
        name="n_estimators", lower=50, upper=500, default=50, log=False))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        name="learning_rate", lower=0.0001, upper=2, default=0.1, log=True))
    cs.add_hyperparameter(CategoricalHyperparameter(
        name="algorithm", choices=["SAMME.R", "SAMME"], default="SAMME.R"))
    cs.add_hyperparameter(UniformIntegerHyperparameter(
        name="max_depth", lower=1, upper=10, default=1, log=False))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space for a random-forest style classifier."""
    cs = ConfigurationSpace()
    cs.add_hyperparameter(Constant("n_estimators", 100))
    cs.add_hyperparameter(CategoricalHyperparameter(
        "criterion", ["gini", "entropy"], default="gini"))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        "max_features", 0.5, 5, default=1))
    cs.add_hyperparameter(UnParametrizedHyperparameter("max_depth", "None"))
    cs.add_hyperparameter(UniformIntegerHyperparameter(
        "min_samples_split", 2, 20, default=2))
    cs.add_hyperparameter(UniformIntegerHyperparameter(
        "min_samples_leaf", 1, 20, default=1))
    cs.add_hyperparameter(UnParametrizedHyperparameter(
        "max_leaf_nodes", "None"))
    cs.add_hyperparameter(CategoricalHyperparameter(
        "bootstrap", ["True", "False"], default="True"))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space for a random-forest style regressor."""
    cs = ConfigurationSpace()
    # Ranges copied from the classification counterpart.
    cs.add_hyperparameter(Constant("n_estimators", 100))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        "max_features", 0.5, 5, default=1))
    cs.add_hyperparameter(UnParametrizedHyperparameter("max_depth", "None"))
    cs.add_hyperparameter(UniformIntegerHyperparameter(
        name="min_samples_split", lower=2, upper=20, default=2, log=False))
    cs.add_hyperparameter(UniformIntegerHyperparameter(
        name="min_samples_leaf", lower=1, upper=20, default=1, log=False))
    cs.add_hyperparameter(CategoricalHyperparameter(
        name="bootstrap", choices=["True", "False"], default="True"))
    cs.add_hyperparameter(Constant(name="criterion", value="mse"))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space for a tree-ensemble classifier (no bootstrap by default)."""
    cs = ConfigurationSpace()
    cs.add_hyperparameter(Constant("n_estimators", 100))
    cs.add_hyperparameter(CategoricalHyperparameter(
        "criterion", ["gini", "entropy"], default="gini"))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        "max_features", 0.5, 5, default=1))
    cs.add_hyperparameter(UnParametrizedHyperparameter(
        name="max_depth", value="None"))
    cs.add_hyperparameter(UniformIntegerHyperparameter(
        "min_samples_split", 2, 20, default=2))
    cs.add_hyperparameter(UniformIntegerHyperparameter(
        "min_samples_leaf", 1, 20, default=1))
    cs.add_hyperparameter(Constant('min_weight_fraction_leaf', 0.))
    cs.add_hyperparameter(CategoricalHyperparameter(
        "bootstrap", ["True", "False"], default="False"))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space with the two hyperparameters N and precond."""
    cs = ConfigurationSpace()
    cs.add_hyperparameter(UniformIntegerHyperparameter("N", 5, 20, default=10))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        "precond", 0, 0.5, default=0.1))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space with a single regularization parameter."""
    cs = ConfigurationSpace()
    cs.add_hyperparameter(UniformFloatHyperparameter(
        'reg_param', 0.0, 10.0, default=0.5))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Extend the constrained feed-net space with Adam solver settings."""
    cs = ConstrainedFeedNet.get_hyperparameter_search_space()
    cs.add_hyperparameter(Constant(name='solver', value='adam'))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        "beta1", 1e-4, 0.1, log=True, default=0.1))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        "beta2", 1e-4, 0.1, log=True, default=0.1))
    return cs
def _sample_UniformIntegerHyperparameter(self, uihp):
    """Sample an integer hyperparameter by rounding a continuous draw."""
    # Widen the bounds by just under 0.5 so that, after rounding, the
    # extreme integer values are reachable like any other.
    as_float = UniformFloatHyperparameter(
        uihp.name, uihp.lower - 0.4999, uihp.upper + 0.4999,
        log=uihp.log, q=uihp.q, default=uihp.default)
    sampled = self._sample_UniformFloatHyperparameter(as_float)
    return uihp.instantiate(int(np.round(sampled.value, 0)))
def get_hyperparameter_search_space(dataset_properties=None):
    """Extend the constrained feed-net space with Adadelta solver settings."""
    cs = ConstrainedFeedNet.get_hyperparameter_search_space()
    cs.add_hyperparameter(Constant(name='solver', value='adadelta'))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        'rho', 0.0, 1.0, default=0.95))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Build the configuration space for an SGD-based regressor."""
    cs = ConfigurationSpace()
    loss = cs.add_hyperparameter(CategoricalHyperparameter(
        "loss", ["squared_loss", "huber", "epsilon_insensitive",
                 "squared_epsilon_insensitive"],
        default="squared_loss"))
    penalty = cs.add_hyperparameter(CategoricalHyperparameter(
        "penalty", ["l1", "l2", "elasticnet"], default="l2"))
    # NOTE(review): the SGD classification space uses 10**-7 (=1e-7) as the
    # lower bound, while 10e-7 here equals 1e-6 — confirm this is intended.
    alpha = cs.add_hyperparameter(UniformFloatHyperparameter(
        "alpha", 10e-7, 1e-1, log=True, default=0.01))
    l1_ratio = cs.add_hyperparameter(UniformFloatHyperparameter(
        "l1_ratio", 1e-9, 1., log=True, default=0.15))
    fit_intercept = cs.add_hyperparameter(UnParametrizedHyperparameter(
        "fit_intercept", "True"))
    n_iter = cs.add_hyperparameter(UniformIntegerHyperparameter(
        "n_iter", 5, 1000, log=True, default=20))
    epsilon = cs.add_hyperparameter(UniformFloatHyperparameter(
        "epsilon", 1e-5, 1e-1, default=1e-4, log=True))
    learning_rate = cs.add_hyperparameter(CategoricalHyperparameter(
        "learning_rate", ["optimal", "invscaling", "constant"],
        default="optimal"))
    eta0 = cs.add_hyperparameter(UniformFloatHyperparameter(
        "eta0", 10 ** -7, 0.1, default=0.01))
    power_t = cs.add_hyperparameter(UniformFloatHyperparameter(
        "power_t", 1e-5, 1, default=0.5))
    average = cs.add_hyperparameter(CategoricalHyperparameter(
        "average", ["False", "True"], default="False"))
    # TODO add passive/aggressive here, although not properly documented?

    # l1_ratio only matters for the elastic net penalty.
    elasticnet = EqualsCondition(l1_ratio, penalty, "elasticnet")
    # epsilon is only used by these three losses.
    epsilon_condition = InCondition(epsilon, loss,
                                    ["huber", "epsilon_insensitive",
                                     "squared_epsilon_insensitive"])
    # eta0 seems to be always active according to the source code; when
    # learning_rate is set to optimial, eta0 is the starting value:
    # https://github.com/scikit-learn/scikit-learn/blob/0.15.X/sklearn/linear_model/sgd_fast.pyx
    # eta0_and_inv = EqualsCondition(eta0, learning_rate, "invscaling")
    #eta0_and_constant = EqualsCondition(eta0, learning_rate, "constant")
    #eta0_condition = OrConjunction(eta0_and_inv, eta0_and_constant)
    # power_t only applies to the inverse-scaling learning-rate schedule.
    power_t_condition = EqualsCondition(power_t, learning_rate,
                                        "invscaling")
    cs.add_condition(elasticnet)
    cs.add_condition(epsilon_condition)
    cs.add_condition(power_t_condition)
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space for a kernel SVM."""
    C = UniformFloatHyperparameter("C", 0.03125, 32768, log=True, default=1.0)
    # No linear kernel here, because we have liblinear.
    kernel = CategoricalHyperparameter(
        name="kernel", choices=["rbf", "poly", "sigmoid"], default="rbf")
    degree = UniformIntegerHyperparameter("degree", 1, 5, default=3)
    gamma = UniformFloatHyperparameter(
        "gamma", 3.0517578125e-05, 8, log=True, default=0.1)
    # TODO this is totally ad-hoc
    coef0 = UniformFloatHyperparameter("coef0", -1, 1, default=0)
    # probability is no hyperparameter, but an argument to the SVM algo
    shrinking = CategoricalHyperparameter(
        "shrinking", ["True", "False"], default="True")
    tol = UniformFloatHyperparameter("tol", 1e-5, 1e-1, default=1e-4, log=True)
    # cache size is not a hyperparameter, but an argument to the program!
    max_iter = UnParametrizedHyperparameter("max_iter", -1)

    cs = ConfigurationSpace()
    for hp in (C, kernel, degree, gamma, coef0, shrinking, tol, max_iter):
        cs.add_hyperparameter(hp)

    # degree only matters for poly; coef0 only for poly and sigmoid.
    cs.add_condition(EqualsCondition(degree, kernel, "poly"))
    cs.add_condition(InCondition(coef0, kernel, ["poly", "sigmoid"]))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space for PCA: variance to keep and whitening flag."""
    cs = ConfigurationSpace()
    cs.add_hyperparameter(UniformFloatHyperparameter(
        "keep_variance", 0.5, 0.9999, default=0.9999))
    cs.add_hyperparameter(CategoricalHyperparameter(
        "whiten", ["False", "True"], default="False"))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space with gamma and number of output components."""
    cs = ConfigurationSpace()
    cs.add_hyperparameter(UniformFloatHyperparameter(
        "gamma", 0.3, 2., default=1.0))
    cs.add_hyperparameter(UniformIntegerHyperparameter(
        "n_components", 50, 10000, default=100, log=True))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space for a kernel approximation.

    The chi2 kernel is only offered when dataset_properties does not
    mark the data as sparse or signed.
    """
    if dataset_properties is not None and \
            (dataset_properties.get("sparse") is True or
             dataset_properties.get("signed") is False):
        allow_chi2 = False
    else:
        allow_chi2 = True

    possible_kernels = ['poly', 'rbf', 'sigmoid', 'cosine']
    if allow_chi2:
        possible_kernels.append("chi2")

    kernel = CategoricalHyperparameter('kernel', possible_kernels, 'rbf')
    degree = UniformIntegerHyperparameter('degree', 2, 5, 3)
    gamma = UniformFloatHyperparameter(
        "gamma", 3.0517578125e-05, 8, log=True, default=0.1)
    coef0 = UniformFloatHyperparameter("coef0", -1, 1, default=0)
    n_components = UniformIntegerHyperparameter(
        "n_components", 50, 10000, default=100, log=True)

    cs = ConfigurationSpace()
    for hp in (kernel, degree, gamma, coef0, n_components):
        cs.add_hyperparameter(hp)

    # gamma applies to poly/rbf/sigmoid and, when allowed, chi2;
    # degree only to poly; coef0 to poly and sigmoid.
    gamma_kernels = ["poly", "rbf", "sigmoid"]
    if allow_chi2:
        gamma_kernels.append("chi2")
    cs.add_condition(EqualsCondition(degree, kernel, "poly"))
    cs.add_condition(InCondition(coef0, kernel, ["poly", "sigmoid"]))
    cs.add_condition(InCondition(gamma, kernel, gamma_kernels))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space with a single hyperparameter alpha."""
    cs = ConfigurationSpace()
    cs.add_hyperparameter(UniformFloatHyperparameter(
        name="alpha", lower=0.0001, upper=10, default=1.0, log=True))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space for a gradient boosting regressor."""
    # Disabled alternatives: , "huber", "quantile"], default='ls')
    loss = CategoricalHyperparameter(
        name="loss", choices=["ls", "lad"], default='ls')
    learning_rate = UniformFloatHyperparameter(
        name="learning_rate", lower=0.0001, upper=1, default=0.1, log=True)
    subsample = UniformFloatHyperparameter(
        name="subsample", lower=0.01, upper=1.0, default=1.0, log=False)
    n_estimators = Constant("n_estimators", 100)
    max_features = UniformFloatHyperparameter(
        "max_features", 0.5, 5, default=1)
    max_depth = UniformIntegerHyperparameter(
        name="max_depth", lower=1, upper=10, default=3)
    min_samples_split = UniformIntegerHyperparameter(
        name="min_samples_split", lower=2, upper=20, default=2, log=False)
    min_samples_leaf = UniformIntegerHyperparameter(
        name="min_samples_leaf", lower=1, upper=20, default=1, log=False)

    cs = ConfigurationSpace()
    for hp in (n_estimators, loss, learning_rate, max_features, max_depth,
               min_samples_split, min_samples_leaf, subsample):
        cs.add_hyperparameter(hp)
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space for percentile-based feature selection."""
    cs = ConfigurationSpace()
    cs.add_hyperparameter(UniformFloatHyperparameter(
        "percentile", lower=1, upper=99, default=50))
    cs.add_hyperparameter(UnParametrizedHyperparameter(
        name="score_func", value="f_regression"))
    return cs
def _lr_policy_configuration_space(cs, policy=None):
    """Add learning-rate policy hyperparameters for *policy* to *cs*.

    Recognized policies are 'inv', 'exp' and 'step'; any other value
    falls back to a fixed learning rate. Returns the (mutated) space.
    """
    if policy == 'inv':
        cs.add_hyperparameter(Constant(name='lr_policy', value='inv'))
        cs.add_hyperparameter(UniformFloatHyperparameter(
            name="gamma", lower=1e-2, upper=1.0, log=True, default=1e-2))
        cs.add_hyperparameter(UniformFloatHyperparameter(
            "power", 0.0, 1.0, default=0.5))
    elif policy == 'exp':
        cs.add_hyperparameter(Constant(name='lr_policy', value='exp'))
        cs.add_hyperparameter(UniformFloatHyperparameter(
            name="gamma", lower=0.7, upper=1.0, default=0.79))
    elif policy == 'step':
        cs.add_hyperparameter(Constant(name='lr_policy', value='step'))
        cs.add_hyperparameter(UniformFloatHyperparameter(
            name="gamma", lower=1e-2, upper=1.0, log=True, default=1e-2))
        cs.add_hyperparameter(CategoricalHyperparameter(
            "epoch_step", [6, 8, 12], default=8))
    else:
        cs.add_hyperparameter(Constant(name='lr_policy', value='fixed'))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space for gradient boosting with fixed deviance loss."""
    cs = ConfigurationSpace()
    cs.add_hyperparameter(Constant("loss", "deviance"))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        name="learning_rate", lower=0.0001, upper=1, default=0.1, log=True))
    cs.add_hyperparameter(Constant("n_estimators", 100))
    cs.add_hyperparameter(UniformIntegerHyperparameter(
        name="max_depth", lower=1, upper=10, default=3))
    cs.add_hyperparameter(UniformIntegerHyperparameter(
        name="min_samples_split", lower=2, upper=20, default=2, log=False))
    cs.add_hyperparameter(UniformIntegerHyperparameter(
        name="min_samples_leaf", lower=1, upper=20, default=1, log=False))
    cs.add_hyperparameter(UnParametrizedHyperparameter(
        "min_weight_fraction_leaf", 0.))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        name="subsample", lower=0.01, upper=1.0, default=1.0, log=False))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        "max_features", 0.5, 5, default=1))
    cs.add_hyperparameter(UnParametrizedHyperparameter(
        name="max_leaf_nodes", value="None"))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Configuration space for a hinge-loss linear model."""
    cs = ConfigurationSpace()
    cs.add_hyperparameter(CategoricalHyperparameter(
        "loss", ["hinge", "squared_hinge"], default="hinge"))
    cs.add_hyperparameter(UnParametrizedHyperparameter(
        "fit_intercept", "True"))
    cs.add_hyperparameter(UniformIntegerHyperparameter(
        "n_iter", 5, 1000, default=20))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        "C", 1e-5, 10, 1, log=True))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Extend the constrained feed-net space with momentum solver settings."""
    cs = ConstrainedFeedNet.get_hyperparameter_search_space()
    cs.add_hyperparameter(Constant(name='solver', value='momentum'))
    cs.add_hyperparameter(UniformFloatHyperparameter(
        "momentum", 0.3, 0.999, default=0.9))
    return cs
def get_hyperparameter_search_space(dataset_properties=None):
    """Constrained DeepFeedNet space: fixed architecture for MNIST, with
    only the learning rate and L2 weight tuned."""
    # Fixed architecture for MNIST.
    batch_size = Constant(name='batch_size', value=963)
    # TODO: Decimal library to work around floating point issue
    dropout_layer_1 = Constant(name='dropout_layer_1', value=0.39426633933)
    dropout_output = Constant(name='dropout_output', value=0.085813712701)
    num_layers = Constant(name='num_layers', value='c')
    num_units_layer_1 = Constant(name='num_units_layer_1', value=1861)
    number_updates = Constant(name='number_updates', value=1105)
    std_layer_1 = Constant(name='std_layer_1', value=0.00351015701)

    # Optimized in all cases.
    l2 = UniformFloatHyperparameter(
        "lambda2", 1e-6, 1e-2, log=True, default=1e-3)
    lr = UniformFloatHyperparameter(
        "learning_rate", 1e-4, 1e-1, log=True, default=1e-2)

    cs = ConfigurationSpace()
    for hp in (batch_size, number_updates, num_layers, num_units_layer_1,
               dropout_layer_1, dropout_output, std_layer_1, lr, l2):
        cs.add_hyperparameter(hp)
    return cs