def getSGDRegressorParams():
    """Return hyperparameter search spaces for sklearn's SGDRegressor.

    Returns:
        dict with two keys:
          "dist": raw parameter distributions (for randomized search).
          "grid": the same parameters discretized via convertDistribution
                  (for exhaustive grid search).
    """
    params = {
        "alpha": genPowerTen(-1, 1, 4),
        "epsilon": genLinear(0.05, 0.25, step=0.05),
        "eta0": genPowerTen(-3, -1, 5),
        "l1_ratio": genPowerTen(-2, 0, 5),
        "learning_rate": ["constant", "optimal", "invscaling"],
        "loss": [
            "squared_loss", "huber", "epsilon_insensitive",
            "squared_epsilon_insensitive"
        ],
        "max_iter": [1000],
        "penalty": ["l1", "l2"],
        "power_t": genPowerTwo(-3, -1, 3),
        "tol": [0.001]
    }
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    param_grid = {param: convertDistribution(dist)
                  for param, dist in params.items()}
    return {"dist": params, "grid": param_grid}
def getRidgeRegressorParams(cv=False):
    """Return hyperparameter search spaces for Ridge / RidgeCV.

    Args:
        cv: if True, build the plural "alphas" tuple that RidgeCV expects;
            otherwise the "alpha" list used by plain Ridge.

    Returns:
        dict with "dist" (raw distributions) and "grid" (discretized via
        convertDistribution).
    """
    if cv is True:
        params = {"alphas": tuple(genPowerTen(-1, 1, 9))}
    else:
        params = {"alpha": genPowerTen(-1, 1, 9)}
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    param_grid = {param: convertDistribution(dist)
                  for param, dist in params.items()}
    return {"dist": params, "grid": param_grid}
def getLogisticRegressionClassifer(cv=False):
    """Return hyperparameter search spaces for LogisticRegression(CV).

    NOTE(review): the name keeps the existing "Classifer" spelling so
    callers elsewhere in the project remain unaffected.

    Args:
        cv: if True, build the "Cs" parameter that LogisticRegressionCV
            expects; otherwise the "C" parameter of LogisticRegression.

    Returns:
        dict with "dist" (raw distributions) and "grid" (discretized via
        convertDistribution).
    """
    if cv is False:
        params = {"C": genPowerTen(-4, 4, 100), "penalty": ["l1", "l2"]}
    else:
        params = {"Cs": genPowerTen(-2, 2, 100), "penalty": ["l1", "l2"]}
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    param_grid = {param: convertDistribution(dist)
                  for param, dist in params.items()}
    return {"dist": params, "grid": param_grid}
def getARDRegressorParams():
    """Return hyperparameter search spaces for sklearn's ARDRegression.

    Returns:
        dict with "dist" (raw distributions) and "grid" (discretized via
        convertDistribution).
    """
    params = {
        "alpha_1": genPowerTen(-7, -5, 3),
        "lambda_1": genPowerTen(-7, -5, 3),
        "alpha_2": genPowerTen(-7, -5, 3),
        "lambda_2": genPowerTen(-7, -5, 3)
    }
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    param_grid = {param: convertDistribution(dist)
                  for param, dist in params.items()}
    return {"dist": params, "grid": param_grid}
def getBayesianRidgeRegressorParams():
    """Return hyperparameter search spaces for sklearn's BayesianRidge.

    Returns:
        dict with "dist" (raw distributions) and "grid" (discretized via
        convertDistribution).
    """
    params = {
        "alpha_1": genPowerTen(-8, -4, 9),
        "lambda_1": genPowerTen(-8, -2, 13),
        "alpha_2": genPowerTen(-8, -4, 9),
        "lambda_2": genPowerTen(-8, -2, 13)
    }
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    param_grid = {param: convertDistribution(dist)
                  for param, dist in params.items()}
    return {"dist": params, "grid": param_grid}
def getElasticNetRegressorParams(cv=False):
    """Return hyperparameter search spaces for ElasticNet / ElasticNetCV.

    Args:
        cv: if True, use the plural "alphas" key that ElasticNetCV expects;
            otherwise the "alpha" key used by plain ElasticNet.

    Returns:
        dict with "dist" (raw distributions) and "grid" (discretized via
        convertDistribution).
    """
    if cv is True:
        params = {
            "l1_ratio": genPowerTen(-2, 0, 9),
            "alphas": genPowerTen(-1, 1, 9)
        }
    else:
        params = {
            "l1_ratio": genPowerTen(-2, 0, 9),
            "alpha": genPowerTen(-1, 1, 9)
        }
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    param_grid = {param: convertDistribution(dist)
                  for param, dist in params.items()}
    return {"dist": params, "grid": param_grid}
def getSVMEpsRegressorParams(kernel):
    """Return hyperparameter search spaces for epsilon-SVR with a kernel.

    Args:
        kernel: kernel name; "poly", "sigmoid" and "rbf" each add their
            kernel-specific parameters, anything else gets only the base
            parameters.

    Returns:
        dict with "dist" (raw distributions) and "grid" (discretized via
        convertDistribution).
    """
    baseParams = {
        "C": genPowerTen(-1, 1, 9),
        "epsilon": genLinear(0.1, 0.9, step=0.2),
        "kernel": [kernel]
    }
    # Defaulting to {} fixes a latent NameError: the original left
    # `params` unbound for any kernel other than the four it tested.
    extra = {}
    if kernel == "poly":
        extra = {
            "coef0": genLinear(-1, 1, 3),
            "degree": genLinear(1, 5, step=1),
            "gamma": ['auto']
        }
    elif kernel == "sigmoid":
        extra = {"coef0": genLinear(-1, 1, 3), "gamma": ['auto']}
    elif kernel == "rbf":
        extra = {"gamma": ['auto']}
    # dict(items + items) only works on Python 2 (dict_items has no `+`
    # on Python 3); copy-and-update is equivalent and portable.
    params = dict(baseParams)
    params.update(extra)
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    param_grid = {param: convertDistribution(dist)
                  for param, dist in params.items()}
    return {"dist": params, "grid": param_grid}
def getXGBRegressorParams():
    """Return hyperparameter search spaces for XGBRegressor.

    Returns:
        dict with "dist" (raw distributions) and "grid" (discretized via
        convertDistribution).
    """
    params = {
        "gamma": genLinear(0, 1, step=0.2),
        "max_depth": genLinear(2, 8, step=2),
        "learning_rate": genPowerTen(-2, -0.5, 4),
        "n_estimators": [50, 100, 200, 350, 500],
        "reg_alpha": genPowerTen(-2, 1, 4),
        "reg_lambda": genPowerTen(-2, 1, 4)
    }
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    param_grid = {param: convertDistribution(dist)
                  for param, dist in params.items()}
    return {"dist": params, "grid": param_grid}
def getBernoulliNaiveBayesClassifierParams():
    """Return hyperparameter search spaces for BernoulliNB.

    Returns:
        dict with "dist" (raw distributions) and "grid" (discretized via
        convertDistribution).
    """
    params = {"alpha": genPowerTen(-4, 4, 100)}
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    param_grid = {param: convertDistribution(dist)
                  for param, dist in params.items()}
    return {"dist": params, "grid": param_grid}
def getQuadraticDiscriminantAnalysisParams():
    """Return hyperparameter search spaces for QuadraticDiscriminantAnalysis.

    Returns:
        dict with "dist" (raw distributions) and "grid" (discretized via
        convertDistribution).
    """
    params = {"reg_param": genPowerTen(-4, 4, 100)}
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    param_grid = {param: convertDistribution(dist)
                  for param, dist in params.items()}
    return {"dist": params, "grid": param_grid}
def getSGDClassifierParams():
    """Return hyperparameter search spaces for sklearn's SGDClassifier.

    Returns:
        dict with "dist" (raw distributions) and "grid" (discretized via
        convertDistribution).
    """
    params = {
        "alpha": genPowerTen(-4, 4, 100),
        "epsilon": genLinear(0.05, 0.25, step=0.05),
        "eta0": genPowerTen(-3, -1, 5),
        "l1_ratio": genPowerTen(-2, 0, 5),
        "learning_rate": ["constant", "optimal", "invscaling"],
        "loss": ["modified_huber", "log"],
        "max_iter": [1000],
        "penalty": ["l1", "l2"],
        "power_t": genPowerTwo(-3, -1, 3),
        "tol": [0.001]
    }
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    param_grid = {param: convertDistribution(dist)
                  for param, dist in params.items()}
    return {"dist": params, "grid": param_grid}
def getSVMLinearRegressorParams():
    """Return hyperparameter search spaces for LinearSVR.

    Returns:
        dict with "dist" (raw distributions) and "grid" (discretized via
        convertDistribution).
    """
    params = {
        "C": genPowerTen(-1, 1, 9),
        "loss": ['epsilon_insensitive', 'squared_epsilon_insensitive']
    }
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    param_grid = {param: convertDistribution(dist)
                  for param, dist in params.items()}
    return {"dist": params, "grid": param_grid}
def getAdaBoostRegressorParams():
    """Return hyperparameter search spaces for AdaBoostRegressor.

    Returns:
        dict with "dist" (raw distributions) and "grid" (discretized via
        convertDistribution).
    """
    params = {
        "learning_rate": genPowerTen(-2, 1, 5),
        "n_estimators": [100]
    }
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    param_grid = {param: convertDistribution(dist)
                  for param, dist in params.items()}
    return {"dist": params, "grid": param_grid}
def getPassiveAggressiveRegressorParams():
    """Return hyperparameter search spaces for PassiveAggressiveRegressor.

    Returns:
        dict with "dist" (raw distributions) and "grid" (discretized via
        convertDistribution).
    """
    params = {
        "C": genPowerTen(-1, 1, 9),
        "loss": ["epsilon_insensitive", "squared_epsilon_insensitive"],
        "max_iter": [1000],
        "tol": [0.001]
    }
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    param_grid = {param: convertDistribution(dist)
                  for param, dist in params.items()}
    return {"dist": params, "grid": param_grid}
def getKernelRidgeRegressorParams():
    """Return hyperparameter search spaces for KernelRidge.

    Returns:
        dict with "dist" (raw distributions) and "grid" (discretized via
        convertDistribution).
    """
    params = {
        "alpha": genPowerTen(-1, 1, 9),
        "coef0": genLinear(-1, 1, 3),
        "degree": genLinear(1, 5, step=1),
        # 'precomputed' deliberately excluded from the kernel search.
        "kernel": ['linear', 'poly', 'rbf', 'sigmoid']
    }
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    param_grid = {param: convertDistribution(dist)
                  for param, dist in params.items()}
    return {"dist": params, "grid": param_grid}
def getHuberRegressorParams():
    """Return hyperparameter search spaces for HuberRegressor.

    Returns:
        dict with "dist" (raw distributions) and "grid" (discretized via
        convertDistribution).
    """
    params = {
        "alpha": genPowerTen(-5, -3, 9),
        # HuberRegressor requires epsilon >= 1.0, hence the 1.05 floor.
        "epsilon": genLinear(1.05, 1.65, step=0.05),
        "max_iter": [1000],
        "tol": [0.00001]
    }
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    param_grid = {param: convertDistribution(dist)
                  for param, dist in params.items()}
    return {"dist": params, "grid": param_grid}
def getMLPRegressorParams():
    """Return hyperparameter search spaces for MLPRegressor.

    NOTE(review): earlier revisions also searched larger hidden layers
    ((100,), (250,)) and the learning_rate / momentum / power_t / solver
    parameters; they were disabled to keep the search tractable.

    Returns:
        dict with "dist" (raw distributions) and "grid" (discretized via
        convertDistribution).
    """
    params = {
        "activation": ["identity", "logistic", "tanh", "relu"],
        "alpha": genPowerTen(-1, 1, 9),
        "beta_1": genLinear(0.81, 0.99, step=0.04),
        "hidden_layer_sizes": [(10, ), (25, ), (50, )],
        "max_iter": [500]
    }
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    param_grid = {param: convertDistribution(dist)
                  for param, dist in params.items()}
    return {"dist": params, "grid": param_grid}
def getGradientBoostingRegressorParams():
    """Return hyperparameter search spaces for GradientBoostingRegressor.

    NOTE(review): earlier revisions also searched tree parameters
    (max_features, min_impurity_decrease, min_samples_leaf) and the
    criterion; they were disabled to keep the search tractable.

    Returns:
        dict with "dist" (raw distributions) and "grid" (discretized via
        convertDistribution).
    """
    params = {
        "max_depth": [2, 4, 6, 8],
        "loss": ["ls"],
        "learning_rate": genPowerTen(-2, -0.5, 4),
        "n_estimators": [50]
    }
    # .items() (not Py2-only .iteritems()) keeps this working on Python 3.
    param_grid = {param: convertDistribution(dist)
                  for param, dist in params.items()}
    return {"dist": params, "grid": param_grid}