Example #1
File: svm.py Project: tgadf/pymva
def getSVMEpsRegressorParams(kernel):
    param_grid = {}
    baseParams = {
        "C": genPowerTen(-1, 1, 9),
        "epsilon": genLinear(0.1, 0.9, step=0.2),
        "kernel": [kernel]
    }
    # Kernel-specific additions; default to {} so "linear" (or any
    # unrecognized kernel) does not leave `params` undefined.
    params = {}
    if kernel == "poly":
        params = {
            "coef0": genLinear(-1, 1, 3),
            "degree": genLinear(1, 5, step=1),
            "gamma": ['auto']
        }
    elif kernel == "sigmoid":
        params = {"coef0": genLinear(-1, 1, 3), "gamma": ['auto']}
    elif kernel == "rbf":
        params = {"gamma": ['auto']}

    # Merge shared and kernel-specific parameters (the original
    # baseParams.items() + params.items() only works on Python 2).
    params = {**baseParams, **params}

    for param, dist in params.items():
        param_grid[param] = convertDistribution(dist)
    retval = {"dist": params, "grid": param_grid}
    return retval
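
The helpers genPowerTen, genLinear, genPowerTwo, and convertDistribution are defined elsewhere in pymva and are not shown in these snippets. The sketch below is a set of hypothetical stand-ins, assuming genPowerTen(lo, hi, num) log-spaces num values between 10**lo and 10**hi, genLinear accepts either a step keyword or a bare point count, genPowerTwo does the same in base 2, and convertDistribution simply materializes the candidates into a list; the real implementations in tgadf/pymva may differ.

# Hypothetical stand-ins for the pymva helpers used in these examples;
# the real implementations in tgadf/pymva may differ.
import numpy as np


def genPowerTen(lo, hi, num):
    # num candidates log-spaced between 10**lo and 10**hi.
    return np.logspace(lo, hi, num=num).tolist()


def genPowerTwo(lo, hi, num):
    # num candidates log-spaced between 2**lo and 2**hi.
    return np.logspace(lo, hi, num=num, base=2.0).tolist()


def genLinear(lo, hi, num=None, step=None):
    # With step=..., an inclusive arithmetic range; with a bare third
    # argument (e.g. genLinear(-1, 1, 3)), that many evenly spaced points.
    if step is not None:
        vals = np.arange(lo, hi + step / 2.0, step)
    else:
        vals = np.linspace(lo, hi, num=int(num))
    # Keep integer-valued candidates as ints (degree, max_depth, ...).
    if all(float(v).is_integer() for v in vals):
        return [int(v) for v in vals]
    return [float(v) for v in vals]


def convertDistribution(dist):
    # For a plain grid search, just materialize the candidate values.
    return list(dist)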
Example #2
File: svm.py Project: tgadf/pymva
def getKernelRidgeRegressorParams():
    param_grid = {}
    params = {
        "alpha": genPowerTen(-1, 1, 9),
        "coef0": genLinear(-1, 1, 3),
        "degree": genLinear(1, 5, step=1),
        "kernel": ['linear', 'poly', 'rbf', 'sigmoid']
    }  #, 'precomputed']}

    for param, dist in params.items():
        param_grid[param] = convertDistribution(dist)
    retval = {"dist": params, "grid": param_grid}
    return retval
Example #3
File: xgb.py Project: tgadf/pymva
def getXGBRegressorParams():
    params = {
        "gamma": genLinear(0, 1, step=0.2),
        "max_depth": genLinear(2, 8, step=2),
        "learning_rate": genPowerTen(-2, -0.5, 4),
        "n_estimators": [50, 100, 200, 350, 500],
        "reg_alpha": genPowerTen(-2, 1, 4),
        "reg_lambda": genPowerTen(-2, 1, 4)
    }

    param_grid = {}
    for param, dist in params.items():
        param_grid[param] = convertDistribution(dist)

    retval = {"dist": params, "grid": param_grid}
    return retval
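
For the fractional exponents above, the log-spacing assumption from the earlier sketch would give learning_rate candidates between 10**-2 and 10**-0.5, i.e. genPowerTen(-2, -0.5, 4) works out to roughly:

# Under the hypothetical log-spaced genPowerTen sketched above:
import numpy as np

print(np.logspace(-2, -0.5, num=4).round(4).tolist())
# [0.01, 0.0316, 0.1, 0.3162]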
Example #4
def getSGDRegressorParams():
    param_grid = {}
    params = {
        "alpha":
        genPowerTen(-1, 1, 4),
        "epsilon":
        genLinear(0.05, 0.25, step=0.05),
        "eta0":
        genPowerTen(-3, -1, 5),
        "l1_ratio":
        genPowerTen(-2, 0, 5),
        "learning_rate": ["constant", "optimal", "invscaling"],
        "loss": [
            "squared_loss", "huber", "epsilon_insensitive",
            "squared_epsilon_insensitive"
        ],
        "max_iter": [1000],
        "penalty": ["l1", "l2"],
        "power_t":
        genPowerTwo(-3, -1, 3),
        "tol": [0.001]
    }

    for param, dist in params.items():
        param_grid[param] = convertDistribution(dist)
    retval = {"dist": params, "grid": param_grid}
    return retval
Example #5
def getRadiusNeighborsRegressorParams():
    params = {
        "algorithm": ['auto', 'ball_tree', 'kd_tree', 'brute'],
        "leaf_size":
        genLinear(10, 50, step=10),
        "metric": [
            'minkowski', 'cityblock', 'cosine', 'euclidean', 'l1', 'l2',
            'manhattan'
        ],
        "radius":
        genLinear(0.5, 1.5, step=0.5),
        "weights": ['uniform', 'distance']
    }

    param_grid = {}
    for param, dist in params.items():
        param_grid[param] = convertDistribution(dist)

    retval = {"dist": params, "grid": param_grid}
    return retval
Example #6
def getHuberRegressorParams():
    param_grid = {}
    params = {
        "alpha": genPowerTen(-5, -3, 9),
        "epsilon": genLinear(1.05, 1.65, step=0.05),
        "max_iter": [1000],
        "tol": [0.00001]
    }

    for param, dist in params.items():
        param_grid[param] = convertDistribution(dist)
    retval = {"dist": params, "grid": param_grid}
    return retval
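
Each of these functions returns the same spec in two forms, a raw distribution ("dist") and a discretized grid ("grid"), so the result can drive either a grid search or a randomized search. Below is a minimal usage sketch with scikit-learn's search utilities and the Huber parameters above; the estimator, data, and CV settings are illustrative assumptions, not part of pymva.

# Hedged usage sketch: estimator, data, and search budget are illustrative.
from sklearn.datasets import make_regression
from sklearn.linear_model import HuberRegressor
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV

X, y = make_regression(n_samples=200, n_features=10, random_state=0)
spec = getHuberRegressorParams()

# Exhaustive search over the discretized grid ...
grid_search = GridSearchCV(HuberRegressor(), param_grid=spec["grid"], cv=3)

# ... or a fixed-budget randomized search over the raw candidates.
rand_search = RandomizedSearchCV(HuberRegressor(),
                                 param_distributions=spec["dist"],
                                 n_iter=20, cv=3, random_state=0)
rand_search.fit(X, y)
print(rand_search.best_params_)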
Example #7
File: nn.py Project: tgadf/pymva
def getMLPRegressorParams():
    param_grid = {}
    params = {
        "activation": ["identity", "logistic", "tanh", "relu"],
        "alpha": genPowerTen(-1, 1, 9),
        "beta_1": genLinear(0.81, 0.99, step=0.04),
        "hidden_layer_sizes": [(10, ), (25, ), (50, )],  #, (100,), (250,)],
        #"learning_rate": ["constant", "invscaling", "adaptive"],
        #"momentum": genLinear(0.75, 0.95, step=0.05),
        "max_iter": [500]
    }
    #"power_t": genLinear(0.25, 0.75, step=0.25)}
    #"solver": ["lbfgs", "sgd", "adam"]}

    for param, dist in params.items():
        param_grid[param] = convertDistribution(dist)
    retval = {"dist": params, "grid": param_grid}
    return retval
Example #8
def getSGDClassifierParams():
    param_grid = {}
    params = {
        "alpha": genPowerTen(-4, 4, 100),
        "epsilon": genLinear(0.05, 0.25, step=0.05),
        "eta0": genPowerTen(-3, -1, 5),
        "l1_ratio": genPowerTen(-2, 0, 5),
        "learning_rate": ["constant", "optimal", "invscaling"],
        "loss": ["modified_huber", "log"],
        "max_iter": [1000],
        "penalty": ["l1", "l2"],
        "power_t": genPowerTwo(-3, -1, 3),
        "tol": [0.001]
    }

    for param, dist in params.items():
        param_grid[param] = convertDistribution(dist)
    retval = {"dist": params, "grid": param_grid}
    return retval