Example 1: randomized PolynomialFeatures configuration
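All examples below rely on three sampling helpers that the listing itself does not define. A minimal sketch, assuming they are thin wrappers around Python's random module:

import random

def _randint(a, b):
    # Uniform integer between a and b, inclusive on both ends.
    return random.randint(int(a), int(b))

def _randchoice(seq):
    # Uniformly pick one element from a non-empty sequence.
    return random.choice(seq)

def _randuniform(a, b):
    # Uniform float between a and b.
    return random.uniform(a, b)
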
from sklearn.preprocessing import PolynomialFeatures

def polynomial():
    # Sample a random polynomial-feature expansion: degree in [2, 10],
    # with interaction-only terms and the bias column toggled at random.
    a = _randint(2, 10)
    b = _randchoice([True, False])
    c = _randchoice([True, False])
    scaler = PolynomialFeatures(degree=a, interaction_only=b, include_bias=c)
    # Encode the sampled hyperparameters in a name string for bookkeeping.
    tmp = str(a) + "_" + str(b) + "_" + str(c) + "_" + PolynomialFeatures.__name__
    return scaler, tmp
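Every factory in this listing follows the same contract: it returns an unfitted estimator together with a string naming the sampled configuration. A short usage sketch (X is placeholder data, not part of the listing):

import numpy as np

X = np.arange(6.0).reshape(3, 2)   # placeholder feature matrix
scaler, name = polynomial()
X_expanded = scaler.fit_transform(X)
print(name)              # e.g. "3_False_True_PolynomialFeatures"
print(X_expanded.shape)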
Example 2: randomized DecisionTreeClassifier configuration
from sklearn.tree import DecisionTreeClassifier

def DT():
    # min_samples_split is sampled as a float, so scikit-learn interprets
    # it as a fraction of the training samples (it must be > 0.0).
    a = _randuniform(0.0, 1.0)
    b = _randchoice(['gini', 'entropy'])
    c = _randchoice(['best', 'random'])
    model = DecisionTreeClassifier(criterion=b,
                                   splitter=c,
                                   min_samples_split=a,
                                   max_features=None,
                                   min_impurity_decrease=0.0)
    tmp = str(a) + "_" + b + "_" + c + "_" + DecisionTreeClassifier.__name__
    return model, tmp
Example 3: randomized KNeighborsClassifier configuration
from sklearn.neighbors import KNeighborsClassifier

def KNN():
    a = _randint(2, 25)
    b = _randchoice(['uniform', 'distance'])
    c = _randchoice(['minkowski', 'chebyshev'])
    # The power parameter p only matters for the Minkowski metric;
    # for Chebyshev it is ignored, so a fixed value is fine.
    if c == 'minkowski':
        d = _randint(1, 15)
    else:
        d = 2
    model = KNeighborsClassifier(n_neighbors=a,
                                 weights=b,
                                 algorithm='auto',
                                 p=d,
                                 metric=c,
                                 n_jobs=-1)
    tmp = str(a) + "_" + b + "_" + c + "_" + str(d) + "_" + KNeighborsClassifier.__name__
    return model, tmp
Example 4: randomized QuantileTransformer configuration
from sklearn.preprocessing import QuantileTransformer

def quantile_transform():
    a, b = _randint(100, 1000), _randint(1000, 100000)
    c = _randchoice(['normal', 'uniform'])
    # Note: n_quantiles must not exceed the number of training samples.
    scaler = QuantileTransformer(n_quantiles=a,
                                 output_distribution=c,
                                 subsample=b)
    tmp = str(a) + "_" + str(b) + "_" + c + "_" + QuantileTransformer.__name__
    return scaler, tmp
Example 5: randomized LogisticRegression configuration
from sklearn.linear_model import LogisticRegression

def LR():
    # Only 'l2' is sampled here, so the penalty is effectively fixed;
    # the single-element choice keeps the interface uniform with the others.
    a = _randchoice(['l2'])
    b = _randuniform(0.0, 0.1)
    c = _randint(1, 500)

    # model = LogisticRegression(penalty=a, tol=b, C=float(c), solver='liblinear', multi_class='warn')
    model = LogisticRegression(penalty=a, tol=b, C=float(c))
    tmp = a + "_" + str(round(b, 5)) + "_" + str(c) + "_" + LogisticRegression.__name__
    return model, tmp
Example 6: randomized RandomForestClassifier configuration
from sklearn.ensemble import RandomForestClassifier

def RF():
    a = _randint(50, 150)
    b = _randchoice(['gini', 'entropy'])
    # As in DT(), a float min_samples_split is read as a fraction of samples.
    c = _randuniform(0.0, 1.0)
    model = RandomForestClassifier(n_estimators=a,
                                   criterion=b,
                                   min_samples_split=c,
                                   max_features=None,
                                   min_impurity_decrease=0.0,
                                   n_jobs=-1)
    tmp = str(a) + "_" + b + "_" + str(round(c, 5)) + "_" + RandomForestClassifier.__name__
    return model, tmp
Example 7: randomized SVC configuration
from sklearn.svm import SVC

def SVM():
    # SVMs are scale-sensitive, so rescaling the data first helps, e.g.:
    # from sklearn.preprocessing import MinMaxScaler
    # scaling = MinMaxScaler(feature_range=(-1, 1)).fit(train_data)
    # train_data = scaling.transform(train_data)
    # test_data = scaling.transform(test_data)
    a = _randint(1, 500)
    b = _randchoice(['linear', 'poly', 'rbf', 'sigmoid'])
    c = _randint(2, 10)          # degree: used only by the 'poly' kernel
    d = _randuniform(0.0, 1.0)   # gamma
    e = _randuniform(0.0, 0.1)   # coef0: used by 'poly' and 'sigmoid'
    f = _randuniform(0.0, 0.1)   # stopping tolerance
    model = SVC(C=float(a),
                kernel=b,
                degree=c,
                gamma=d,
                coef0=e,
                tol=f,
                cache_size=20000)
    tmp = (str(a) + "_" + b + "_" + str(c) + "_" + str(round(d, 5)) + "_" +
           str(round(e, 5)) + "_" + str(round(f, 5)) + "_" + SVC.__name__)
    return model, tmp
Example 8: randomized Normalizer configuration
from sklearn.preprocessing import Normalizer

def normalizer():
    # Per-sample normalization with a randomly chosen norm.
    a = _randchoice(['l1', 'l2', 'max'])
    scaler = Normalizer(norm=a)
    tmp = a + "_" + Normalizer.__name__
    return scaler, tmp
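Taken together, the factories support a simple random search over preprocessor/model pairs. A minimal end-to-end sketch; the dataset, the train/test split, and the search loop are all assumptions for illustration and do not appear in the listing:

import random

from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline

# Placeholder data for illustration.
X, y = make_classification(n_samples=200, n_features=10, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

scalers = [normalizer, quantile_transform]
models = [DT, KNN, LR, RF, SVM]

best_name, best_score = None, -1.0
for _ in range(10):
    scaler, s_name = random.choice(scalers)()
    model, m_name = random.choice(models)()
    pipe = Pipeline([('scaler', scaler), ('model', model)])
    try:
        score = pipe.fit(X_train, y_train).score(X_test, y_test)
    except ValueError:
        continue  # skip invalid sampled configs (e.g. min_samples_split == 0.0)
    if score > best_score:
        best_name, best_score = s_name + "|" + m_name, score

print(best_name, best_score)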