Code example #1
# assumed imports; module names follow the reference Python-ELM layout
# (elm.py and random_layer.py) and are not shown in the original snippet
import numpy as np
from time import time

from elm import GenELMClassifier
from random_layer import MLPRandomLayer, RBFRandomLayer


def trainELMClassifier(trainData, trainLabels, testData):
    print("\nTraining ELM Classifier...")

    trainData = np.asarray(trainData)
    trainLabels = np.asarray(trainLabels)
    print(trainData.shape)
    print(trainLabels.shape)

    # choose the hidden-layer size and activation for the ELM
    nh = 100
    activation = 'tanh'

    if activation == 'rbf':
        act_layer = RBFRandomLayer(n_hidden=nh,
                                   random_state=0,
                                   rbf_width=0.001)
    elif activation == 'tanh':
        act_layer = MLPRandomLayer(n_hidden=nh, activation_func='tanh')
    elif activation == 'tribas':
        act_layer = MLPRandomLayer(n_hidden=nh, activation_func='tribas')
    elif activation == 'hardlim':
        act_layer = MLPRandomLayer(n_hidden=nh, activation_func='hardlim')

    # initialize ELM Classifier
    elm = GenELMClassifier(hidden_layer=act_layer)

    t0 = time()
    elm.fit(trainData, trainLabels)
    print("\nTraining finished in %0.3fs \n" % (time() - t0))

    t0 = time()
    predictedLabels = elm.predict(testData)
    print("\nTesting finished in %0.3fs" % (time() - t0))

    t0 = time()
    confidence_scores = elm.decision_function(testData)
    print("\nScoring finished in %0.3fs" % (time() - t0))

    print("\nPredicted Labels")
    print("----------------------------------")
    print(predictedLabels)

    print("\nConfidence Scores")
    print("----------------------------------")
    print(confidence_scores)

    params = {
        'nh': nh,
        'af': activation,
    }

    return confidence_scores, predictedLabels, params
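A minimal usage sketch for trainELMClassifier above. The imports and the digits data set below are illustrative assumptions, not part of the original project.

from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split

# illustrative data: scikit-learn's digits set, split 75/25
digits = load_digits()
trainData, testData, trainLabels, testLabels = train_test_split(
    digits.data, digits.target, test_size=0.25, random_state=0)

scores, predicted, params = trainELMClassifier(trainData, trainLabels, testData)
print("hidden units: %d, activation: %s" % (params['nh'], params['af']))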
Code example #2
File: run_script.py, Project: yolle103/solution
def run_ELM(
    x,
    y,
    threshold,
    test_num,
    n_hidden,
    random_state=2018,
    kernel_type='MLP',
):
    # split the data set into train/test sets
    # (sklearn.cross_validation was removed in scikit-learn 0.20;
    #  the same helper now lives in sklearn.model_selection)
    x_train, x_test, y_train, y_test = cross_validation.train_test_split(
        x, y, test_size=0.3, random_state=random_state)
    # currently only supports test_num <= 100k
    assert test_num <= 100000

    def powtanh_xfer(activations, power=1.0):
        return pow(np.tanh(activations), power)

    model_count = 0
    result = []
    hidden_options = {
        'MLP': MLPRandomLayer,
        'RBF': RBFRandomLayer,
        'GRBF': GRBFRandomLayer
    }

    for i in range(0, test_num):
        tanh_rhl = hidden_options[kernel_type](n_hidden=n_hidden,
                                               random_state=i,
                                               activation_func=powtanh_xfer,
                                               activation_args={
                                                   'power': 3.0
                                               })
        elmc_tanh = GenELMClassifier(hidden_layer=tanh_rhl)
        # start Training
        elmc_tanh.fit(x_train, y_train)
        # calculate score
        train_acc = elmc_tanh.score(x_train, y_train)
        test_acc = elmc_tanh.score(x_test, y_test)
        if train_acc > threshold and test_acc > threshold:
            logging.info(
                'found model satisfying threshold, train_acc: {}, test_acc: {}'.
                format(train_acc, test_acc))
            result.append(
                (train_acc, test_acc, tanh_rhl.components_['weights']))

            model_count += 1
    logging.info('finished training, got {} valid models'.format(model_count))

    # sort by test accuracy, best model first
    result.sort(key=lambda item: item[1], reverse=True)
    return result
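A minimal call sketch for run_ELM, assuming the module-level imports that run_script.py already provides (the Python-ELM layers, cross_validation and logging); the synthetic data and the threshold are illustrative only.

import logging
import numpy as np

logging.basicConfig(level=logging.INFO)

# illustrative binary-classification data: 500 samples, 20 features
rng = np.random.RandomState(0)
x = rng.randn(500, 20)
y = (x[:, 0] + 0.5 * rng.randn(500) > 0).astype(int)

# try 50 randomly initialised MLP hidden layers of 64 units each and keep
# every model whose train and test accuracy both exceed 0.7
models = run_ELM(x, y, threshold=0.7, test_num=50, n_hidden=64,
                 kernel_type='MLP')
if models:
    best_train_acc, best_test_acc, weights = models[0]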
Code example #3
num_epochs = 10
for train_index, test_index in sKF.split(std_X, y):
    i += 1
    x_train = std_X[train_index]
    y_train = y[train_index]
    x_test = std_X[test_index]
    y_test = y[test_index]
    #-------------------------------------------------------------------------------
    # three candidate hidden layers; only the RBF layer is used below
    grbf = GRBFRandomLayer(n_hidden=500, grbf_lambda=0.0001)
    act = MLPRandomLayer(n_hidden=500, activation_func='sigmoid')
    rbf = RBFRandomLayer(n_hidden=290,
                         rbf_width=0.0001,
                         activation_func='sigmoid')

    clf = GenELMClassifier(hidden_layer=rbf)
    clf.fit(x_train, y_train.ravel())
    y_pre = clf.predict(x_test)
    y_score = clf.decision_function(x_test)
    fpr, tpr, thresholds = roc_curve(y_test, y_score)
    tprs.append(tpr)
    fprs.append(fpr)
    roc_auc = auc(fpr, tpr)
    tn, fp, fn, tp = confusion_matrix(y_test, y_pre).ravel()
    test_acc = (tn + tp) / (tn + fp + fn + tp)
    test_Sn = tp / (fn + tp)  # sensitivity (recall)
    test_Sp = tn / (fp + tn)  # specificity
    # Matthews correlation coefficient
    mcc = (tp * tn - fp * fn) / pow(
        ((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn)), 0.5)
    final_test_acc.append(test_acc)
    final_test_Sn.append(test_Sn)
    final_test_Sp.append(test_Sp)
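The fold loop above relies on names defined earlier in the original script (sKF, std_X, y, i, tprs, fprs and the final_* lists). A minimal setup sketch; the fold count, scaling choice and the synthetic data are assumptions for illustration.

import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import roc_curve, auc, confusion_matrix

# illustrative data standing in for the script's real feature matrix and labels
rng = np.random.RandomState(0)
X = rng.randn(300, 50)
y = rng.randint(0, 2, 300)

std_X = StandardScaler().fit_transform(X)
sKF = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)

i = 0
tprs, fprs = [], []
final_test_acc, final_test_Sn, final_test_Sp = [], [], []

After the loop, the per-fold metrics are typically averaged, e.g. np.mean(final_test_acc).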
Code example #4
# seed the GRBF centres with k-means centroids of the training data
(ctrs, _, _) = k_means(xtoy_train, nh)
unit_rs = np.ones(nh)

#rhl = RBFRandomLayer(n_hidden=nh, activation_func='inv_multiquadric')
#rhl = RBFRandomLayer(n_hidden=nh, centers=ctrs, radii=unit_rs)
rhl = GRBFRandomLayer(n_hidden=nh, grbf_lambda=.0001, centers=ctrs)
elmr = GenELMRegressor(hidden_layer=rhl)
elmr.fit(xtoy_train, ytoy_train)
print(elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test))
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))

# <codecell>

rbf_rhl = RBFRandomLayer(n_hidden=100, random_state=0, rbf_width=0.01)
elmc_rbf = GenELMClassifier(hidden_layer=rbf_rhl)
elmc_rbf.fit(dgx_train, dgy_train)
print(elmc_rbf.score(dgx_train, dgy_train), elmc_rbf.score(dgx_test, dgy_test))


def powtanh_xfer(activations, power=1.0):
    return pow(np.tanh(activations), power)


tanh_rhl = MLPRandomLayer(n_hidden=100,
                          activation_func=powtanh_xfer,
                          activation_args={'power': 3.0})
elmc_tanh = GenELMClassifier(hidden_layer=tanh_rhl)
elmc_tanh.fit(dgx_train, dgy_train)
print(elmc_tanh.score(dgx_train, dgy_train),
      elmc_tanh.score(dgx_test, dgy_test))
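The notebook cells above assume toy regression data (xtoy, ytoy), digits data (dgx, dgy), a hidden-layer size nh, and imports from pylab and Python-ELM. A minimal data-setup sketch; the exact data the original notebook builds, and the elm/random_layer module names, are assumptions here.

import numpy as np
from pylab import plot
from sklearn.cluster import k_means
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from elm import GenELMClassifier, GenELMRegressor
from random_layer import MLPRandomLayer, RBFRandomLayer, GRBFRandomLayer

nh = 10  # hidden units for the toy regression

# toy 1-D regression problem
xtoy = np.linspace(0, 3, 200).reshape(-1, 1)
ytoy = np.sin(2 * np.pi * xtoy).ravel() + 0.1 * np.random.randn(200)
xtoy_train, xtoy_test, ytoy_train, ytoy_test = train_test_split(
    xtoy, ytoy, test_size=0.3, random_state=0)

# digits classification problem
dgx, dgy = load_digits(return_X_y=True)
dgx_train, dgx_test, dgy_train, dgy_test = train_test_split(
    dgx, dgy, test_size=0.3, random_state=0)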
Code example #5
File: elm_notebook.py, Project: chrinide/py_ml_utils
# seed the GRBF centres with k-means centroids of the training data
(ctrs, _, _) = k_means(xtoy_train, nh)
unit_rs = np.ones(nh)

#rhl = RBFRandomLayer(n_hidden=nh, activation_func='inv_multiquadric')
#rhl = RBFRandomLayer(n_hidden=nh, centers=ctrs, radii=unit_rs)
rhl = GRBFRandomLayer(n_hidden=nh, grbf_lambda=.0001, centers=ctrs)
elmr = GenELMRegressor(hidden_layer=rhl)
elmr.fit(xtoy_train, ytoy_train)
print(elmr.score(xtoy_train, ytoy_train), elmr.score(xtoy_test, ytoy_test))
plot(xtoy, ytoy, xtoy, elmr.predict(xtoy))

# <codecell>

rbf_rhl = RBFRandomLayer(n_hidden=100, random_state=0, rbf_width=0.01)
elmc_rbf = GenELMClassifier(hidden_layer=rbf_rhl)
elmc_rbf.fit(dgx_train, dgy_train)
print(elmc_rbf.score(dgx_train, dgy_train), elmc_rbf.score(dgx_test, dgy_test))

def powtanh_xfer(activations, power=1.0):
    return pow(np.tanh(activations), power)

tanh_rhl = MLPRandomLayer(n_hidden=100, activation_func=powtanh_xfer, activation_args={'power':3.0})
elmc_tanh = GenELMClassifier(hidden_layer=tanh_rhl)
elmc_tanh.fit(dgx_train, dgy_train)
print(elmc_tanh.score(dgx_train, dgy_train), elmc_tanh.score(dgx_test, dgy_test))

# <codecell>

rbf_rhl = RBFRandomLayer(n_hidden=100, rbf_width=0.01)
tr, ts = res_dist(dgx, dgy, GenELMClassifier(hidden_layer=rbf_rhl), n_runs=100, random_state=0)
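res_dist is a helper defined earlier in elm_notebook.py; judging from this call it fits the given classifier over n_runs random train/test splits and returns the train and test score distributions. A hypothetical re-implementation under that assumption (not the notebook's actual code):

import numpy as np
from sklearn.model_selection import train_test_split

def res_dist(X, y, clf, n_runs=100, random_state=0):
    # hypothetical: score clf over repeated random splits
    rng = np.random.RandomState(random_state)
    train_scores, test_scores = [], []
    for _ in range(n_runs):
        xtr, xte, ytr, yte = train_test_split(X, y, test_size=0.3,
                                              random_state=rng)
        clf.fit(xtr, ytr)
        train_scores.append(clf.score(xtr, ytr))
        test_scores.append(clf.score(xte, yte))
    return np.array(train_scores), np.array(test_scores)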