Example #1

import elm


def test_elmr_boston():

    # load dataset
    data = elm.read("tests/data/boston.data")

    # create a regressor
    elmr = elm.ELMRandom()

    try:
        # search for best parameter for this dataset
        # elmr.search_param(data, cv="kfold", of="rmse")

        # split data in training and testing sets
        tr_set, te_set = elm.split_sets(data, training_percent=.8, perm=True)

        # train and test
        tr_result = elmr.train(tr_set)
        te_result = elmr.test(te_set)

    except Exception:
        ERROR = 1
    else:
        ERROR = 0

    assert (ERROR == 0)
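The commented-out search_param call above can be enabled before training. A minimal sketch of the tuned flow, assuming the same elm package (eval=10 mirrors the iris tests below, and get_rmse() is the regression accessor used in Example #6):

import elm

data = elm.read("tests/data/boston.data")
elmr = elm.ELMRandom()

# k-fold cross-validation, optimizing RMSE over 10 search steps;
# the best parameters are stored inside the elmr object
elmr.search_param(data, cv="kfold", of="rmse", eval=10)

tr_set, te_set = elm.split_sets(data, training_percent=.8, perm=True)
tr_result = elmr.train(tr_set)
te_result = elmr.test(te_set)
print(te_result.get_rmse())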
Example #2

def test_elmr_boston():

    # load dataset
    data = elm.read("elmTestData/boston.data")

    # create a regressor
    elmr = elm.ELMRandom()

    try:
        # search for best parameter for this dataset
        # elmr.search_param(data, cv="kfold", of="rmse")

        # split data in training and testing sets
        tr_set, te_set = elm.split_sets(data, training_percent=.8, perm=True)

        # train and test
        tr_result = elmr.train(tr_set)
        te_result = elmr.test(te_set)

    except Exception:
        ERROR = 1
    else:
        ERROR = 0

    assert (ERROR == 0)
Example #3

import numpy as np

import data   # project-local module providing the velocity dataset helpers
import utils  # project-local helpers (transform_dict_for_nn)


def main():

    # Load in 2D velocity data
    velocity = data.load_data()

    # form training and testing sets for the velocity data
    X_train, y_train, X_test, y_test = data.form_train_test_sets(velocity)

    # Data transformation
    print("len of y", len(y_test))

    import elm as standard_elm

    # create a classifier
    elmk = standard_elm.ELMKernel()
    nn_structure = [9, 100, 1]
    x, y = utils.transform_dict_for_nn(X_train, y_train, nn_structure[0])
    x = np.transpose(x)
    y = np.transpose([y])

    # standard format for the elm functions: target column first, then features
    tr_set = np.concatenate((y, x), 1)

    # transform the first test block (left unused once the code switches to
    # the Boston data below)
    x_test, y_test = utils.transform_dict_for_nn(X_test[0], y_test[0],
                                                 nn_structure[0])

    # load dataset (this switches from the velocity data to the Boston housing
    # data, discarding the tr_set built above)
    dataa = standard_elm.read("boston.data")

    # re-create the classifier
    elmk = standard_elm.elmk.ELMKernel()

    # split data in training and testing sets:
    # use 80% of the dataset for training and shuffle data before splitting
    tr_set, te_set = standard_elm.split_sets(dataa,
                                             training_percent=.8,
                                             perm=True)

    # train and test; results are Error objects
    tr_result = elmk.train(tr_set)
    te_result = elmk.test(te_set)
    print(te_result.get_accuracy())
    print(te_result.predicted_targets)
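The train() and test() calls above return Error objects. A short sketch of the accessors these examples rely on, continuing from main() above (get_accuracy(), get_rmse(), and the target arrays all appear elsewhere in this collection):

# metric accessors
print(te_result.get_accuracy())  # classification accuracy
print(te_result.get_rmse())      # regression error

# raw target arrays
predictions = te_result.predicted_targets  # model outputs
truth = te_result.expected_targets         # ground-truth values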
Example #4

def test_elmk_iris():

    # load dataset
    data = elm.read("tests/data/iris.data")

    # create a classifier
    elmk = elm.ELMKernel()

    try:
        # search for best parameter for this dataset
        elmk.search_param(data, cv="kfold", of="accuracy", eval=10)

        # split data in training and testing sets
        tr_set, te_set = elm.split_sets(data, training_percent=.8, perm=True)

        # train and test
        tr_result = elmk.train(tr_set)
        te_result = elmk.test(te_set)
    except Exception:
        ERROR = 1
    else:
        ERROR = 0

    assert (ERROR == 0)
Example #5

def test_elmr_iris():

    # load dataset
    data = elm.read("tests/data/iris.data")

    # create a classifier
    elmr = elm.ELMRandom()

    try:
        # search for best parameter for this dataset
        elmr.search_param(data, cv="kfold", of="accuracy", eval=10)

        # split data in training and testing sets
        tr_set, te_set = elm.split_sets(data, training_percent=.8, perm=True)

        # train and test
        tr_result = elmr.train(tr_set)
        te_result = elmr.test(te_set)
    except Exception:
        ERROR = 1
    else:
        ERROR = 0

    assert (ERROR == 0)
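The two iris tests above differ only in the model class. A sketch that runs both models over a single split (classes and calls exactly as used above):

import elm

data = elm.read("tests/data/iris.data")
tr_set, te_set = elm.split_sets(data, training_percent=.8, perm=True)

for model in (elm.ELMKernel(), elm.ELMRandom()):
    model.search_param(data, cv="kfold", of="accuracy", eval=10)
    model.train(tr_set)
    te_result = model.test(te_set)
    print(type(model).__name__, te_result.get_accuracy())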
Example #6

def test_elmr_diabetes():

    # load dataset (head reconstructed to match the other tests -- the original
    # snippet was truncated; the name and data file are assumed from the
    # __main__ block below)
    data = elm.read("elmTestData/diabetes.data")

    # create a regressor
    elmr = elm.ELMRandom()

    try:
        # split data in training and testing sets
        tr_set, te_set = elm.split_sets(data, training_percent=.8, perm=True)

        # train and test
        tr_result = elmr.train(tr_set)
        te_result = elmr.test(te_set)
        print(tr_result.get_accuracy())
        print(te_result.get_accuracy())
    except Exception:
        ERROR = 1
    else:
        ERROR = 0

    assert (ERROR == 0)
    # assert (te_result.get_rmse() <= 70)


if __name__ == '__main__':
    # load dataset
    data = elm.read("elmTestData/diabetes.data")

    # create a regressor
    elmr = elm.ELMRandom()
    tr_set, te_set = elm.split_sets(data, training_percent=.8, perm=True)

    # train and test
    tr_result = elmr.train(tr_set)
    te_result = elmr.test(te_set)
    print(tr_result.get_rmse())
    print(te_result.get_rmse())
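The printed RMSE can be reproduced from the Error object's raw target arrays (attribute names as used in Example #7); a minimal sketch, assuming get_rmse() is the plain root-mean-square error:

import numpy as np

diff = te_result.predicted_targets - te_result.expected_targets
print(np.sqrt(np.mean(diff ** 2)))  # should match te_result.get_rmse()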
Example #7

import numpy as np
import elm
from sklearn import preprocessing

import reduceDim  # project-local module providing featureExtraction()


def buildELM(data, iterNum, isNormal, isRegression, isPCA, n_components, normalMethod):
    # note: isNormal is accepted but unused; normalMethod selects the scaler
    rfList = []
    elmr = elm.ELMKernel()
    # elmr = elm.ELMRandom()

    # search for the best parameters for this dataset: "kfold" cross-validation
    # with 100 search steps; the best parameters are stored inside the model
    elmr.search_param(data, eval=100, cv='kfold')
    for j in range(iterNum):
        # split data in training and testing sets:
        # use 90% of the dataset for training and shuffle before splitting
        tr_set, te_set = elm.split_sets(data, training_percent=.9, perm=True)

        # the first column is the target; the remaining columns are features
        X_train = tr_set[:, 1:].copy()
        X_test = te_set[:, 1:].copy()

        if normalMethod == 1:
            print("Using Normalizer()...")
            sc = preprocessing.Normalizer()
        elif normalMethod == 2:
            print("Using StandardScaler()...")
            sc = preprocessing.StandardScaler()
        elif normalMethod == 3:
            print("Using MinMaxScaler()...")
            sc = preprocessing.MinMaxScaler()
        else:
            raise ValueError("normalMethod must be 1, 2, or 3")

        sc.fit(X_train)
        X_train_std = sc.transform(X_train)
        X_test_std = sc.transform(X_test)

        if isPCA:
            X_train, X_test = reduceDim.featureExtraction(X_train_std, X_test_std, n_components)
        else:
            X_train = X_train_std
            X_test = X_test_std

        # re-attach the target column to the transformed features
        tr_set = np.c_[tr_set[:, 0], X_train]
        te_set = np.c_[te_set[:, 0], X_test]

        # train and test
        # results are Error objects
        tr_result = elmr.train(tr_set)
        te_result = elmr.test(te_set)
        # expected_targets holds the true values; predicted_targets the model output
        y_test_list = te_result.expected_targets
        y_pred = te_result.predicted_targets
        if isRegression:
            # regression mode: return the raw predictions (this exits on the
            # first iteration)
            return y_pred
        else:
            sum_mean = 0
            for i in range(len(y_pred)):
                # round each prediction to 4 decimal places, then accumulate
                # the squared error
                sum_mean += (round(float(y_pred[i]), 4) - y_test_list[i]) ** 2
            sum_erro = np.sqrt(sum_mean / len(y_pred))
            rfList.append(sum_erro)
            print("This is RMSE for ELM: ", sum_erro)
            print("This is iteration num: ", j + 1)
    return rfList
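A hypothetical call site for buildELM (the file name and argument values are illustrative, not from the original project):

import elm
import numpy as np

data = elm.read("boston.data")  # first column is the target
rmse_per_run = buildELM(data, iterNum=10, isNormal=True, isRegression=False,
                        isPCA=False, n_components=5, normalMethod=2)
print("mean RMSE over runs:", np.mean(rmse_per_run))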