import elm


def test_elmr_boston():

    # load dataset
    data = elm.read("tests/data/boston.data")

    # create a regressor
    elmr = elm.ELMRandom()

    try:
        # search for best parameter for this dataset
        # elmr.search_param(data, cv="kfold", of="rmse")

        # split data in training and testing sets
        tr_set, te_set = elm.split_sets(data, training_percent=.8, perm=True)

        #train and test
        tr_result = elmr.train(tr_set)
        te_result = elmr.test(te_set)

    except Exception:
        ERROR = 1
    else:
        ERROR = 0

    assert (ERROR == 0)
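
The test above leaves the parameter search commented out. A minimal sketch of the same flow with search_param enabled, using only calls that appear elsewhere in these examples (the dataset path is the one used above):

import elm

data = elm.read("tests/data/boston.data")
elmr = elm.ELMRandom()

# tune the regressor with k-fold cross-validation, minimizing RMSE
elmr.search_param(data, cv="kfold", of="rmse")

# 80/20 split with shuffling, then train and test
tr_set, te_set = elm.split_sets(data, training_percent=.8, perm=True)
tr_result = elmr.train(tr_set)
te_result = elmr.test(te_set)
print(te_result.get_rmse())
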
Example #2
def test_elmr_boston():

    # load dataset
    data = elm.read("elmTestData/boston.data")

    # create a regressor
    elmr = elm.ELMRandom()

    try:
        # search for best parameter for this dataset
        # elmr.search_param(data, cv="kfold", of="rmse")

        # split data in training and testing sets
        tr_set, te_set = elm.split_sets(data, training_percent=.8, perm=True)

        #train and test
        tr_result = elmr.train(tr_set)
        te_result = elmr.test(te_set)

    except Exception:
        ERROR = 1
    else:
        ERROR = 0

    assert (ERROR == 0)
Example #3
import numpy as np

# `data` and `utils` are project-local helper modules (velocity-data loading
# and dict-to-array transforms) used by this example
import data
import utils


def main():

    # Load in 2D velocity data
    velocity = data.load_data()
    # data.example_of_data(velocity)
    # form testing and training sets for velocity data
    X_train, y_train, X_test, y_test = data.form_train_test_sets(velocity)

    # Data transformation
    #print(X_test[0]['u'].shape)
    print("len of y", len(y_test))
    # print("shape of y", y_test.shape)
    #print(y_train)

    #print(X_train['u'].shape)

    import elm as standard_elm
    # create a classifier
    elmk = standard_elm.ELMKernel()
    nn_structure = [9, 100, 1]
    x, y = utils.transform_dict_for_nn(X_train, y_train, nn_structure[0])
    x = np.transpose(x)
    y = np.transpose([y])

    # elm expects each row as [target, features...], so y_train is prepended to x_train
    tr_set = np.concatenate((y, x), 1)

    x_test, y_test = utils.transform_dict_for_nn(X_test[0], y_test[0],
                                                 nn_structure[0])
    #x_test = np.transpose(x_test)
    #y_test = np.transpose([y_test])

    #te_set = np.concatenate((y_test, x_test), 1)

    # From here on the example switches to the boston.data file;
    # the velocity-based tr_set built above is overwritten below
    dataa = standard_elm.read("boston.data")

    # re-create the classifier
    elmk = standard_elm.elmk.ELMKernel()

    # split data in training and testing sets
    # use 80% of dataset to training and shuffle data before splitting
    tr_set, te_set = standard_elm.split_sets(dataa,
                                             training_percent=.8,
                                             perm=True)

    #train and test
    # results are Error objects
    tr_result = elmk.train(tr_set)
    te_result = elmk.test(te_set)
    print(te_result.get_accuracy())
    print(te_result.predicted_targets)
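
    # To inspect the predictions, set them against the expected targets; in the
    # format used by elm.read the first column of each row is the target (see
    # the load() helper in Example #6). A sketch under that assumption:
    expected = np.asarray(te_set)[:, 0]
    predicted = np.asarray(te_result.predicted_targets).ravel()
    print(np.c_[expected[:5], predicted[:5]])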
def test_elmk_iris():

    # load dataset
    data = elm.read("tests/data/iris.data")

    # create a classifier
    elmk = elm.ELMKernel()

    try:
        # search for best parameter for this dataset
        elmk.search_param(data, cv="kfold", of="accuracy", eval=10)

        # split data in training and testing sets
        tr_set, te_set = elm.split_sets(data, training_percent=.8, perm=True)

        #train and test
        tr_result = elmk.train(tr_set)
        te_result = elmk.test(te_set)
    except Exception:
        ERROR = 1
    else:
        ERROR = 0

    assert (ERROR == 0)
def test_elmr_iris():

    # load dataset
    data = elm.read("tests/data/iris.data")

    # create a classifier
    elmr = elm.ELMRandom()

    try:
        # search for best parameter for this dataset
        elmr.search_param(data, cv="kfold", of="accuracy", eval=10)

        # split data in training and testing sets
        tr_set, te_set = elm.split_sets(data, training_percent=.8, perm=True)

        #train and test
        tr_result = elmr.train(tr_set)
        te_result = elmr.test(te_set)
    except Exception:
        ERROR = 1
    else:
        ERROR = 0

    assert (ERROR == 0)
Example #6
    def load(self):
        # elm.read returns rows in [target, features...] order
        data = elm.read(self.__full_path)
        x = data[:, 1:]  # features
        y = data[:, 0]   # targets
        return x, y
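
The method above appears to belong to a wrapper class that stores the dataset path. A minimal sketch of such a class, assuming a hypothetical name and constructor (only load() comes from the snippet):

import elm


class Dataset:

    def __init__(self, full_path):
        # hypothetical constructor; the original class body is not shown above
        self.__full_path = full_path

    def load(self):
        data = elm.read(self.__full_path)
        x = data[:, 1:]  # features
        y = data[:, 0]   # targets
        return x, y


x, y = Dataset("tests/data/iris.data").load()
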
Example #7
# (the enclosing function header was missing from this excerpt; the name and
# dataset path below are assumed from the __main__ block that follows)
def test_elmr_diabetes():

    # load dataset
    data = elm.read("elmTestData/diabetes.data")

    # create a regressor
    elmr = elm.ELMRandom()

    try:
        # split data in training and testing sets
        tr_set, te_set = elm.split_sets(data, training_percent=.8, perm=True)

        # train and test
        tr_result = elmr.train(tr_set)
        te_result = elmr.test(te_set)
        print(tr_result.get_accuracy())
        print(te_result.get_accuracy())
    except Exception:
        ERROR = 1
    else:
        ERROR = 0

    assert (ERROR == 0)
    # assert (te_result.get_rmse() <= 70)


if __name__ == '__main__':
    # load dataset
    data = elm.read("elmTestData/diabetes.data")

    # create a regressor
    elmr = elm.ELMRandom()
    tr_set, te_set = elm.split_sets(data, training_percent=.8, perm=True)

    # train and test
    tr_result = elmr.train(tr_set)
    te_result = elmr.test(te_set)
    print(tr_result.get_rmse())
    print(te_result.get_rmse())
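
    # Both model types share the same train/test API; as a sketch (using only
    # calls shown in the examples above), the kernel variant can be scored on
    # the same split for comparison
    elmk = elm.ELMKernel()
    print(elmk.train(tr_set).get_rmse())
    print(elmk.test(te_set).get_rmse())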