Example #1
    def test_mixture_of_experts(self):
        dataset = datasets.load_diabetes()
        data, target = asfloat(dataset.data), asfloat(dataset.target)
        insize, outsize = data.shape[1], 1

        input_scaler = preprocessing.MinMaxScaler((-1, 1))
        output_scaler = preprocessing.MinMaxScaler()
        x_train, x_test, y_train, y_test = cross_validation.train_test_split(
            input_scaler.fit_transform(data),
            output_scaler.fit_transform(target.reshape(-1, 1)),
            train_size=0.8
        )

        n_epochs = 10
        scaled_y_test = output_scaler.inverse_transform(y_test)
        scaled_y_test = scaled_y_test.reshape((y_test.size, 1))

        # -------------- Train single GradientDescent -------------- #

        bpnet = algorithms.GradientDescent(
            (insize, 20, outsize),
            step=0.1,
            verbose=False
        )
        bpnet.train(x_train, y_train, epochs=n_epochs)
        network_output = bpnet.predict(x_test)
        network_error = rmsle(output_scaler.inverse_transform(network_output),
                              scaled_y_test)

        # -------------- Train ensemble -------------- #

        moe = algorithms.MixtureOfExperts(
            networks=[
                algorithms.Momentum(
                    (insize, 20, outsize),
                    step=0.1,
                    batch_size=1,
                    verbose=False
                ),
                algorithms.Momentum(
                    (insize, 20, outsize),
                    step=0.1,
                    batch_size=1,
                    verbose=False
                ),
            ],
            gating_network=algorithms.Momentum(
                layers.Softmax(insize) > layers.Output(2),
                step=0.1,
                verbose=False
            )
        )
        moe.train(x_train, y_train, epochs=n_epochs)
        ensemble_output = moe.predict(x_test)
        ensemble_error = rmsle(
            output_scaler.inverse_transform(ensemble_output),
            scaled_y_test
        )

        self.assertGreater(network_error, ensemble_error)
Example #2
    def test_mixture_of_experts(self):
        dataset = datasets.load_diabetes()
        data, target = asfloat(dataset.data), asfloat(dataset.target)
        insize, outsize = data.shape[1], 1

        input_scaler = preprocessing.MinMaxScaler((-1, 1))
        output_scaler = preprocessing.MinMaxScaler()
        x_train, x_test, y_train, y_test = model_selection.train_test_split(
            input_scaler.fit_transform(data),
            output_scaler.fit_transform(target.reshape(-1, 1)),
            train_size=0.8)

        n_epochs = 10
        scaled_y_test = output_scaler.inverse_transform(y_test)
        scaled_y_test = scaled_y_test.reshape((y_test.size, 1))

        # -------------- Train single GradientDescent -------------- #

        bpnet = algorithms.GradientDescent((insize, 20, outsize),
                                           step=0.1,
                                           verbose=False)
        bpnet.train(x_train, y_train, epochs=n_epochs)
        network_output = bpnet.predict(x_test)
        network_error = rmsle(output_scaler.inverse_transform(network_output),
                              scaled_y_test)

        # -------------- Train ensemble -------------- #

        moe = algorithms.MixtureOfExperts(
            networks=[
                algorithms.Momentum((insize, 20, outsize),
                                    step=0.1,
                                    batch_size=1,
                                    verbose=False),
                algorithms.Momentum((insize, 20, outsize),
                                    step=0.1,
                                    batch_size=1,
                                    verbose=False),
            ],
            gating_network=algorithms.Momentum(
                layers.Input(insize) > layers.Softmax(2),
                step=0.1,
                verbose=False))
        moe.train(x_train, y_train, epochs=n_epochs)
        ensemble_output = moe.predict(x_test)

        ensemble_error = rmsle(
            output_scaler.inverse_transform(ensemble_output), scaled_y_test)

        self.assertGreater(network_error, ensemble_error)
Example #3
 def test_rmsle(self):
     actual = np.e ** (np.array([1, 2, 3, 4])) - 1
     predicted = np.e ** (np.array([4, 3, 2, 1])) - 1
     self.assertEqual(
         asfloat(np.sqrt(5)),
         estimators.rmsle(actual, predicted)
     )
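A minimal sketch of the metric this test exercises, assuming estimators.rmsle follows the standard root mean squared logarithmic error definition. With the arrays above, the log-space differences are -3, -1, 1 and 3, so the expected value is sqrt((9 + 1 + 1 + 9) / 4) = sqrt(5).

import numpy as np

def rmsle_sketch(actual, predicted):
    # Standard RMSLE: root mean squared difference of log(1 + x) terms
    log_diff = np.log1p(actual) - np.log1p(predicted)
    return np.sqrt(np.mean(log_diff ** 2))

actual = np.e ** np.array([1, 2, 3, 4]) - 1
predicted = np.e ** np.array([4, 3, 2, 1]) - 1
assert np.isclose(rmsle_sketch(actual, predicted), np.sqrt(5))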
Example #4
    def test_pipeline(self):
        dataset = datasets.load_diabetes()
        target_scaler = preprocessing.MinMaxScaler()
        target = dataset.target.reshape(-1, 1)

        x_train, x_test, y_train, y_test = train_test_split(
            dataset.data,
            target_scaler.fit_transform(target),
            train_size=0.85
        )

        network = algorithms.GradientDescent(
            connection=[
                layers.Input(10),
                layers.Sigmoid(25),
                layers.Sigmoid(1),
            ],
            show_epoch=100,
            verbose=False,
        )
        pipeline = Pipeline([
            ('min_max_scaler', preprocessing.MinMaxScaler()),
            ('gd', network),
        ])
        pipeline.fit(x_train, y_train, gd__epochs=50)
        y_predict = pipeline.predict(x_test)

        error = rmsle(target_scaler.inverse_transform(y_test),
                      target_scaler.inverse_transform(y_predict).round())
        self.assertAlmostEqual(0.48, error, places=2)
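For context, the gd__epochs=50 keyword relies on sklearn's step-prefixed fit-parameter routing, so it is forwarded to the fit call of the 'gd' step. A hedged, step-by-step equivalent of the pipeline fit (names reused from the test above; illustrative only):

# Roughly what pipeline.fit(x_train, y_train, gd__epochs=50) does, spelled out
x_train_scaled = preprocessing.MinMaxScaler().fit_transform(x_train)
network.train(x_train_scaled, y_train, epochs=50)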
Example #5
    def test_pipeline(self):
        dataset = datasets.load_diabetes()
        target_scaler = preprocessing.MinMaxScaler()
        target = dataset.target.reshape(-1, 1)

        x_train, x_test, y_train, y_test = train_test_split(
            dataset.data, target_scaler.fit_transform(target), train_size=0.85)

        network = algorithms.GradientDescent(
            connection=[
                layers.Sigmoid(10),
                layers.Sigmoid(25),
                layers.Output(1),
            ],
            show_epoch=100,
            verbose=False,
        )
        pipeline = Pipeline([
            ('min_max_scaler', preprocessing.MinMaxScaler()),
            ('gd', network),
        ])
        pipeline.fit(x_train, y_train, gd__epochs=50)
        y_predict = pipeline.predict(x_test)

        error = rmsle(target_scaler.inverse_transform(y_test),
                      target_scaler.inverse_transform(y_predict).round())
        self.assertAlmostEqual(0.47, error, places=2)
Example #6
    def test_pandas_for_bp(self):
        dataset = datasets.load_diabetes()
        target = dataset.target.reshape(-1, 1)

        input_scaler = preprocessing.MinMaxScaler()
        target_scaler = preprocessing.MinMaxScaler()

        n_features = dataset.data.shape[1]
        input_columns = ['column_' + str(i) for i in range(n_features)]

        pandas_data = pd.DataFrame(dataset.data, columns=input_columns)
        pandas_data['target'] = target_scaler.fit_transform(target)
        pandas_data[input_columns] = input_scaler.fit_transform(
            pandas_data[input_columns])

        x_train, x_test, y_train, y_test = train_test_split(
            pandas_data[input_columns], pandas_data['target'], test_size=0.15)

        bpnet = algorithms.GradientDescent(
            connection=[
                layers.Input(10),
                layers.Sigmoid(30),
                layers.Sigmoid(1),
            ],
            show_epoch=100,
        )
        bpnet.train(x_train, y_train, epochs=50)
        y_predict = bpnet.predict(x_test).reshape(-1, 1)
        y_test = y_test.values.reshape(-1, 1)

        error = estimators.rmsle(
            target_scaler.inverse_transform(y_test),
            target_scaler.inverse_transform(y_predict).round())
        self.assertAlmostEqual(0.48, error, places=2)
Example #7
 def test_rmsle(self):
     actual = np.e ** (np.array([1, 2, 3, 4])) - 1
     predicted = np.e ** (np.array([4, 3, 2, 1])) - 1
     self.assertEqual(
         asfloat(np.sqrt(5)),
         estimators.rmsle(actual, predicted)
     )
Example #8
def run_neural_net():

    import_modules()

    dataset = datasets.load_boston()
    data, target = dataset.data, dataset.target

    data_scalar = preprocessing.MinMaxScaler()
    target_scalar = preprocessing.MinMaxScaler()

    data = data_scalar.fit_transform(data)
    target = target_scalar.fit_transform(target.reshape(-1, 1))

    environment.reproducible()

    x_train, x_test, y_train, y_test = train_test_split(data,
                                                        target,
                                                        train_size=0.85)

    cgnet = algorithms.ConjugateGradient(
        connection=[
            layers.Input(13),
            layers.Sigmoid(75),
            layers.Sigmoid(25),
            layers.Sigmoid(1),
        ],
        search_method='golden',
        show_epoch=1,
        verbose=True,
        addons=[algorithms.LinearSearch],
    )

    cgnet.train(x_train, y_train, x_test, y_test, epochs=30)

    plots.error_plot(cgnet)

    y_predict = cgnet.predict(x_test).round(1)
    error = rmsle(target_scalar.inverse_transform(y_test),
                  target_scalar.inverse_transform(y_predict))

    return error
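A hedged invocation sketch for the function above; the __main__ guard and the printed label are illustrative, and it assumes import_modules() makes the required libraries available when the script runs.

if __name__ == '__main__':
    boston_error = run_neural_net()
    print("Boston housing RMSLE: {}".format(boston_error))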
Example #9
    def test_linear_search(self):
        methods = [
            ('golden', 0.34276),
            ('brent', 0.35192),
        ]

        for method_name, valid_error in methods:
            np.random.seed(self.random_seed)

            dataset = datasets.load_boston()
            data, target = dataset.data, dataset.target

            data_scaler = preprocessing.MinMaxScaler()
            target_scaler = preprocessing.MinMaxScaler()

            x_train, x_test, y_train, y_test = train_test_split(
                data_scaler.fit_transform(data),
                target_scaler.fit_transform(target.reshape(-1, 1)),
                train_size=0.85
            )

            cgnet = algorithms.ConjugateGradient(
                connection=[
                    layers.Input(13),
                    layers.Sigmoid(50),
                    layers.Sigmoid(1),
                ],
                show_epoch=1,
                verbose=False,
                search_method=method_name,
                tol=0.1,
                addons=[algorithms.LinearSearch],
            )
            cgnet.train(x_train, y_train, epochs=4)
            y_predict = cgnet.predict(x_test).round(1)

            error = rmsle(target_scaler.inverse_transform(y_test),
                          target_scaler.inverse_transform(y_predict))

            self.assertAlmostEqual(valid_error, error, places=5)
Example #10
    def test_pandas_for_bp(self):
        dataset = datasets.load_diabetes()
        target = dataset.target.reshape(-1, 1)

        input_scaler = preprocessing.MinMaxScaler()
        target_scaler = preprocessing.MinMaxScaler()

        n_features = dataset.data.shape[1]
        input_columns = ['column_' + str(i) for i in range(n_features)]

        pandas_data = pd.DataFrame(dataset.data, columns=input_columns)
        pandas_data['target'] = target_scaler.fit_transform(target)
        pandas_data[input_columns] = input_scaler.fit_transform(
            pandas_data[input_columns]
        )

        x_train, x_test, y_train, y_test = train_test_split(
            pandas_data[input_columns],
            pandas_data['target'],
            train_size=0.85
        )

        bpnet = algorithms.GradientDescent(
            connection=[
                layers.Sigmoid(10),
                layers.Sigmoid(30),
                layers.Output(1),
            ],
            show_epoch=100
        )
        bpnet.train(x_train, y_train, epochs=50)
        y_predict = bpnet.predict(x_test).reshape(-1, 1)
        y_test = y_test.values.reshape(-1, 1)

        error = estimators.rmsle(
            target_scaler.inverse_transform(y_test),
            target_scaler.inverse_transform(y_predict).round()
        )
        self.assertAlmostEqual(0.48, error, places=2)
Example #11
    def test_linear_search(self):
        methods = [
            ('golden', 0.34202),
            ('brent', 0.34942),
        ]

        for method_name, valid_error in methods:
            np.random.seed(self.random_seed)

            dataset = datasets.load_boston()
            data, target = dataset.data, dataset.target

            data_scaler = preprocessing.MinMaxScaler()
            target_scaler = preprocessing.MinMaxScaler()

            x_train, x_test, y_train, y_test = train_test_split(
                data_scaler.fit_transform(data),
                target_scaler.fit_transform(target.reshape(-1, 1)),
                train_size=0.85)

            cgnet = algorithms.ConjugateGradient(
                connection=[
                    layers.Input(13),
                    layers.Sigmoid(50),
                    layers.Sigmoid(1),
                ],
                show_epoch=1,
                verbose=False,
                search_method=method_name,
                tol=0.1,
                addons=[algorithms.LinearSearch],
            )
            cgnet.train(x_train, y_train, epochs=4)
            y_predict = cgnet.predict(x_test).round(1)

            error = rmsle(target_scaler.inverse_transform(y_test),
                          target_scaler.inverse_transform(y_predict))

            self.assertAlmostEqual(valid_error, error, places=5)
Example #12
    def go(self):
        raw = self.datafile.read().splitlines()

        data = self._prepare_data(raw[::2])
        target = self._prepare_target(raw[1::2])
        print(len(data))
        print(len(target))

        environment.reproducible()

        x_train, x_test, y_train, y_test = train_test_split(data,
                                                            target,
                                                            train_size=0.85)

        print(x_train[0])
        connections = [
            layers.Input(100),
            layers.Linear(200),
            layers.Sigmoid(150),
            layers.Sigmoid(5),
        ]

        cgnet = algorithms.ConjugateGradient(
            connection=connections,
            search_method='golden',
            show_epoch=25,
            verbose=True,
            addons=[algorithms.LinearSearch],
        )

        cgnet.train(x_train, y_train, x_test, y_test, epochs=100)
        plots.error_plot(cgnet)

        y_predict = cgnet.predict(x_test).round(1)
        error = rmsle(y_test, y_predict)
        print(error)

        with open('lib/net/base_searcher.pickle', 'wb') as f:
            pickle.dump(cgnet, f)
Example #13
 def scorer(network, X, y):
     result = network.predict(X)
     return rmsle(result[:, 0], y)
pnnStd = np.linspace(0.1, 2.5, 75) 
#pnnStd = [2]
PNN_NetworkName = 'PNN_Network'                      
trainNetwork = 1
if(trainNetwork):  
    RMSLE_PNN = []
    minRMSLE = 1e10
    bestStd = 0
    splitIdx = int(len(trainRange)/2)
    trainRangeFold1 = trainRange[:splitIdx]
    trainRangeFold2 = trainRange[(splitIdx+1):]
    print('PNN Training Results - Train Std dev input')  
    for x in pnnStd:
        nw = algorithms.PNN(std=x, verbose=False)
        nw.train(x_trainPNN[trainRangeFold1,:], y_trainPNN[trainRangeFold1,:])
        networkRMSLE = estimators.rmsle(y_trainPNN[trainRangeFold2,:], nw.predict(x_trainPNN[trainRangeFold2,:]))
                   
        if(minRMSLE > networkRMSLE):
            minRMSLE = networkRMSLE
            bestStd = x
            
        RMSLE_PNN.append(networkRMSLE)

    plt.figure()
    p1, = plt.plot(pnnStd, RMSLE_PNN, 'b')
    plt.xlabel('PNN Std.')
    plt.ylabel('Train RMSLE')
    plt.grid(True)       
    plt.title('Train RMSLE to determine PNN Std. Input')
    plt.show() 
Example #15
def scorer(network, X, y):
    result = network.predict(X)
    return estimators.rmsle(result, y)
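A hypothetical usage sketch for the scorer above with a neupy GRNN; the std value and the x_train/x_test/y_train/y_test arrays are assumptions, not part of the original snippet, and the target arrays are assumed to match the shape of the network output.

# Illustrative only: train a GRNN and evaluate it with the scorer defined above
grnn = algorithms.GRNN(std=0.5, verbose=False)
grnn.train(x_train, y_train)
validation_rmsle = scorer(grnn, x_test, y_test)
print("Validation RMSLE: {}".format(validation_rmsle))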
Example #16
data = dataset.data
target = dataset.target.reshape((-1, 1))

data_scaler = preprocessing.MinMaxScaler((-3, 3))
target_scaler = preprocessing.MinMaxScaler()

data = data_scaler.fit_transform(data)
target = target_scaler.fit_transform(target)

x_train, x_test, y_train, y_test = train_test_split(
    data, target, train_size=0.85
)

cgnet = algorithms.Hessian(
    connection=[
        layers.Sigmoid(13),
        layers.Sigmoid(50),
        layers.Sigmoid(10),
        layers.Output(1),
    ],
    verbose=True,
)

cgnet.train(x_train, y_train, x_test, y_test, epochs=3)
y_predict = cgnet.predict(x_test)

y_test = target_scaler.inverse_transform(y_test.reshape((-1, 1)))
y_predict = target_scaler.inverse_transform(y_predict).T.round(1)
error = estimators.rmsle(y_predict, y_test)
print("RMSLE = {}".format(error))
Example #17
 def scorer(network, X, y):
     result = network.predict(X)
     return rmsle(result[:, 0], y)
def scorer(network, X, y):
    result = network.predict(X)
    return estimators.rmsle(result, y)
Example #19
        layers.Sigmoid(50),
        layers.Sigmoid(2),
    ],
    search_method='golden',
    show_epoch=25,
    verbose=True,
    addons=[algorithms.LinearSearch],
)

# Train neural net
cgnet.train(x_train, y_train, x_test, y_test, epochs=100)

# Make predictions
print("Starting predictions")
y_predict = cgnet.predict(test)
error = rmsle(target_test, target_scaler.inverse_transform(y_predict))
print(error)

# write values to csv
#   lat,lon,year,bdrms,fbath,hbath,sf,res,condo,built
with open('predict2018.csv', 'w') as myfile:
    wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)
    wr.writerow([
        "latitude", "longitude", "year", "bedrooms", "full_bth", "half_bth",
        "square_foot", "res", "condo", "yr_built", "bldg_price", "land_price"
    ])
    for i in range(len(y_predict)):
        wr.writerow(
            data_scaler.inverse_transform(test)[i].tolist() +
            target_scaler.inverse_transform(y_predict)[i].tolist())
Example #20
    grnnStd = np.linspace(0.1, 2, 75)    
    #grnnStd = [1.25]
    GRNN_NetworkName = 'GRNN_Network'                      
    trainNetwork = 1
    if(trainNetwork):  
        RMSLE_GRNN = []
        minRMSLE = 1e10
        bestStd = 0
        splitIdx = int(len(trainRange)/2)
        trainRangeFold1 = trainRange[:splitIdx]
        trainRangeFold2 = trainRange[(splitIdx+1):]
        print('GRNN Training Results - Test Std dev input')  
        for x in grnnStd:
            grnnNW = algorithms.GRNN(std=x, verbose=False)
            grnnNW.train(x_train[trainRangeFold1,:], y_train[trainRangeFold1,:])
            networkRMSLE = estimators.rmsle(y_train[trainRangeFold2,:], grnnNW.predict(x_train[trainRangeFold2,:])[:,0])
            
            if(minRMSLE > networkRMSLE):
                minRMSLE = networkRMSLE
                bestStd = x
                
            RMSLE_GRNN.append(networkRMSLE)

        plt.figure()
        p1, = plt.plot(grnnStd, RMSLE_GRNN, 'b')
        plt.xlabel('GRNN Std.')
        plt.ylabel('Train RMSLE')
        plt.grid(True)       
        plt.title('Train RMSLE to determine GRNN Std. Input')
        plt.show() 
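A hypothetical follow-up to the sweep above: retrain with the best std that was found and score a held-out set. Only bestStd, trainRange, x_train and y_train come from the snippet; x_test and y_test are assumed to exist with the same layout as the training arrays.

# Illustrative only: retrain on the full training range using the best std found above
finalGRNN = algorithms.GRNN(std=bestStd, verbose=False)
finalGRNN.train(x_train[trainRange, :], y_train[trainRange, :])
testRMSLE = estimators.rmsle(y_test[:, 0], finalGRNN.predict(x_test)[:, 0])
print('Best std: {}, test RMSLE: {}'.format(bestStd, testRMSLE))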
    
Example #21
data = dataset.data
target = dataset.target.reshape((-1, 1))

data_scaler = preprocessing.MinMaxScaler((-3, 3))
target_scaler = preprocessing.MinMaxScaler()

data = data_scaler.fit_transform(data)
target = target_scaler.fit_transform(target)

x_train, x_test, y_train, y_test = train_test_split(data,
                                                    target,
                                                    train_size=0.85)

cgnet = algorithms.Hessian(
    connection=[
        layers.Sigmoid(13),
        layers.Sigmoid(50),
        layers.Sigmoid(10),
        layers.Output(1),
    ],
    verbose=True,
)

cgnet.train(x_train, y_train, x_test, y_test, epochs=3)
y_predict = cgnet.predict(x_test)

y_test = target_scaler.inverse_transform(y_test.reshape((-1, 1)))
y_predict = target_scaler.inverse_transform(y_predict).T.round(1)
error = estimators.rmsle(y_predict, y_test)
print("RMSLE = {}".format(error))

cgnet = algorithms.ConjugateGradient(
    connection=[
        layers.Sigmoid(len(input1[0])),
        layers.Sigmoid(100),
        layers.Output(1),
    ],
    search_method='golden',
    show_epoch=25,
    verbose=True,
    addons=[algorithms.LinearSearch],
)

cgnet.train(x_train, y_train, x_test, y_test, epochs=250)

# cgnet.plot_errors()

y_predict = cgnet.predict(x_test).round(1)
error = rmsle(target_scaler.inverse_transform(y_test),
              target_scaler.inverse_transform(y_predict))

search = data_scaler.fit_transform(list(df_test.fillna(0).values))
search_y_predict = cgnet.predict(search).round(2)

df_test["predict"] = target_scaler.inverse_transform(search_y_predict)

print(df_test["predict"])

df_test.to_csv("datasets/generations.test.predict.csv", encoding="utf-8")