Exemplo n.º 1
0
import gmllib.dataset as ds
import gmllib.neural_networks as nn
import gmllib.helpers as hlp

if __name__ == '__main__':
    # DBN (deep belief network) example on the synthetic XOR dataset:
    # unsupervised layer-wise pre-training followed by supervised fine-tuning.
    dataset = ds.DataSet(name='xor', trainx='xor_train_x.npy', trainy='xor_train_y.npy',
                         validationx='xor_val_x.npy', validationy='xor_val_y.npy',
                         testx='xor_test_x.npy', testy='xor_test_y.npy')

    # Unsupervised pre-training with two hidden layers of 10 units each.
    # return_hidden=False: the returned `h` is not used — it is overwritten by
    # the forward pass below.
    ws_vh, ws_v, ws_h, h, error = nn.dbn_train(dataset.train.x, H=[10, 10], batch_size=100,
                epoch_count=50, epsilon=0.1, momentum=.95, return_hidden=False, verbose=True)

    # nn.dbn_save(ws_vh, ws_v, ws_h, path='./datasets/synthetic_xor/', file_prefix='unsupervised_')

    # Supervised fine-tuning of the pre-trained weights; returns feed-forward
    # weights/biases plus validation predictions and error history.
    w, b, val_pred, err = nn.dbn_supervised_finetune(ws_vh, ws_h, dataset=dataset, batch_size=1, epoch_count=10, epsilon=0.1,
            momentum=0.9, stop_if_val_error_increase=False, verbose=True)

    # Evaluate the fine-tuned network on the held-out test set; h[-1] is the
    # output-layer activation (samples are columns, hence the transposes).
    h = nn.nn_forward_pass(dataset.test.x.T, w, b)
    test_err = hlp.calculate_classification_error(dataset.test.y, h[-1].T)
    print('Test set classification error: ' + repr(test_err))
    
Exemplo n.º 2
0
##    j, val_h = nn.dbn_forward_pass(ws_vh, ws_v, ws_h, x=val_x_gpu)
##    j, test_h = nn.dbn_forward_pass(ws_vh, ws_v, ws_h, x=test_x_gpu)
##    hlp.save_dataset('mnist_dbn_unsup', './', train_x=train_h, val_x=val_h, test_x=test_h)
##    
##    # code for loading the dbn back from disk
##    # ws_vh, ws_v, ws_h = nn.dbn_load(layer_count=3, path='./', file_prefix='unsup_')
##    # ------- TRAIN DBN (UNSUPERVISED) END-------------------------------------#
    
    # ---- TEST DBN (SUPERVISED) ---------------------------------------------#
    # use DBN to predict outputs for test set

    # load supervised dbn (3 layers of weights from the current directory)
    # NOTE(review): test_x_gpu / test_y_gpu are not defined in this fragment —
    # presumably built from the test set earlier, outside this view; verify.
    ws_vh, ws_v, ws_h = nn.dbn_load(layer_count=3, path='./', file_prefix='')

    # Sampling-based prediction (k=20 — presumably samples per test case;
    # confirm against nn.dbn_supervised_predict_sample).
    test_pred_h = nn.dbn_supervised_predict_sample(ws_vh, ws_v, ws_h, test_x_gpu, k=20)
    test_err_sup_s = hlp.calculate_classification_error(test_y_gpu, test_pred_h)
    print('Test set classification error with supervised DBN (sample): ' + repr(test_err_sup_s))
    # error about 13%

    # Exact prediction variant (contrast with the sampling version above).
    test_pred_h = nn.dbn_supervised_predict_exact(ws_vh, ws_v, ws_h, test_x_gpu)
    test_err_sup_e = hlp.calculate_classification_error(test_y_gpu, test_pred_h)
    print('Test set classification error with supervised DBN (exact): ' + repr(test_err_sup_e))
    # error about 5.5%
    # ---- TEST DBN (SUPERVISED) END------------------------------------------#
    
    
##    # ----TEST DBN (UNSUPERVISED) --------------------------------------------#
##    # train a single layer neural network on the learned latent features (output of DBN)
##    
##    # What we do and don't do here:
##    #   Now we have trained a DBN on MNIST, we have extracted latent features
Exemplo n.º 3
0
# 14 Feb 2014
#
# Goker Erdogan

import gmllib.dataset as ds
import gmllib.neural_networks as nn
import gmllib.helpers as hlp

if __name__ == '__main__':
    # Multilayer feed-forward neural network example on the synthetic XOR data.
    xor_data = ds.DataSet(name='xor',
                          trainx='xor_train_x.npy', trainy='xor_train_y.npy',
                          validationx='xor_val_x.npy', validationy='xor_val_y.npy',
                          testx='xor_test_x.npy', testy='xor_test_y.npy')

    # Tuning notes: a larger batch size works when momentum and epoch_count
    # are increased with it; epsilon rarely needs adjusting (0.1 is good);
    # H=2 also works.
    weights, biases, val_pred, err = nn.nn_train(xor_data,
                                                 H=[4],
                                                 epoch_count=10,
                                                 epsilon=.1,
                                                 batch_size=1,
                                                 momentum=0.9)

    # Score the trained network on the test set (samples are columns).
    activations = nn.nn_forward_pass(xor_data.test.x.T, weights, biases)
    test_err = hlp.calculate_classification_error(xor_data.test.y, activations[-1].T)
    print('Test set classification error: ' + repr(test_err))
Exemplo n.º 4
0
    ##    # code for loading the dbn back from disk
    ##    # ws_vh, ws_v, ws_h = nn.dbn_load(layer_count=3, path='./', file_prefix='unsup_')
    ##    # ------- TRAIN DBN (UNSUPERVISED) END-------------------------------------#

    # ---- TEST DBN (SUPERVISED) ---------------------------------------------#
    # use DBN to predict outputs for test set

    # load supervised dbn (3 layers of weights from the current directory)
    # NOTE(review): test_x_gpu / test_y_gpu are not defined in this fragment;
    # they are presumably created from the test set earlier, outside this view.
    ws_vh, ws_v, ws_h = nn.dbn_load(layer_count=3, path='./', file_prefix='')

    # Sampling-based prediction (k=20 — presumably samples per test case;
    # confirm against nn.dbn_supervised_predict_sample).
    test_pred_h = nn.dbn_supervised_predict_sample(ws_vh,
                                                   ws_v,
                                                   ws_h,
                                                   test_x_gpu,
                                                   k=20)
    test_err_sup_s = hlp.calculate_classification_error(
        test_y_gpu, test_pred_h)
    print('Test set classification error with supervised DBN (sample): ' +
          repr(test_err_sup_s))
    # error about 13%

    # Exact prediction variant (contrast with the sampling version above).
    test_pred_h = nn.dbn_supervised_predict_exact(ws_vh, ws_v, ws_h,
                                                  test_x_gpu)
    test_err_sup_e = hlp.calculate_classification_error(
        test_y_gpu, test_pred_h)
    print('Test set classification error with supervised DBN (exact): ' +
          repr(test_err_sup_e))
    # error about 5.5%
    # ---- TEST DBN (SUPERVISED) END------------------------------------------#

    ##    # ----TEST DBN (UNSUPERVISED) --------------------------------------------#
    ##    # train a single layer neural network on the learned latent features (output of DBN)