Example #1
0
def perform_experiment_n(rfile,
                         training_data,
                         data_size,
                         test_data,
                         binary=None,
                         learning_rate=0.1,
                         eras=1,
                         epochs=20,
                         plot_rows=1,
                         plot_cols=1,
                         title='Experiment',
                         sub='',
                         logging=-1,
                         plot_what='er'):
    """ Train the network and output results for one experiment. 
        rfile: a string; the name of an R plot script file to print out. If a path to a sub-directory is given, the sub-directory must exist.
        training_data: a matrix; a sequence of training vectors.
        data_size: the length of a vector in the training data.
        test_data: a matrix; a sequence of target vectors, against which to calculate error of predictions.
        binary: truthy or falsy; whether to treat input data as binary and translate to a bit vector for error calculation. Defaults to [1].
        learning_rate: a scalar (float); learning rate for gradient descent.
        eras: a scalar; the number of eras to train for. Each era continues for the given number of epochs. Must be at least 1.
        epochs: a scalar; the number of epochs to train for.
        plot_rows: a scalar; the number of rows in each sub-plot in the R plot.
        plot_cols: a scalar; the number of columns in each sub-plot in the R plot.
        title: a string; the main title of the R plot.
        sub: a string; the subtitle of the R plot.
        logging: a scalar; the logging level assigned to lstm_rnn.logging_level.
        plot_what: a string; which quantity to plot (passed through to the R plot printer, e.g. 'er').
    """

    # Use a None sentinel instead of a mutable default argument; [1] is the
    # effective (truthy) default documented above.
    if binary is None:
        binary = [1]

    # Set the switch controlling whether test data should be translated to bit vectors for error calculation
    lstm_rnn.binary_data = binary

    # Set logging level:
    lstm_rnn.logging_level = logging

    Eras = []

    # Train for the given number of epochs, repeating for the given number of eras.
    for i in range(0, eras):
        one_era = lstm_rnn.unfold_for_n_epochs(training_data, data_size,
                                               test_data, learning_rate,
                                               epochs)
        Eras.append(one_era['data'])

    pr.print_r_plot_file_eras(rfile, Eras, plot_rows, plot_cols, title, sub,
                              plot_what)

    # Activate the trained network on each test vector and log the resulting
    # bit-vector activation.
    cell = one_era['net']
    for i in range(len(test_data)):
        stimulus = test_data[i]
        activation = cell.state_update(stimulus)
        activation = lstm_rnn.reals_to_bit_vector(activation)
        lstm_rnn.logging("Activation for %s: %s" % (stimulus, activation), -1)
Example #2
0
def experiment_5():
    """ Train a deep network with two LSTM cells stacked one on top of the other.
    """

    rfile = './experiment_5/experiment_5.r'
    training_data = datasets.binary_counting_train
    data_size = len(datasets.binary_counting_train[0])
    tst_data = datasets.binary_counting_test
    binary = [1]
    learning_rate = 0.05
    eras = 10
    epochs = 80
    plot_rows = 2
    plot_cols = 5
    title = 'Experiment 5.'
    sub = 'Counting in binary (stacked cells)'

    # Configure the lstm_rnn module: binary error calculation, logging level 3.
    lstm_rnn.binary_data = binary
    lstm_rnn.logging_level = 3

    # One training run per era; the learning rate grows by 0.10 between eras.
    era_results = []
    for _ in range(eras):
        result = lstm_rnn.deep_unfold_for_n_epochs(training_data, data_size,
                                                   tst_data, learning_rate,
                                                   epochs)
        era_results.append(result['data'])
        learning_rate += .10

    pr.print_r_plot_file_eras(rfile, era_results, plot_rows, plot_cols, title,
                              sub, 'er')

    # Activate the trained two-cell stack on each test vector and log the
    # resulting bit-vector activation.
    cell1, cell2 = result['block']
    for stimulus in tst_data:
        lstm_rnn.logging("cell 1 activation ======================", 2)
        activation = cell1.state_update(stimulus)
        # Feed the first cell's output forward as the second cell's h(t-1).
        cell2.ht_min_1 = activation
        lstm_rnn.logging("cell 2 activation ======================", 2)
        activation = cell2.state_update(stimulus)
        activation = lstm_rnn.reals_to_bit_vector(activation)
        lstm_rnn.logging("Activation for %s: %s" % (stimulus, activation), -1)
Example #3
0
def experiment_3():
    """ Train a network with different learning rate values. 
        NOTE: the R script file generated for this experiment needs a bit of hand-retouching
        to adjust the margins of the main plot figure and to add in the Eta (learning rate) 
        for each Era (those are in the accompanying log file). 
        Make sure to keep a backup if re-generating. 
        If disaster strikes, replace the first line in the file with:
        par(oma=c(1,4,3,1),mfrow=c(2,4))
    """

    rfile = './experiment_3/experiment_3.r'
    training_data = datasets.embedded_reber_t
    data_size = len(datasets.embedded_reber_t[0])
    tst_data = datasets.embedded_reber_s
    binary = [1]
    learning_rate = 0.01
    eras = 8
    epochs = 80
    plot_rows = 2
    plot_cols = 4
    title = 'Experiment 3.'
    sub = 'Counting in binary (iterating learning rate)'

    lstm_rnn.binary_data = binary

    lstm_rnn.logging_level = 3

    Eras = []

    for i in range(0, eras):
        print "Epoch learning rate: %s ==============================" % learning_rate
        one_era = lstm_rnn.unfold_for_n_epochs(training_data, data_size,
                                               tst_data, learning_rate, epochs)
        Eras.append(one_era['data'])
        learning_rate = learning_rate + .10

    pr.print_r_plot_file_eras(rfile, Eras, plot_rows, plot_cols, title, sub,
                              'er')

    cell = one_era['net']
    for i in range(len(tst_data)):
        stimulus = tst_data[i]
        activation = cell.state_update(stimulus)
        activation = lstm_rnn.reals_to_bit_vector(activation)
        lstm_rnn.logging("Activation for %s: %s" % (stimulus, activation), -1)
Example #4
0
def experiment_5():
    """ Train a deep network with two LSTM cells stacked one on top of
        the other.
    """

    # Experiment parameters.
    rfile = './experiment_5/experiment_5.r'
    training_data = datasets.binary_counting_train
    data_size = len(datasets.binary_counting_train[0])
    tst_data = datasets.binary_counting_test
    binary = [1]
    eta = 0.05       # initial learning rate; raised by 0.10 each era
    eras = 10
    epochs = 80
    plot_rows = 2
    plot_cols = 5
    title = 'Experiment 5.'
    sub = 'Counting in binary (stacked cells)'

    lstm_rnn.binary_data = binary
    lstm_rnn.logging_level = 3

    collected = []
    for era in range(eras):
        outcome = lstm_rnn.deep_unfold_for_n_epochs(training_data,
                                                    data_size,
                                                    tst_data,
                                                    eta,
                                                    epochs)
        collected.append(outcome['data'])
        eta = eta + .10

    pr.print_r_plot_file_eras(rfile, collected, plot_rows, plot_cols,
                              title, sub, 'er')

    # Test the trained stack: activate cell 1, feed its output into cell 2
    # as the previous hidden state, then activate cell 2.
    cell1, cell2 = outcome['block']
    for idx in range(len(tst_data)):
        stimulus = tst_data[idx]
        lstm_rnn.logging("cell 1 activation ======================", 2)
        activation = cell1.state_update(stimulus)
        cell2.ht_min_1 = activation
        lstm_rnn.logging("cell 2 activation ======================", 2)
        activation = cell2.state_update(stimulus)
        activation = lstm_rnn.reals_to_bit_vector(activation)
        lstm_rnn.logging("Activation for %s: %s" % (stimulus, activation), -1)
Example #5
0
def experiment_3():
    """ Train a network with different learning rate values. 
        NOTE: the R script file generated for this experiment needs a bit of hand-retouching
        to adjust the margins of the main plot figure and to add in the Eta (learning rate) 
        for each Era (those are in the accompanying log file). 
        Make sure to keep a backup if re-generating. 
        If disaster strikes, replace the first line in the file with:
        par(oma=c(1,4,3,1),mfrow=c(2,4))
    """                                                                     

    rfile='./experiment_3/experiment_3.r'
    training_data = datasets.embedded_reber_t
    data_size = len(datasets.embedded_reber_t[0])
    tst_data = datasets.embedded_reber_s
    binary=[1]
    learning_rate=0.01
    eras=8
    epochs=80
    plot_rows=2
    plot_cols=4
    title='Experiment 3.'
    sub='Counting in binary (iterating learning rate)'


    lstm_rnn.binary_data = binary

    lstm_rnn.logging_level = 3 
    
    Eras = []

    for i in range(0,eras):
        print "Epoch learning rate: %s ==============================" % learning_rate
        one_era = lstm_rnn.unfold_for_n_epochs(training_data, data_size, tst_data, learning_rate, epochs)
        Eras.append(one_era['data'])
        learning_rate = learning_rate +.10 
    
    pr.print_r_plot_file_eras(rfile, Eras, plot_rows, plot_cols, title, sub, 'er')
    
    cell = one_era['net']
    for i in range(len(tst_data)):
        stimulus = tst_data[i]
        activation = cell.state_update(stimulus)
        activation = lstm_rnn.reals_to_bit_vector(activation)
        lstm_rnn.logging("Activation for %s: %s" %(stimulus, activation),-1)
Example #6
0
def perform_experiment_n(rfile, training_data, data_size, test_data,
                         binary=None, learning_rate=0.1, eras=1, epochs=20,
                         plot_rows=1, plot_cols=1, title='Experiment', sub='',
                         logging=-1, plot_what='er'):
    """ Train the network and output results for one experiment. 
        rfile: a string; the name of an R plot script file to print out. If a path to a sub-directory is given, the sub-directory must exist.
        training_data: a matrix; a sequence of training vectors.
        data_size: the length of a vector in the training data.
        test_data: a matrix; a sequence of target vectors, against which to calculate error of predictions.
        binary: truthy or falsy; whether to treat input data as binary and translate to a bit vector for error calculation. Defaults to [1].
        learning_rate: a scalar (float); learning rate for gradient descent.
        eras: a scalar; the number of eras to train for. Each era continues for the given number of epochs. Must be at least 1.
        epochs: a scalar; the number of epochs to train for.
        plot_rows: a scalar; the number of rows in each sub-plot in the R plot.
        plot_cols: a scalar; the number of columns in each sub-plot in the R plot.
        title: a string; the main title of the R plot.
        sub: a string; the subtitle of the R plot.
        logging: a scalar; the logging level assigned to lstm_rnn.logging_level.
        plot_what: a string; which quantity to plot (passed through to the R plot printer, e.g. 'er').
    """

    # Use a None sentinel instead of a mutable default argument; [1] is the
    # effective (truthy) default documented above.
    if binary is None:
        binary = [1]

    # Set the switch controlling whether test data should be translated to bit vectors for error calculation
    lstm_rnn.binary_data = binary

    # Set logging level:
    lstm_rnn.logging_level = logging

    Eras = []

    # Train for the given number of epochs, repeating for the given number of eras.
    for i in range(0, eras):
        one_era = lstm_rnn.unfold_for_n_epochs(training_data, data_size,
                                               test_data, learning_rate,
                                               epochs)
        Eras.append(one_era['data'])

    pr.print_r_plot_file_eras(rfile, Eras, plot_rows, plot_cols, title, sub,
                              plot_what)

    # Activate the trained network on each test vector and log the resulting
    # bit-vector activation.
    cell = one_era['net']
    for i in range(len(test_data)):
        stimulus = test_data[i]
        activation = cell.state_update(stimulus)
        activation = lstm_rnn.reals_to_bit_vector(activation)
        lstm_rnn.logging("Activation for %s: %s" % (stimulus, activation), -1)