Code Example #1
def run_sda(datasets=None,
            batch_size=100,
            window_size=7,
            n_principle=4,
            pretraining_epochs=2000,
            pretrain_lr=0.02,
            training_epochs=10000,
            finetune_lr=0.008,
            hidden_layers_sizes=[310, 100],
            corruption_levels=[0., 0.]):
    """
    This function maps spatial PCs to a deep representation.
    
    Parameters:
    datasets:           A list containing 3 tuples. Each tuple has 2 entries,
                        which are theano.shared variables. They stand for the
                        train, valid, and test data.
    batch_size:         Batch size.
    pretraining_epochs: Pretraining epochs.
    pretrain_lr:        Pretraining learning rate.
    training_epochs:    Fine-tuning epochs.
    finetune_lr:        Fine-tuning learning rate.
    hidden_layers_sizes:A list containing integers. Each integer specifies the
                        size of a hidden layer.
    corruption_levels:  A list containing floats in the interval [0, 1]. Each
                        number specifies the corruption level of its corresponding
                        hidden layer.

    Return:
    spatial_rep:        2-D numpy.array. Deep representation for each spatial sample.
    test_score:         Accuracy these representations yield on the trained SdA.
    """

    print 'finetuning learning rate=', finetune_lr
    print 'pretraining learning rate=', pretrain_lr
    print 'pretraining epochs=', pretraining_epochs
    print 'fine-tuning epochs=', training_epochs
    print 'batch size=', batch_size
    print 'hidden layers sizes=', hidden_layers_sizes
    print 'corruption levels=', corruption_levels

    # compute number of minibatches for training, validation and testing
    n_train_batches = datasets[0][0].get_value(borrow=True).shape[0]
    n_train_batches /= batch_size

    # numpy random generator
    numpy_rng = numpy.random.RandomState(89677)
    print '... building the model'
    # construct the stacked denoising autoencoder class
    sda = SdA(numpy_rng=numpy_rng,
              n_ins=datasets[0][0].get_value(borrow=True).shape[1],
              hidden_layers_sizes=hidden_layers_sizes,
              n_outs=gnd_img.max())

    ################################################################################
    # PRETRAINING THE MODEL #
    #########################

    print '... getting the pretraining functions'
    pretraining_fns = sda.pretraining_functions(train_set_x=datasets[0][0],
                                                batch_size=batch_size)

    print '... pre-training the model'
    start_time = time.clock()
    ## Pre-train layer-wise
    for i in xrange(sda.n_layers):
        # go through pretraining epochs
        for epoch in xrange(pretraining_epochs):
            # go through the training set
            c = []
            for batch_index in xrange(n_train_batches):
                c.append(pretraining_fns[i](index=batch_index,
                                            corruption=corruption_levels[i],
                                            lr=pretrain_lr))

            if epoch % 100 == 0:
                print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
                print numpy.mean(c)

    end_time = time.clock()

    print >> sys.stderr, ('The pretraining code for file ' +
                          os.path.split(__file__)[1] + ' ran for %.2fm' %
                          ((end_time - start_time) / 60.))

    ################################################################################
    # FINETUNING THE MODEL #
    ########################

    # get the training, validation and testing function for the model
    print '... getting the finetuning functions'
    train_fn, validate_model, test_model = sda.build_finetune_functions(
        datasets=datasets, batch_size=batch_size, learning_rate=finetune_lr)

    print '... finetuning the model'
    # early-stopping parameters
    patience = 100 * n_train_batches  # look at this many examples regardless
    patience_increase = 2.  # wait this much longer when a new best is
    # found
    improvement_threshold = 0.995  # a relative improvement of this much is
    # considered significant
    validation_frequency = min(10 * n_train_batches, patience / 2)
    # go through this many
    # minibatches before checking the network
    # on the validation set; in this case we
    # check every epoch

    best_params = None
    best_validation_loss = numpy.inf
    test_score = 0.
    start_time = time.clock()

    done_looping = False
    epoch = 0

    while (epoch < training_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):
            minibatch_avg_cost = train_fn(minibatch_index)
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:
                validation_losses = validate_model()
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:
                    # improve patience if loss improvement is good enough
                    if (this_validation_loss <
                            best_validation_loss * improvement_threshold):
                        patience = max(patience, iter * patience_increase)

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = test_model()
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

    end_time = time.clock()
    print(('Optimization complete with best validation score of %f %%, '
           'with test performance %f %%') %
          (best_validation_loss * 100., test_score * 100.))
    print >> sys.stdout, ('The training code for file ' +
                          os.path.split(__file__)[1] + ' ran for %.2fm' %
                          ((end_time - start_time) / 60.))

    # keep the following line consistent with line 227, function "prepare_data"
    filename = 'ksc_l1sda_pt%d_ft%d_lrp%.4f_f%.4f_bs%d_pca%d_ws%d' % \
                (pretraining_epochs, training_epochs, pretrain_lr, finetune_lr,
                 batch_size, n_principle, window_size)

    print '... classifying test set with learnt model:'
    pred_func = theano.function(inputs=[sda.x], outputs=sda.logLayer.y_pred)
    pred_test = pred_func(datasets[2][0].get_value(borrow=True))
    true_test = datasets[2][1].get_value(borrow=True)
    true_valid = datasets[1][1].get_value(borrow=True)
    true_train = datasets[0][1].get_value(borrow=True)
    result_analysis(pred_test, true_train, true_valid, true_test)

    print '... classifying the whole image with learnt model:'
    print '...... extracting data'
    data_spectral, data_spatial, _, _ = \
        T_pca_constructor(hsi_img=img, gnd_img=gnd_img, n_principle=n_principle,
                          window_size=window_size, flag='unsupervised',
                          merge=True)

    start_time = time.clock()
    print '...... begin '
    y = pred_func(data_spectral) + 1
    print '...... done '
    end_time = time.clock()
    print 'finished, running time:%fs' % (end_time - start_time)

    y_rgb = cmap[y, :]
    margin = (window_size / 2) * 2  # floor it to a multiple of 2
    y_image = y_rgb.reshape(width - margin, height - margin, 3)
    scipy.misc.imsave(filename + 'wholeimg.png', y_image)
    print 'Saving classification results'
    sio.savemat(filename + 'wholeimg.mat',
                {'y': y.reshape(width - margin, height - margin)})

    ############################################################################
    print '... performing Student\'s t-test'
    best_c = 10000.
    best_g = 10.
    svm_classifier = svm.SVC(C=best_c, gamma=best_g, kernel='rbf')
    svm_classifier.fit(datasets[0][0].get_value(), datasets[0][1].get_value())

    data = [
        numpy.vstack((datasets[1][0].get_value(), datasets[2][0].get_value())),
        numpy.hstack((datasets[1][1].get_value(), datasets[2][1].get_value()))
    ]
    numpy_rng = numpy.random.RandomState(89677)
    num_test = 100
    print 'Total number of tests: %d' % num_test
    k_sae = []
    k_svm = []
    for i in xrange(num_test):
        [_, _], [_, _], [test_x, test_y], _ = \
        train_valid_test(data, ratio=[0, 1, 1], batch_size=1,
                         random_state=numpy_rng.random_integers(1e10))
        test_y = test_y + 1  # fix the label scale problem
        pred_y = pred_func(test_x)
        cm = confusion_matrix(test_y, pred_y)
        pr_a = cm.trace() * 1.0 / test_y.size
        pr_e = ((cm.sum(axis=0)*1.0/test_y.size) * \
                (cm.sum(axis=1)*1.0/test_y.size)).sum()
        k_sae.append((pr_a - pr_e) / (1 - pr_e))

        pred_y = svm_classifier.predict(test_x)
        cm = confusion_matrix(test_y, pred_y)
        pr_a = cm.trace() * 1.0 / test_y.size
        pr_e = ((cm.sum(axis=0)*1.0/test_y.size) * \
                (cm.sum(axis=1)*1.0/test_y.size)).sum()
        k_svm.append((pr_a - pr_e) / (1 - pr_e))

    std_k_sae = numpy.std(k_sae)
    std_k_svm = numpy.std(k_svm)
    mean_k_sae = numpy.mean(k_sae)
    mean_k_svm = numpy.mean(k_svm)
    left =    ( (mean_k_sae - mean_k_svm) * numpy.sqrt(num_test*2-2)) \
            / ( numpy.sqrt(2./num_test) * num_test * (std_k_sae**2 + std_k_svm**2) )

    rv = t(num_test * 2.0 - 2)
    right = rv.ppf(0.95)

    print '\tstd\t\tmean'
    print 'k_sae\t%f\t%f' % (std_k_sae, mean_k_sae)
    print 'k_svm\t%f\t%f' % (std_k_svm, mean_k_svm)
    if left > right:
        print 'left = %f, right = %f, test PASSED.' % (left, right)
    else:
        print 'left = %f, right = %f, test FAILED.' % (left, right)

    return test_score
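
The significance test at the end of this example repeatedly re-splits the held-out data, computes Cohen's kappa for the SdA and for an RBF-SVM baseline on each split, and then compares the two kappa samples with a two-sample t statistic against the 95% quantile of a t distribution with 2*num_test - 2 degrees of freedom (`t` here is presumably scipy.stats.t). As a minimal, self-contained sketch of the kappa computation the loop performs inline (the helper name cohen_kappa is illustrative, not part of the repository):

import numpy
from sklearn.metrics import confusion_matrix

def cohen_kappa(y_true, y_pred):
    """Cohen's kappa: (observed agreement - chance agreement) / (1 - chance agreement)."""
    cm = confusion_matrix(y_true, y_pred)
    n = float(cm.sum())
    pr_a = cm.trace() / n                                         # observed agreement
    pr_e = ((cm.sum(axis=0) / n) * (cm.sum(axis=1) / n)).sum()    # agreement expected by chance
    return (pr_a - pr_e) / (1 - pr_e)
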
Code Example #2
        print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
        print numpy.mean(c)

end_time = time.clock()

print('The pretraining code for file ' + os.path.split(__file__)[1] +
      ' ran for %.2fm' % ((end_time - start_time) / 60.))

################################################################################
# FINETUNING THE MODEL #
########################

# get the training, validation and testing function for the model
print '... getting the finetuning functions'
train_fn, validate_model, test_model = sda.build_finetune_functions(
    datasets=datasets, batch_size=batch_size, learning_rate=finetune_lr)

print '... finetuning the model'
validation_frequency = 1000 * n_train_batches
# go through this many
# minibatches before checking the network
# on the validation set; in this case we
# check every epoch

best_params = None
best_validation_loss = numpy.inf
test_score = 0.
start_time = time.clock()

epoch = 0
while (epoch < training_epochs):
Code Example #3
def run_sda(datasets=None, batch_size=100,
            window_size=7, n_principle=3,
            pretraining_epochs=2000, pretrain_lr=0.02,
            training_epochs=10000,  finetune_lr=0.008, 
            hidden_layers_sizes=[310, 100], corruption_levels = [0., 0.]):
    """
    This function maps spatial PCs to a deep representation.
    
    Parameters:
    datasets:           A list containing 3 tuples. Each tuple has 2 entries,
                        which are theano.shared variables. They stand for the
                        train, valid, and test data.
    batch_size:         Batch size.
    pretraining_epochs: Pretraining epochs.
    pretrain_lr:        Pretraining learning rate.
    training_epochs:    Fine-tuning epochs.
    finetune_lr:        Fine-tuning learning rate.
    hidden_layers_sizes:A list containing integers. Each integer specifies the
                        size of a hidden layer.
    corruption_levels:  A list containing floats in the interval [0, 1]. Each
                        number specifies the corruption level of its corresponding
                        hidden layer.

    Return:
    spatial_rep:        2-D numpy.array. Deep representation for each spatial sample.
    test_score:         Accuracy these representations yield on the trained SdA.
    """
    
    print 'finetuning learning rate=', finetune_lr
    print 'pretraining learning rate=', pretrain_lr
    print 'pretraining epochs=', pretraining_epochs
    print 'fine-tuning epochs=', training_epochs
    print 'batch size=', batch_size
    print 'hidden layers sizes=', hidden_layers_sizes
    print 'corruption levels=', corruption_levels

    # compute number of minibatches for training, validation and testing
    n_train_batches = datasets[0][0].get_value(borrow=True).shape[0]
    n_train_batches /= batch_size

    # numpy random generator
    numpy_rng = numpy.random.RandomState(89677)
    print '... building the model'
    # construct the stacked denoising autoencoder class
    sda = SdA(numpy_rng=numpy_rng, n_ins=datasets[0][0].get_value(borrow=True).shape[1],
              hidden_layers_sizes=hidden_layers_sizes,
              n_outs=gnd_img.max())

    ################################################################################
                               # PRETRAINING THE MODEL #
                               #########################

    print '... getting the pretraining functions'
    pretraining_fns = sda.pretraining_functions(train_set_x=datasets[0][0],
                                                batch_size=batch_size)

    print '... pre-training the model'
    start_time = time.clock()
    ## Pre-train layer-wise
    for i in xrange(sda.n_layers):
        # go through pretraining epochs
        for epoch in xrange(pretraining_epochs):
            # go through the training set
            c = []
            for batch_index in xrange(n_train_batches):
                c.append(pretraining_fns[i](index=batch_index,
                         corruption=corruption_levels[i],
                         lr=pretrain_lr))
            
            if epoch % 100 == 0:
                print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
                print numpy.mean(c)

    end_time = time.clock()

    print >> sys.stderr, ('The pretraining code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))

    ################################################################################
                                # FINETUNING THE MODEL #
                                ########################

    # get the training, validation and testing function for the model
    print '... getting the finetuning functions'
    train_fn, validate_model, test_model = sda.build_finetune_functions(
        datasets=datasets, batch_size=batch_size,
        learning_rate=finetune_lr)

    print '... finetuning the model'
    # early-stopping parameters
    patience = 100 * n_train_batches  # look at this many examples regardless
    patience_increase = 2.  # wait this much longer when a new best is
                            # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(10 * n_train_batches, patience / 2)
                            # go through this many
                            # minibatches before checking the network
                            # on the validation set; in this case we
                            # check every epoch

    best_params = None
    best_validation_loss = numpy.inf
    test_score = 0.
    start_time = time.clock()

    done_looping = False
    epoch = 0

    while (epoch < training_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):
            minibatch_avg_cost = train_fn(minibatch_index)
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:
                validation_losses = validate_model()
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:
                    # improve patience if loss improvement is good enough
                    if (this_validation_loss < best_validation_loss *
                        improvement_threshold):
                        patience = max(patience, iter * patience_increase)

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = test_model()
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

    end_time = time.clock()
    print(('Optimization complete with best validation score of %f %%, '
           'with test performance %f %%') %
                (best_validation_loss * 100., test_score * 100.))
    print >> sys.stdout, ('The training code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))

    # keep the following line consistent with line 227, function "prepare_data"
    filename = 'pavia_l1sda_pt%d_ft%d_lrp%.4f_f%.4f_bs%d_pca%d_ws%d' % \
                (pretraining_epochs, training_epochs, pretrain_lr, finetune_lr, 
                 batch_size, n_principle, window_size) 

    print '... saving parameters'
    sda.save_params(filename + '_params.pkl')

    print '... classifying test set with learnt model:'
    pred_func = theano.function(inputs=[sda.x], outputs=sda.logLayer.y_pred)
    pred_test = pred_func(datasets[2][0].get_value(borrow=True))
    true_test = datasets[2][1].get_value(borrow=True)
    true_valid = datasets[1][1].get_value(borrow=True)
    true_train = datasets[0][1].get_value(borrow=True)
    result_analysis(pred_test, true_train, true_valid, true_test)

    print '... classifying the whole image with learnt model:'
    print '...... extracting data'
    data_spectral, data_spatial, _, _ = \
        T_pca_constructor(hsi_img=img, gnd_img=gnd_img, n_principle=n_principle, 
                          window_size=window_size, flag='unsupervised', 
                          merge=True)
    
    start_time = time.clock()
    print '...... begin '
    y = pred_func(data_spectral) + 1
    print '...... done '
    end_time = time.clock()
    print 'finished, running time:%fs' % (end_time - start_time)

    y_rgb = cmap[y, :]
    margin = (window_size / 2) * 2  # floor it to a multiple of 2
    y_image = y_rgb.reshape(width - margin, height - margin, 3)
    scipy.misc.imsave(filename + 'wholeimg.png' , y_image)
    print 'Saving classification results'
    sio.savemat(filename + 'wholeimg.mat', 
                {'y': y.reshape(width - margin, height - margin)})
    
    ############################################################################
    print '... performing Student\'s t-test'
    best_c = 10000.
    best_g = 10.
    svm_classifier = svm.SVC(C=best_c, gamma=best_g, kernel='rbf')
    svm_classifier.fit(datasets[0][0].get_value(), datasets[0][1].get_value())

    data = [numpy.vstack((datasets[1][0].get_value(),
                          datasets[2][0].get_value())),
            numpy.hstack((datasets[1][1].get_value(),
                          datasets[2][1].get_value()))]
    numpy_rng = numpy.random.RandomState(89677)
    num_test = 100
    print 'Total number of tests: %d' % num_test
    k_sae = []
    k_svm = []
    for i in xrange(num_test):
        [_, _], [_, _], [test_x, test_y], _ = \
        train_valid_test(data, ratio=[0, 1, 1], batch_size=1, 
                         random_state=numpy_rng.random_integers(1e10))
        test_y = test_y + 1 # fix the label scale problem
        pred_y = pred_func(test_x)
        cm = confusion_matrix(test_y, pred_y)
        pr_a = cm.trace()*1.0 / test_y.size
        pr_e = ((cm.sum(axis=0)*1.0/test_y.size) * \
                (cm.sum(axis=1)*1.0/test_y.size)).sum()
        k_sae.append( (pr_a - pr_e) / (1 - pr_e) )

        pred_y = svm_classifier.predict(test_x)
        cm = confusion_matrix(test_y, pred_y)
        pr_a = cm.trace()*1.0 / test_y.size
        pr_e = ((cm.sum(axis=0)*1.0/test_y.size) * \
                (cm.sum(axis=1)*1.0/test_y.size)).sum()
        k_svm.append( (pr_a - pr_e) / (1 - pr_e) )

    std_k_sae = numpy.std(k_sae)
    std_k_svm = numpy.std(k_svm)
    mean_k_sae = numpy.mean(k_sae)
    mean_k_svm = numpy.mean(k_svm)
    left =    ( (mean_k_sae - mean_k_svm) * numpy.sqrt(num_test*2-2)) \
            / ( numpy.sqrt(2./num_test) * num_test * (std_k_sae**2 + std_k_svm**2) )

    rv = t(num_test*2.0 - 2)
    right = rv.ppf(0.95)

    print '\tstd\t\tmean'
    print 'k_sae\t%f\t%f' % (std_k_sae, mean_k_sae)
    print 'k_svm\t%f\t%f' % (std_k_svm, mean_k_svm)
    if left > right:
        print 'left = %f, right = %f, test PASSED.' % (left, right)
    else:
        print 'left = %f, right = %f, test FAILED.' % (left, right)
    
    
    return test_score
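
The `datasets` argument documented above is a list of three (x, y) pairs of theano.shared variables for the train, valid, and test splits. Below is a minimal sketch of assembling that structure from plain numpy arrays; the toy data, feature count, and class count are invented for illustration, and in the repository the real splits come from the prepare_data / train_valid_test helpers:

import numpy
import theano

def shared_pair(x, y):
    """Wrap a (features, labels) pair as theano.shared variables."""
    shared_x = theano.shared(numpy.asarray(x, dtype=theano.config.floatX), borrow=True)
    shared_y = theano.shared(numpy.asarray(y, dtype='int32'), borrow=True)
    return shared_x, shared_y

rng = numpy.random.RandomState(0)
datasets = [shared_pair(rng.randn(n, 40), rng.randint(0, 9, size=n))
            for n in (600, 200, 200)]   # train, valid, test sizes (arbitrary)
# run_sda(datasets=datasets)  # the module-level globals (img, gnd_img, cmap, width, height)
#                             # must still be set up by the project before calling
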
Code Example #4
File: pavia_SdA.py  Project: hantek/deeplearn_hsi
        print numpy.mean(c)

end_time = time.clock()

print >> sys.stderr, ('The pretraining code for file ' +
                      os.path.split(__file__)[1] +
                      ' ran for %.2fm' % ((end_time - start_time) / 60.))

                            ########################
                            # FINETUNING THE MODEL #
                            ########################

# get the training, validation and testing function for the model
print '... getting the finetuning functions'
train_fn, validate_model, test_model = sda.build_finetune_functions(
    datasets=datasets, batch_size=batch_size,
    learning_rate=finetune_lr)

print '... finetuning the model'
validation_frequency = 1000 * n_train_batches
                              # go through this many
                              # minibatches before checking the network
                              # on the validation set; in this case we
                              # check every epoch

best_params = None
best_validation_loss = numpy.inf
test_score = 0.
start_time = time.clock()

epoch = 0
Code Example #5
File: experiment.py  Project: al13mi/traffic
def main():
    # setup output directory
    d = datetime.datetime.today()
    output_folder = "out/{}-{}-{}_{}:{}:{}".format(d.year, d.month, d.day,
                                                   d.hour, d.minute, d.second)
    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)
    os.chdir(output_folder)

    # load dataset
    datasets = load_data()

    train_set_x, train_set_y = util.shared_dataset(datasets[0])
    valid_set_x, valid_set_y = util.shared_dataset(datasets[1])
    test_set_x, test_set_y = util.shared_dataset(datasets[2])

    train_set = (train_set_x, train_set_y)
    valid_set = (valid_set_x, valid_set_y)
    test_set = (test_set_x, test_set_y)

    n_input = train_set_x.get_value(borrow=True).shape[1]
    n_output = train_set_y.get_value(borrow=True).shape[1]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    n_train_batches /= batch_size

    # numpy random generator
    # start-snippet-3
    numpy_rng = numpy.random.RandomState(89677)
    print '... building the model'
    # construct the stacked denoising autoencoder class
    sda = SdA(numpy_rng=numpy_rng,
              n_ins=n_input,
              hidden_layers_sizes=[1000, 1000, 1000],
              n_outs=n_output)

    predict_fn = sda.build_predict_function()

    #########################
    # PRETRAINING THE MODEL #
    #########################
    print '... getting the pretraining functions'
    pretraining_fns = sda.pretraining_functions(train_set_x=train_set_x,
                                                batch_size=batch_size)

    print '... pre-training the model'
    start_time = time.clock()
    ## Pre-train layer-wise
    corruption_levels = [.1, .2, .3]
    for i in xrange(sda.n_layers):
        # go through pretraining epochs
        for epoch in xrange(pretraining_epochs):
            # go through the training set
            c = []
            for batch_index in xrange(n_train_batches):
                c.append(pretraining_fns[i](index=batch_index,
                                            corruption=corruption_levels[i],
                                            lr=pretrain_lr))
            print("Pre-training layer {}, epoch {}, cost ".format(i, epoch)),
            print("{}".format(numpy.mean(c)))

    end_time = time.clock()

    print >> sys.stderr, ('The pretraining code for file ' +
                          os.path.split(__file__)[1] + ' ran for %.2fm' %
                          ((end_time - start_time) / 60.))

    ########################
    # FINETUNING THE MODEL #
    ########################

    # get the training, validation and testing function for the model
    print '... getting the finetuning functions'
    train_fn, validate_model, test_model = sda.build_finetune_functions(
        datasets=(train_set, valid_set, test_set),
        batch_size=batch_size,
        learning_rate=finetune_lr)

    print '... finetuning the model'
    # early-stopping parameters
    patience = 10 * n_train_batches  # look at this many examples regardless
    patience_increase = 2.  # wait this much longer when a new best is
    # found
    improvement_threshold = 0.995  # a relative improvement of this much is
    # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
    # go through this many
    # minibatches before checking the network
    # on the validation set; in this case we
    # check every epoch

    best_validation_loss = numpy.inf
    test_score = 0.
    start_time = time.clock()

    done_looping = False
    epoch = 0

    while (epoch < training_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):
            minibatch_avg_cost = train_fn(minibatch_index)
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:
                validation_losses = validate_model()
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:

                    #improve patience if loss improvement is good enough
                    if (this_validation_loss <
                            best_validation_loss * improvement_threshold):
                        patience = max(patience, iter * patience_increase)

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = test_model()
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

            if patience <= iter:
                done_looping = True
                break

    end_time = time.clock()
    print(('Optimization complete with best validation score of %f %%, '
           'on iteration %i, '
           'with test performance %f %%') %
          (best_validation_loss * 100., best_iter + 1, test_score * 100.))
    print >> sys.stderr, ('The training code for file ' +
                          os.path.split(__file__)[1] + ' ran for %.2fm' %
                          ((end_time - start_time) / 60.))

    ###########
    # PREDICT #
    ###########
    y_pred = predict_fn(test_set_x.get_value(borrow=True))
    mae, mre = util.calculate_error_indexes(test_set_y, y_pred)
    print("-*-*RESULT*-*-")
    print("mae={}".format(mae))
    print("mre={}".format(mre))

    # plot
    for i in xrange(n_output):
        filename = "{}.png".format(str(i))
        plot.savefig(filename, test_set_x, y_pred, indexes=[i])
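
util.calculate_error_indexes is a project-specific helper whose definition is not shown here. As a hedged stand-in, mean absolute error and mean relative error over the predicted outputs can be computed with plain numpy roughly as follows (the actual helper may differ in details such as how it guards against zero targets or averages over outputs):

import numpy

def calculate_error_indexes_sketch(y_true, y_pred, eps=1e-8):
    """Illustrative MAE / MRE over all outputs; not the project's implementation."""
    y_true = numpy.asarray(y_true, dtype=float)
    y_pred = numpy.asarray(y_pred, dtype=float)
    abs_err = numpy.abs(y_pred - y_true)
    mae = abs_err.mean()
    mre = (abs_err / numpy.maximum(numpy.abs(y_true), eps)).mean()
    return mae, mre
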
Code Example #6
File: experiment.py  Project: tglovernuppy/traffic
def main():
    # setup output directory
    d = datetime.datetime.today()
    output_folder = "out/{}-{}-{}_{}:{}:{}".format(d.year, d.month, d.day, d.hour, d.minute, d.second)
    if not os.path.isdir(output_folder):
        os.makedirs(output_folder)
    os.chdir(output_folder)

    # load dataset
    datasets = load_data()

    train_set_x, train_set_y = util.shared_dataset(datasets[0])
    valid_set_x, valid_set_y = util.shared_dataset(datasets[1])
    test_set_x, test_set_y = util.shared_dataset(datasets[2])

    train_set = (train_set_x, train_set_y)
    valid_set = (valid_set_x, valid_set_y)
    test_set = (test_set_x, test_set_y)

    n_input = train_set_x.get_value(borrow=True).shape[1]
    n_output = train_set_y.get_value(borrow=True).shape[1]

    # compute number of minibatches for training, validation and testing
    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    n_train_batches /= batch_size

    # numpy random generator
    # start-snippet-3
    numpy_rng = numpy.random.RandomState(89677)
    print '... building the model'
    # construct the stacked denoising autoencoder class
    sda = SdA(
        numpy_rng=numpy_rng,
        n_ins=n_input,
        hidden_layers_sizes=[1000, 1000, 1000],
        n_outs=n_output
    )

    predict_fn = sda.build_predict_function()

    #########################
    # PRETRAINING THE MODEL #
    #########################
    print '... getting the pretraining functions'
    pretraining_fns = sda.pretraining_functions(train_set_x=train_set_x, batch_size=batch_size)

    print '... pre-training the model'
    start_time = time.clock()
    ## Pre-train layer-wise
    corruption_levels = [.1, .2, .3]
    for i in xrange(sda.n_layers):
        # go through pretraining epochs
        for epoch in xrange(pretraining_epochs):
            # go through the training set
            c = []
            for batch_index in xrange(n_train_batches):
                c.append(pretraining_fns[i](index=batch_index,
                                            corruption=corruption_levels[i],
                                            lr=pretrain_lr))
            print("Pre-training layer {}, epoch {}, cost ".format(i, epoch)),
            print("{}".format(numpy.mean(c)))

    end_time = time.clock()

    print >> sys.stderr, ('The pretraining code for file ' +
                          os.path.split(__file__)[1] + ' ran for %.2fm' %
                          ((end_time - start_time) / 60.))


    ########################
    # FINETUNING THE MODEL #
    ########################

    # get the training, validation and testing function for the model
    print '... getting the finetuning functions'
    train_fn, validate_model, test_model = sda.build_finetune_functions(
        datasets=(train_set, valid_set, test_set),
        batch_size=batch_size,
        learning_rate=finetune_lr
    )

    print '... finetuning the model'
    # early-stopping parameters
    patience = 10 * n_train_batches  # look at this many examples regardless
    patience_increase = 2.  # wait this much longer when a new best is
    # found
    improvement_threshold = 0.995  # a relative improvement of this much is
    # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
    # go through this many
    # minibatches before checking the network
    # on the validation set; in this case we
    # check every epoch

    best_validation_loss = numpy.inf
    test_score = 0.
    start_time = time.clock()

    done_looping = False
    epoch = 0

    while (epoch < training_epochs) and (not done_looping):
        epoch = epoch + 1
        for minibatch_index in xrange(n_train_batches):
            minibatch_avg_cost = train_fn(minibatch_index)
            iter = (epoch - 1) * n_train_batches + minibatch_index

            if (iter + 1) % validation_frequency == 0:
                validation_losses = validate_model()
                this_validation_loss = numpy.mean(validation_losses)
                print('epoch %i, minibatch %i/%i, validation error %f %%' %
                      (epoch, minibatch_index + 1, n_train_batches,
                       this_validation_loss * 100.))

                # if we got the best validation score until now
                if this_validation_loss < best_validation_loss:

                    #improve patience if loss improvement is good enough
                    if (
                                this_validation_loss < best_validation_loss *
                                improvement_threshold
                    ):
                        patience = max(patience, iter * patience_increase)

                    # save best validation score and iteration number
                    best_validation_loss = this_validation_loss
                    best_iter = iter

                    # test it on the test set
                    test_losses = test_model()
                    test_score = numpy.mean(test_losses)
                    print(('     epoch %i, minibatch %i/%i, test error of '
                           'best model %f %%') %
                          (epoch, minibatch_index + 1, n_train_batches,
                           test_score * 100.))

            if patience <= iter:
                done_looping = True
                break

    end_time = time.clock()
    print(
        (
            'Optimization complete with best validation score of %f %%, '
            'on iteration %i, '
            'with test performance %f %%'
        )
        % (best_validation_loss * 100., best_iter + 1, test_score * 100.)
    )
    print >> sys.stderr, ('The training code for file ' +
                          os.path.split(__file__)[1] +
                          ' ran for %.2fm' % ((end_time - start_time) / 60.))

    ###########
    # PREDICT #
    ###########
    y_pred = predict_fn(test_set_x.get_value(borrow=True))
    mae, mre = util.calculate_error_indexes(test_set_y, y_pred)
    print("-*-*RESULT*-*-")
    print("mae={}".format(mae))
    print("mre={}".format(mre))

    # plot
    for i in xrange(n_output):
        filename = "{}.png".format(str(i))
        plot.savefig(filename, test_set_x, y_pred, indexes=[i])
Code Example #7
File: Higgs.py  Project: brockmoir/Projects
        print >> sys.stderr, ('The pretraining code for file ' +
                              os.path.split(__file__)[1] +
                              ' ran for %.2fm' % ((end_time - start_time) / 60.))

        if dump_pretrain: 
            f = file(pre_name, 'wb')
            sda.save(f)
            f.close()

    ########################
    # FINETUNING THE MODEL #
    ########################

    # get the training, validation and testing function for the model
    print '... getting the finetuning functions'
    train_fn, validate_model = sda.build_finetune_functions(
        datasets=[[theano.shared(X_train), theano.shared(y_train)],
                  [theano.shared(X_test), theano.shared(y_test)]],
        batch_size=batch_size)  # , learning_rate=finetune_lr)

    print '... finetuning the model'
    # early-stopping parameters
    patience = 10 * n_train_batches  # look at this many examples regardless
    patience_increase = 2.  # wait this much longer when a new best is
                            # found
    improvement_threshold = 0.995  # a relative improvement of this much is
                                   # considered significant
    validation_frequency = min(n_train_batches, patience / 2)
                                  # go through this many
                                  # minibatches before checking the network
                                  # on the validation set; in this case we
                                  # check every epoch

    best_params = None
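
This snippet is cut off before the fine-tuning loop, but the early-stopping parameters above drive the same patience-based scheme shown in full in the other examples. A small worked illustration of the arithmetic (the batch count is invented for the example):

n_train_batches = 50                    # assumed batch count, for illustration only
patience = 10 * n_train_batches         # = 500 iterations before giving up
patience_increase = 2.
validation_frequency = min(n_train_batches, patience / 2)  # = 50 -> validate once per epoch
# Each time validation improves by more than improvement_threshold, patience is
# extended to max(patience, iter * patience_increase); e.g. a best score found at
# iteration 400 pushes patience out to 800 iterations.
print patience, validation_frequency    # Python 2 print, matching the examples above
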
Code Example #8
File: runner.py  Project: hallvardnydal/autoencoder
def test_SdA(sample_size         = 60,
             finetune_lr         = 0.01, 
             pretraining_epochs  = 20,
             pretrain_lr         = 0.01, 
             training_epochs     = 100, 
             batch_size          = 30,
             corruption_levels   = [0.2],
             hidden_layers_sizes = [2000],
             img_size = (1020,1020),
             img_size_test = (600,1020)):
    
    process = Process()
    img_input,img_labels = process.read_in_images(["train-input"],["train-labels"])
    
    img_input = process.normalize(img_input)  
    #img_input = process.apply_clahe(img_input)
    #img_input = process.local_normalization(img_input)  
    
    img_input = img_input[:,:img_size[0],:img_size[1]]
    
    train_set  = img_input
    valid_set  = img_input[:1]
    test_set   = img_input[:1,:img_size_test[0],:img_size_test[1]]
    
    train_set_x,train_set_y = process.manipulate(train_set),train_set
    valid_set_x,valid_set_y = process.manipulate(valid_set),valid_set
    test_set_x,test_set_y   = process.manipulate(test_set),test_set
    
    train_set_x, table   =process.generate_set(train_set_x, sample_size = sample_size, stride = sample_size, img_size = img_size)
    valid_set_x, table   =process.generate_set(valid_set_x, sample_size = sample_size, stride = sample_size, img_size = img_size)
    test_set_x, table    =process.generate_set(test_set_x, sample_size = sample_size, stride = sample_size, img_size = img_size_test)
    train_set_y, table   =process.generate_set(train_set_y, sample_size = sample_size, stride = sample_size, img_size = img_size)
    valid_set_y, table   =process.generate_set(valid_set_y, sample_size = sample_size, stride = sample_size, img_size = img_size)
    test_set_y, table    =process.generate_set(test_set_y, sample_size = sample_size, stride = sample_size, img_size = img_size_test)
    
    train_set_x,train_set_y = train_set_x.astype(np.float32),train_set_y.astype(np.float32)
    valid_set_x,valid_set_y = valid_set_x.astype(np.float32),valid_set_y.astype(np.float32)
    test_set_x,test_set_y   = test_set_x.astype(np.float32), test_set_y.astype(np.float32)
    
    train_set_x, train_set_y = theano.shared(train_set_x,borrow=True),theano.shared(train_set_y,borrow=True)
    valid_set_x, valid_set_y = theano.shared(valid_set_x,borrow=True),theano.shared(valid_set_y,borrow=True)
    test_set_x, test_set_y   = theano.shared(test_set_x,borrow=True),theano.shared(test_set_y,borrow=True)

    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    n_train_batches /= batch_size

    np_rng = np.random.RandomState()
    print '... building the model'

    sda = SdA(
        numpy_rng = np_rng,
        n_ins     = sample_size**2,
        n_outs    = sample_size**2,
        hidden_layers_sizes = hidden_layers_sizes
    )

    print '... Initializing pretraining functions'
    pretraining_fns, output_fn = sda.pretraining_functions(train_set_x=train_set_x,
                                                batch_size=batch_size)
    
    print '... Layer-wise training of model'
    start_time = time.clock()

    for i in xrange(sda.n_layers):
        for epoch in xrange(pretraining_epochs):
            c = []
            for batch_index in xrange(n_train_batches):
                c.append(pretraining_fns[i](index      = batch_index,
                                            corruption = corruption_levels[i],
                                            lr         = pretrain_lr))
            print 'Layer %i, epoch %d, cost ' % (i, epoch),
            print np.mean(c)

    end_time = time.clock()

    print >> sys.stderr, ('Layer-wise training ran for %.2fm' % ((end_time - start_time) / 60.))
    
    ########################
    # FINETUNING THE MODEL #
    ########################

    
    datasets = [(train_set_x,train_set_y),(valid_set_x,valid_set_y),(test_set_x,test_set_y)]
    # get the training, validation and testing function for the model
    print '... getting the finetuning functions'
    train_fn, validate_model, test_model,output_fn = sda.build_finetune_functions(
        datasets=datasets,
        batch_size=batch_size,
        learning_rate=finetune_lr
    )
    
    print '... finetuning of model'
    
    for n in xrange(training_epochs):
        costs = []
        for i in xrange(n_train_batches):
            costs.append(train_fn(i))
        
        cost = np.mean(costs)    
        #val_cost = validate_model()
        
        print "Epoch:",n,"Cost:",cost #,"Validation cost:",val_cost
    
    print "Test cost:", test_model()
     
    out = np.zeros((0,sample_size**2))
    for batch_index in xrange(train_set_x.get_value().shape[0]):
        out = np.vstack((out,output_fn(batch_index)))
        
    img_output = process.post_process(out, table, sample_size,img_shape=img_size_test)
        
    plt.figure()
    plt.imshow(test_set[0],cmap=plt.cm.gray)
    plt.figure()
    plt.imshow(img_output[0],cmap=plt.cm.gray)
    
    xz = process.xz_stack(img_input)
    
    for m in xrange(xz.shape[0]):
        for n in xrange(xz.shape[1]):
            xz[m,n] = (xz[m,n]-xz[m,n].mean())/xz[m,n].std()
    
    xz_train, table    =process.generate_set(xz, sample_size = sample_size, stride = sample_size, img_size = img_size_test)
    xz_train = xz_train.astype(np.float32)
    test_set_x.set_value(xz_train)
    
    out = np.zeros((0,sample_size**2))
    for batch_index in xrange(train_set_x.get_value().shape[0]):
        out = np.vstack((out,output_fn(batch_index)))
        
    img_output = process.post_process(out, table, sample_size,img_shape=img_size_test)
    
    plt.figure()
    plt.imshow(xz[0],cmap=plt.cm.gray)
    plt.figure()
    plt.imshow(img_output[0],cmap=plt.cm.gray)
    
    plt.show()
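
Process.generate_set and Process.post_process are project helpers that cut images into flattened patches and stitch the network output back into an image; their exact behaviour is not shown here. A self-contained sketch of one plausible non-overlapping tiling scheme (an assumption for illustration, not the project's code):

import numpy as np

def image_to_patches(img, sample_size):
    """Cut a 2-D image into non-overlapping sample_size x sample_size patches, one flattened row each."""
    h, w = img.shape
    rows = [img[r:r + sample_size, c:c + sample_size].ravel()
            for r in range(0, h - sample_size + 1, sample_size)
            for c in range(0, w - sample_size + 1, sample_size)]
    return np.vstack(rows)

def patches_to_image(rows, sample_size, img_shape):
    """Reassemble rows produced by image_to_patches into an image of img_shape."""
    h, w = img_shape
    out = np.zeros(img_shape, dtype=rows.dtype)
    i = 0
    for r in range(0, h - sample_size + 1, sample_size):
        for c in range(0, w - sample_size + 1, sample_size):
            out[r:r + sample_size, c:c + sample_size] = rows[i].reshape(sample_size, sample_size)
            i += 1
    return out

With sample_size = 60 and img_size_test = (600, 1020), such a tiling yields 10 * 17 = 170 rows of length 3600, matching the sample_size**2 input and output width of the SdA constructed above.
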