Example #1
    def get_denoising_error(self, dataset, cost, noise, corruption_level):
        """ This function returns the denoising error over the dataset """
        batch_size = 100
        # compute the number of minibatches for the dataset
        n_train_batches = int(get_constant(dataset.shape[0]) / batch_size)

        # allocate symbolic variables for the data
        index = T.lscalar()  # index to a [mini]batch

        # learning_rate=0. and empty updates: evaluate the cost without
        # modifying the model parameters
        cost, updates = self.get_cost_updates(
            corruption_level=corruption_level,
            learning_rate=0.,
            noise=noise,
            cost=cost)
        get_error = theano.function(
            [index],
            cost,
            updates={},
            givens={
                self.x: dataset[index * batch_size:(index + 1) * batch_size]
            },
            name='get_error')

        denoising_error = []
        # go through the dataset
        for batch_index in xrange(n_train_batches):
            denoising_error.append(get_error(batch_index))

        return numpy.mean(denoising_error)
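
A minimal usage sketch for this method. The dataset name, the `load_data` call, and the hyper-parameter values below are illustrative assumptions, not part of the example; `da` stands for a dA instance that has already been fit:

# Hypothetical usage -- assumes `load_data` returns Theano shared variables
# (as in Example #3 below) and `da` is an already-trained dA instance.
train_set_x, valid_set_x, test_set_x = load_data('avicenna', True, False)
err = da.get_denoising_error(valid_set_x, cost='CE',
                             noise='gaussian', corruption_level=0.3)
print 'denoising error: %f' % err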
Example #2
    def fit(self, dataset, learning_rate, batch_size=20, epochs=50, cost='CE',
            noise='gaussian', corruption_level=0.3, reg=0.):
        """ This function fits the dA to the given dataset with
        the given hyper-parameters and returns the loss evolution
        and the time spent during training """

        # compute the number of minibatches for the dataset
        n_train_batches = int(get_constant(dataset.shape[0]) / batch_size)

        # allocate symbolic variables for the data
        index = T.lscalar()  # index to a [mini]batch

        cost, updates = self.get_cost_updates(
            corruption_level=corruption_level,
            learning_rate=learning_rate,
            noise=noise,
            cost=cost,
            reg=reg)
        train_da = theano.function(
            [index],
            cost,
            updates=updates,
            givens={
                self.x: dataset[index * batch_size:(index + 1) * batch_size]
            },
            name='train_da')

        start_time = time.clock()

        ############
        # TRAINING #
        ############
        loss = []
        print '... training model...'
        # go through the training epochs
        for epoch in xrange(epochs):
            tic = time.clock()
            # go through the training set
            c = []
            for batch_index in xrange(n_train_batches):
                c.append(train_da(batch_index))

            toc = time.clock()
            loss.append(numpy.mean(c))
            print 'Training epoch %d, time spent (min) %f, cost %f' \
                % (epoch, (toc - tic) / 60., numpy.mean(c))

        end_time = time.clock()
        training_time = (end_time - start_time) / 60.

        return training_time, loss
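
A short sketch of how `fit` might be driven end to end. The constructor keywords mirror the dA call in Example #3, but every value here is an illustrative assumption:

# Hypothetical usage -- all constructor and training values are assumptions.
da = dA(n_visible=75, n_hidden=500, tied_weights=True,
        act_enc='sigmoid', act_dec='sigmoid')
training_time, loss = da.fit(train_set_x, learning_rate=0.01,
                             batch_size=20, epochs=50, cost='CE',
                             noise='gaussian', corruption_level=0.3, reg=0.)
print 'trained in %f min, final loss %f' % (training_time, loss[-1])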
Example #3
def main_train(dataset, save_dir, n_hidden, tied_weights, act_enc,
               act_dec, learning_rate, batch_size, epochs, cost_type,
               noise_type, corruption_level, reg, normalize_on_the_fly=False,
               do_pca=False, num_components=numpy.inf, min_variance=.0,
               do_create_submission=False, submission_dir=None):
    ''' main function used for training '''

    datasets = load_data(dataset, not normalize_on_the_fly,
                         normalize_on_the_fly)
    
    train_set_x = datasets[0]
    valid_set_x = datasets[1]
    test_set_x = datasets[2]

    ###############
    # First block #
    ###############
    from scikits.learn.pca import PCA
    # named `whitening_pca` so the instance does not shadow the `pca`
    # module used for PCATrainer further down
    whitening_pca = PCA(n_components=75, whiten=True)

    print '... train PCA'
    whitening_pca.fit(train_set_x.value)
    print '... explained variance'
    print whitening_pca.explained_variance_
    print '... transform train/valid/test'
    train_r = whitening_pca.transform(train_set_x.value)
    valid_r = whitening_pca.transform(valid_set_x.value)
    test_r = whitening_pca.transform(test_set_x.value)

    train_set_x.value = train_r
    valid_set_x.value = valid_r
    test_set_x.value = test_r

    del PCA, train_r, valid_r, test_r

    ################
    # Second block #
    ################

    # load a pre-trained first-layer dA
    da1 = dA()
    save_dir1 = '/data/lisa/exp/mesnilgr/ift6266h11/GREGAVI2_/55'
    da1.load(save_dir1)

    # compile functions computing the hidden representation of each set;
    # no input is declared since the whole set is supplied through givens
    x = theano.tensor.matrix('input')
    get_rep_train = theano.function([], da1.get_hidden_values(x),
                                    givens={x: train_set_x},
                                    name='get_rep_train')
    get_rep_valid = theano.function([], da1.get_hidden_values(x),
                                    givens={x: valid_set_x},
                                    name='get_rep_valid')
    get_rep_test = theano.function([], da1.get_hidden_values(x),
                                   givens={x: test_set_x},
                                   name='get_rep_test')

    # train, valid and test representations
    train_r = get_rep_train()
    valid_r = get_rep_valid()
    test_r = get_rep_test()

    train_set_x.value = train_r
    valid_set_x.value = valid_r
    test_set_x.value = test_r

    del train_r, valid_r, test_r

    d = get_constant(train_set_x.shape[1])

    da = dA(n_visible=d, n_hidden=n_hidden,
            tied_weights=tied_weights,
            act_enc=act_enc, act_dec=act_dec)

    time_spent, loss = da.fit(train_set_x, learning_rate, batch_size, epochs,
                              cost_type, noise_type, corruption_level, reg)

    if save_dir:
        da.save(save_dir)

    denoising_error = da.get_denoising_error(valid_set_x, cost_type,
                                             noise_type, corruption_level)
    print 'Training complete in %f (min) with final denoising error %f' \
        % (time_spent, denoising_error)

    if do_pca:
        print "... computing PCA"
        x = theano.tensor.matrix('input')
        get_rep_train = theano.function([], da.get_hidden_values(x),
                                        givens={x: train_set_x},
                                        name='get_rep_train')
        # assumes a framework-level `pca` module providing PCATrainer
        # is imported at module scope
        pca_trainer = pca.PCATrainer(get_rep_train(),
                                     num_components=num_components,
                                     min_variance=min_variance)
        pca_trainer.updates()
        pca_trainer.save(save_dir)

    if do_create_submission:
        print "... creating submission"
        if submission_dir is None:
            submission_dir = save_dir
        create_submission(dataset, save_dir, submission_dir,
                          normalize_on_the_fly, do_pca)

    return denoising_error, time_spent, loss
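
A sketch of a driver call for `main_train`. Every argument value below is an illustrative assumption, in particular the dataset name and save directory:

# Hypothetical driver call -- all argument values are assumptions.
main_train(dataset='avicenna', save_dir='./exp/da_layer2',
           n_hidden=500, tied_weights=True,
           act_enc='sigmoid', act_dec='sigmoid',
           learning_rate=0.01, batch_size=20, epochs=50,
           cost_type='CE', noise_type='gaussian',
           corruption_level=0.3, reg=0.,
           normalize_on_the_fly=False, do_pca=False,
           do_create_submission=False)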