batchsize=batchsize, learningrate=finetune_lr, momentum=momentum,
    rng=npy_rng
)

init_lr = trainer.learningrate
prev_cost = numpy.inf
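
# For each fine-tuning epoch, iterate over the chunks yielded by the train_dg
# data generator: load every chunk into the shared variables train_x / train_y,
# multiply it by a per-pixel binomial "keep" mask (tiled so that all 3 colour
# channels share the same mask), take a gradient step, and then repeat the
# step on a horizontally flipped copy of the same chunk.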
for epoch in xrange(finetune_epc):
    i = 0
    cost = 0.
    for ipart in train_dg:
        print "part %d " % i,
        noise_mask = numpy.tile(
            npy_rng.binomial(1, 1-noise, (2500, 1, 250, 250)).astype(theano.config.floatX),
            (1, 3, 1, 1)
        )
        train_x.set_value(noise_mask * ipart[0])
        train_y.set_value(ipart[1])
        i += 1
        cost += trainer.step()
        
        # horizontal flip
        noise_mask = numpy.tile(
            npy_rng.binomial(1, 1-noise, (2500, 1, 250, 250)).astype(theano.config.floatX),
            (1, 3, 1, 1)
        )
        train_x.set_value(noise_mask * ipart[0][:, :, :, ::-1])
        print "       ",
        cost += trainer.step()
        
        """
        # vertical flip
        noise_mask = numpy.tile(npy_rng.binomial(1, 1-noise, (batchsize, 1, 250, 250)), (1, 3, 1, 1))
        train_x.set_value(noise_mask * ipart[0][:, :, ::-1, :])
        print "       ",
        cost += trainer.step()

        # 180 rotate
        """
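
        # A sketch of the rotation step labelled above, following the same
        # pattern as the flips (kept disabled like the block it belongs to);
        # the exact masking and indexing here are assumptions:
        # noise_mask = numpy.tile(
        #     npy_rng.binomial(1, 1-noise, (2500, 1, 250, 250)).astype(theano.config.floatX),
        #     (1, 3, 1, 1)
        # )
        # train_x.set_value(noise_mask * ipart[0][:, :, ::-1, ::-1])
        # print "       ",
        # cost += trainer.step()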

Example #2

for i in range(len(model.models_stack)-1):
    print "\n\nPre-training layer %d:" % i
    trainer = GraddescentMinibatch(
        varin=model.varin, data=train_set_x,
        cost=model.models_stack[i].cost(),
        params=model.models_stack[i].params_private,
        supervised=False,
        batchsize=1, learningrate=0.001, momentum=0., rng=npy_rng
    )
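
    # Visualize this layer's initial filters and weight histogram, then re-draw
    # them after every pre-training epoch below.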

    layer_analyse = model.models_stack[i].encoder()
    layer_analyse.draw_weight(patch_shape=(28, 28, 1), npatch=100)
    layer_analyse.hist_weight()
    
    for epoch in xrange(15):
        trainer.step()
        layer_analyse.draw_weight(patch_shape=(28, 28, 1), npatch=100)
        layer_analyse.hist_weight()

# Save the pre-trained parameters before fine-tuning.
save_params(model=model, filename="mnist_sae_784_784_784_10.npy")


#############
# FINE-TUNE #
#############

print "\n\nBegin fine-tune: normal backprop"
bp_trainer = GraddescentMinibatch(
    varin=model.varin, data=train_set_x, 
    truth=model.models_stack[-1].vartruth, truth_data=train_set_y,
    supervised=True, cost=model.models_stack[-1].cost(),

Example #3

    gd = GraddescentMinibatch(model=model, data=data_s,
                              batchsize=MINIBATCH_SIZE,
                              learningrate=learningrate,
                              momentum=momentum,
                              normalizefilters=normalizefilters,
                              rng=rng,
                              verbose=verbose)

    ##,---------
    ##| Training
    ##`---------

    #for epoch in np.arange(epochs):
    #    gd.step()
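
    # Keep taking gradient steps until the model's cost on the data drops below
    # eps or the epoch budget is exhausted.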
    while (model.cost(data_s.get_value()) > eps) and gd.epochcount < epochs:
        gd.step()

    ##################################
    ## Stage 4: Visualizing results ##
    ##################################

    ##,-----------------------
    ##| Getting reconstruction
    ##`-----------------------
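    # Cast the sampled input to float32 before feeding it to the model.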
    scaled_input = np.float32(sampled_input)
    F_rec = model.reconstruct(scaled_input) - scaled_input  # reconstruction minus input, i.e. the residual

    ##,---------------------------
    ##| Visualizing reconstruction
    ##`---------------------------

Example #4

    trainer = GraddescentMinibatch(varin=model.varin,
                                   data=train_set_x,
                                   cost=model.models_stack[i].cost(),
                                   params=model.models_stack[i].params_private,
                                   supervised=False,
                                   batchsize=1,
                                   learningrate=0.001,
                                   momentum=0.,
                                   rng=npy_rng)
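
    # As in Example #2: unsupervised pre-training of layer i on its own cost,
    # updating only that layer's private parameters.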

    layer_analyse = model.models_stack[i].encoder()
    layer_analyse.draw_weight(patch_shape=(28, 28, 1), npatch=100)
    layer_analyse.hist_weight()

    for epoch in xrange(15):
        trainer.step()
        layer_analyse.draw_weight(patch_shape=(28, 28, 1), npatch=100)
        layer_analyse.hist_weight()

save_params(model=model, filename="mnist_sae_784_784_784_10.npy")

#############
# FINE-TUNE #
#############

print "\n\nBegin fine-tune: normal backprop"
bp_trainer = GraddescentMinibatch(varin=model.varin,
                                  data=train_set_x,
                                  truth=model.models_stack[-1].vartruth,
                                  truth_data=train_set_y,
                                  supervised=True,