Example #1
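# Supervised fine-tuning trainer: minibatch gradient descent on the top layer's
# cost of the stacked model, updating all of the model's parameters.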
trainer = GraddescentMinibatch(
    varin=model.varin, data=train_x, 
    truth=model.models_stack[-1].vartruth, truth_data=train_y,
    supervised=True,
    cost=model.models_stack[-1].cost(), 
    params=model.params,
    batchsize=batchsize, learningrate=finetune_lr, momentum=momentum,
    rng=npy_rng
)

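# Fine-tuning loop: whenever an epoch fails to improve the cost, shrink the
# learning rate by 0.8; stop once it has decayed below init_lr * 1e-7.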
init_lr = trainer.learningrate
prev_cost = numpy.inf
for epoch in xrange(finetune_epc):
    cost = trainer.epoch()
    if prev_cost <= cost:
        if trainer.learningrate < (init_lr * 1e-7):
            break
        trainer.set_learningrate(trainer.learningrate*0.8)
    prev_cost = cost
    if epoch % 10 == 0:
        print "*** error rate: train: %f, test: %f" % (train_error(), test_error())
    try:
        if epoch % 100 == 0:
            save_params(model, 'svhn_CONV_flt2*5_pool2*5_nflt128*5_64*3-10.npy')
    except Exception:
        # ignore checkpointing failures and keep training
        pass
print "***FINAL error rate: train: %f, test: %f" % (train_error(), test_error())
print "Done."

pdb.set_trace()

Example #2
    #apply_mask[2]()

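    # Accumulate the per-minibatch cost; every (50000 / batchsize) steps apply
    # the stopping rule, adjust the learning rate, and report error rates.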
    epc_cost += cost
    if step % (50000 / batchsize) == 0 and step > 0:
        # stopping rule: crnt_avg holds the last `avg` epoch costs and hist_avg
        # the window before it; stop once the recent window's total cost
        # exceeds the previous window's (training has stopped improving)
        ind = (step / (50000 / batchsize)) % avg
        hist_avg[ind] = crnt_avg[ind]
        crnt_avg[ind] = epc_cost
        if sum(hist_avg) < sum(crnt_avg):
            break
    
        # adjust learning rate: after more than 10 non-improving checks,
        # decay it by 0.9 and reset the patience counter
        if prev_cost <= epc_cost:
            patience += 1
        if patience > 10:
            trainer.set_learningrate(0.9 * trainer.learningrate)
            patience = 0
        prev_cost = epc_cost

        # evaluate
        print "***error rate: train: %f, test: %f" % (
            train_error(), test_error())
        
        epc_cost = 0.
print "Done."
print "***FINAL error rate, train: %f, test: %f" % (
    train_error(), test_error()
)
save_params(model, __file__.split('.')[0] + '_params.npy')

pdb.set_trace()

Example #3
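# Unsupervised pretraining trainer (supervised=False): it updates only this
# layer's private parameters (layer_dropout.params_private) at the
# pretraining learning rate.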
                                   params=layer_dropout.params_private,
                                   supervised=False,
                                   batchsize=batchsize,
                                   learningrate=pretrain_lr,
                                   momentum=momentum,
                                   rng=npy_rng)

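    # Pretraining loop: count non-improving epochs; after more than 10 of them
    # decay the learning rate by 0.9, and stop once it drops below 1e-10.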
    prev_cost = numpy.inf
    patience = 0
    for epoch in xrange(pretrain_epc):
        cost = trainer.epoch()
        if prev_cost <= cost:
            patience += 1
            if patience > 10:
                patience = 0
                trainer.set_learningrate(0.9 * trainer.learningrate)
            if trainer.learningrate < 1e-10:
                break
        prev_cost = cost
    save_params(
        model,
        'ZLIN_4000_1000_4000_1000_4000_1000_4000_normhid_nolinb_cae1_dropout.npy'
    )
print "Done."

#########################
# BUILD FINE-TUNE MODEL #
#########################

print "\n\n... building fine-tune model -- contraction 1"
for imodel in model.models_stack: