Example 1
# These layers differ slightly from those in the paper. My main
# motivation is to avoid having a square weight matrix between hidden
# layers to avoid matrix transpose errors.
num_vis = inputs.shape[1]
num_hid1 = 529  # 23^2
num_hid2 = 484  # 22^2
num_top = 1936  # 44^2

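# Greedily pre-train the first RBM on the raw input data.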
batches = data.BatchIterator(inputs)
initial_params = rbm.initial_params(num_hid1, num_vis)
params = sgd(rbm_obj, initial_params, batches, momentum)

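# Propagate the data through the trained layer; the hidden unit
# activation probabilities become the training data for the second RBM.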
inputs = logistic(inputs.dot(params.W.T) + params.h_bias)
batches = data.BatchIterator(inputs)
initial_params = rbm.initial_params(num_hid2, num_hid1)
params = sgd(rbm_obj, initial_params, batches, momentum)

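# The top-level RBM's visible layer is the class labels
# (mnist.NUM_CLASSES units) concatenated with the second hidden
# layer's activation probabilities.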
inputs = logistic(inputs.dot(params.W.T) + params.h_bias)
batches = data.BatchIterator(np.hstack((targets, inputs)))
initial_params = rbm.initial_params(num_top, num_hid2 + mnist.NUM_CLASSES)


def post_epoch(*args):
    print 'Mean hidden activation prob. is %.2f' % pcd.q


# Optimization objective for the top-level RBM.
pcd = rbm.pcd(rbm.sample_h, sample_v_softmax, rbm.neg_free_energy_grad,
              weight_decay)

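# Train the top-level RBM.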
params = sgd(pcd, initial_params, batches, 0, post_epoch)
Example 2
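    # Take the second element of each of the first `count` items, tile
    # each into a single image, and save the images to a temporary
    # directory under OUTPUT_PATH.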
    g = itertools.islice(g, count)
    g = itertools.imap(operator.itemgetter(1), g)
    g = itertools.imap(utils.tile, g)
    utils.save_images(g, tempfile.mkdtemp(dir=OUTPUT_PATH))

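# The block below is the plain CD-k objective, kept for reference; the
# persistent contrastive divergence objective defined after it is used
# instead.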
# def f(params, inputs):
#     return rbm.cd(params, inputs,
#                   rbm.sample_h,
#                   rbm.sample_v,
#                   rbm.neg_free_energy_grad,
#                   weight_decay=weight_decay,
#                   k=k)


output_dir = utils.make_output_directory(OUTPUT_PATH)
save_params = parameters.save_hook(output_dir)

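# Optimization objective: persistent contrastive divergence.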
f = rbm.pcd(rbm.sample_h, rbm.sample_v,
            rbm.neg_free_energy_grad, weight_decay)


def post_epoch(*args):
    # save_params(*args)
    # print 'Mean hidden activation prob. is %f.' % f.q
    pass


params = optimize.sgd(f, initial_params, batches, epochs,
                      learning_rate,
                      momentum,
                      weight_constraint=weight_constraint,
                      post_epoch=post_epoch)