Example no. 1
# collapse the raw TIMIT phone labels to the 48-class set used for training
train_y = map(map_y_48, train_y)
valid_y, test_y = map_y_48(valid_y), map_y_48(test_y)

train_x, train_y  = timit.make_shared_partitions(train_x, train_y)
valid_x, valid_y = timit.shared_dataset((valid_x, valid_y))
test_x, test_y = timit.shared_dataset((test_x, test_y))
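
# Sketch (an assumption, mirroring the helper from the standard Theano tutorials, not
# necessarily this repo's timit.shared_dataset): the arrays are wrapped in Theano shared
# variables so a whole partition can sit in device memory and be sliced symbolically.
# Assumes `import numpy as np`, `import theano`, `import theano.tensor as T`.
def shared_dataset_sketch(data_xy, borrow=True):
	data_x, data_y = data_xy
	shared_x = theano.shared(np.asarray(data_x, dtype=theano.config.floatX), borrow=borrow)
	shared_y = theano.shared(np.asarray(data_y, dtype=theano.config.floatX), borrow=borrow)
	# labels are stored as floats on the device and cast back to ints when used
	return shared_x, T.cast(shared_y, 'int32')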

train_set_x = train_x_unsup
# sanity check: number of training frames in each set
print train_x_all.get_value().shape[0]
print train_set_x.get_value().shape[0]

# nn_ae = DNN(numpy_rng, [5096, 5096], 429, 144)
# nn_ae = DNN(numpy_rng, [6000, 6000], 429, 39)
nn_ae = DNN(numpy_rng, [2000], 429, 48)   # 429 input features, one 2000-unit hidden layer, 48 phone classes

ae1 = SdA(train_x_unsup, numpy_rng, theano_rng, [2000], nn_ae, mode='contractive', activations_layers=['tanh', 'tanh', 'tanh'])
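
# What mode='contractive' presumably adds to each layer's reconstruction cost: the squared
# Frobenius norm of the encoder Jacobian (Rifai et al., 2011). The sketch below is an
# illustration, not this repo's internals; it assumes a tanh encoder and
# `import theano.tensor as T`.
def contractive_penalty_sketch(x, W, b_hid):
	h = T.tanh(T.dot(x, W) + b_hid)                  # W has shape (n_in, n_hidden)
	# d h_i / d x_j = (1 - h_i**2) * W[j, i], so ||J||_F^2 factorises per example
	return T.dot((1 - h ** 2) ** 2, T.sum(W ** 2, axis=0))
# The layer cost would then be reconstruction_cost + lam * T.mean(penalty) for some weight lam.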

pretrain_fns = ae1.pretraining_functions(train_x_unsup, BATCH_SIZE)
num_samples = train_x_unsup.get_value(borrow=True).shape[0]   # frames are stored along the first axis

num_batches = num_samples // BATCH_SIZE   # any leftover partial batch is dropped
indices = np.arange(num_samples, dtype=np.dtype('int32'))
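
# Not in the original snippet: visiting minibatches in a fresh random order generally helps
# SGD; reusing the script's RandomState keeps the run reproducible.
indices = numpy_rng.permutation(num_samples).astype('int32')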


# layer-wise pretraining
for i in xrange(len(ae1.da_layers)):
	for epoch in xrange(NUM_EPOCHS):
		c = []
		for j in xrange(num_batches):
			index = indices[j * BATCH_SIZE:(j + 1) * BATCH_SIZE]
			c.append(pretrain_fns[i](index=index))   # one SGD step on layer i's cost
Example no. 2
print mnist   # quick look at the loaded MNIST object
# train_set_x, train_set_y = mnist_data[0]
valid_set_x, valid_set_y = mnist_data[1]
test_set_x, test_set_y = mnist_data[2]

train_set_x, train_set_y = mnist_full[0]

numpy_rng = np.random.RandomState(1111)   # fixed seed so the run is reproducible

theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))

# nn_ae = DNN(numpy_rng, [1024, 1024], 429, 144)
# configuration for mnist

nn_ae = DNN(numpy_rng, [1000, 1000], 784, 10)   # 784 input pixels, two 1000-unit hidden layers, 10 digit classes
ae1 = SdA(train_set_x, numpy_rng, theano_rng, [500, 500], nn_ae, mode='contractive', activations_layers=['tanh', 'tanh', 'tanh'])

pretrain_fns = ae1.pretraining_functions(train_set_x, BATCH_SIZE)
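
# Sketch (an assumption, not this repo's code) of the kind of function pretraining_functions
# presumably compiles for each layer, following the standard Theano SdA tutorial: a minibatch
# of rows is pulled from the shared dataset via `givens`, and each call performs one SGD step
# on that layer's reconstruction cost. Assumes `import theano` and `import theano.tensor as T`.
def compile_pretrain_fn_sketch(cost, updates, x, shared_x):
	index = T.ivector('index')   # minibatch row indices, as passed in the loop below
	return theano.function(inputs=[index], outputs=cost, updates=updates,
	                       givens={x: shared_x[index]})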

num_samples = train_set_x.get_value(borrow=True).shape[0]   # one example per row
num_batches = num_samples // BATCH_SIZE
indices = np.arange(num_samples, dtype=np.dtype('int32'))


# layer-wise pretraining
for i in xrange(len(ae1.da_layers)):
	for epoch in xrange(NUM_EPOCHS):
		c = []
		for j in xrange(num_batches):   # `j` rather than `i`, which would shadow the layer index
			index = indices[j * BATCH_SIZE:(j + 1) * BATCH_SIZE]
			c.append(pretrain_fns[i](index=index))
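		# Not in the original snippet: report the mean minibatch cost for this epoch, the usual
		# way to monitor layer-wise pretraining (assumes each call returns the minibatch cost).
		print 'layer %d, epoch %d, mean cost %f' % (i, epoch, np.mean(c))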