Example #1
# numpy random generator

numpy_rng = random.RandomState(89677)
print_flush("... building the model")
# construct the stacked autoencoder; input and output sizes are taken from the data

sample_dimension = training_set.get_value(borrow=True).shape[1]
label_dimension = training_labels.get_value(borrow=True).shape[1]
print_flush("... sample dimension %d" % sample_dimension)
print_flush("... label dimension %d" % label_dimension)


stacked_autoencoder = StackedAutoencoder(numpy_rng=numpy_rng, n_ins=sample_dimension, n_outs=label_dimension,
                                         hidden_layer_sizes=hidden_layer_sizes, tied_weights=tied_weights,
                                         sigmoid_compressions=sigmoid_compressions,
                                         sigmoid_reconstructions=sigmoid_reconstructions,
                                         supervised_sigmoid_activation=supervised_sigmoid_activation)


print_flush("... getting the pre-training functions")
pretraining_fns = stacked_autoencoder.pretraining_functions(training_set=training_set, batch_size=batch_size)


if ENABLE_FINE_TUNING:
    print_flush("... getting the fine-tune function")
    if fine_tune_supervised:
        finetune_fn, validate_model = stacked_autoencoder.finetune_functions(training_set=training_set,
                                                                             training_labels=training_labels,
                                                                             test_set=test_set, test_labels=test_labels,
                                                                             batch_size=batch_size,
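
The pre-training functions built above are normally applied one layer at a time, sweeping over minibatches of the training set. The loop below is a minimal sketch of that pattern; the call signature (a single minibatch index per call) and the number of pre-training epochs are assumptions, since pretraining_functions is defined outside this snippet.

n_train_batches = training_set.get_value(borrow=True).shape[0] // batch_size
pretraining_epochs = 15  # assumed value, not taken from the example

for layer_index, pretrain_fn in enumerate(pretraining_fns):
    for epoch in range(pretraining_epochs):
        # one compiled Theano function per layer; each call trains on one minibatch
        batch_costs = [pretrain_fn(batch_index) for batch_index in range(n_train_batches)]
        print_flush("... layer %d, epoch %d, mean cost %f"
                    % (layer_index, epoch, sum(batch_costs) / len(batch_costs)))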
Example #2
# model: an LSTM built from three linear maps (the * 4 outputs are the four LSTM gate pre-activations)
model = chainer.FunctionSet(
        x_to_h = F.Linear(16, n_units * 4),
        h_to_h = F.Linear(n_units, n_units * 4),
        h_to_y = F.Linear(n_units, 12))
if args.gpu >= 0:
    print('using GPU #%s' % args.gpu)
    cuda.check_cuda_available()
    cuda.get_device(args.gpu).use()
    model.to_gpu()

# optimizer
optimizer = optimizers.SGD(lr=1.)
optimizer.setup(model.collect_parameters())

sae = StackedAutoencoder(args.gpu)

# one-step forward propagation
def forward_one_step(x, t, state, train=True):
    # if args.gpu >= 0:
    #     data = cuda.to_gpu(data)
    #     targets = cuda.to_gpu(targets)
    x = chainer.Variable(x, volatile=not train)
    t = chainer.Variable(t, volatile=not train)
    h_in = model.x_to_h(x) + model.h_to_h(state['h'])
    c, h = F.lstm(state['c'], h_in)
    y = model.h_to_y(h)
    state = {'c': c, 'h': h}

    # 'mod' is presumably the array module (numpy on CPU, CUDA arrays on GPU) chosen elsewhere.
    # Because this works on raw .data arrays, it is a monitoring error, not a differentiable loss.
    sigmoid_y = 1 / (1 + mod.exp(-y.data))
    squared_sum_error = ((t.data - sigmoid_y) ** 2).sum()
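
The example is cut off before forward_one_step returns. In the usual Chainer v1 recurrent-training pattern it would go on to return the new state together with a differentiable loss variable (built with Chainer functions rather than the raw-array error above). Assuming such a return value, a driving loop over one training sequence could look like the sketch below; make_initial_state, seq_x and seq_t are hypothetical names, not part of the original example.

import numpy as np

def make_initial_state(batch_size, train=True):
    # zero cell/hidden state; pick the array module to match the GPU flag used above
    xp = cuda.cupy if args.gpu >= 0 else np
    zeros = xp.zeros((batch_size, n_units), dtype=np.float32)
    return {'c': chainer.Variable(zeros, volatile=not train),
            'h': chainer.Variable(zeros, volatile=not train)}

state = make_initial_state(batch_size=1)
accum_loss = None
for x_t, t_t in zip(seq_x, seq_t):  # seq_x, seq_t: per-timestep input/target arrays
    state, loss_t = forward_one_step(x_t, t_t, state)
    accum_loss = loss_t if accum_loss is None else accum_loss + loss_t

# Chainer v1 update cycle, matching the FunctionSet/optimizer setup above
optimizer.zero_grads()
accum_loss.backward()
optimizer.update()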
Example #3
import os
from numpy import random, asarray
from stacked_autoencoder import StackedAutoencoder
import theano

user_path = os.path.expanduser("~")
autoencoder_weights_path = user_path + "/Documents/dnn/autoencoder/conf_120_optical_flow_linear/"

numpy_rng = random.RandomState(89677)

# load a stacked autoencoder from the saved configuration and weights directory
stacked_autoencoder = StackedAutoencoder.from_config_path(numpy_rng, autoencoder_weights_path)

# generate random input vectors matching the network's input dimension, in Theano's float type
samples = asarray(random.rand(5000, stacked_autoencoder.input_dimension()), dtype=theano.config.floatX)
predictions = stacked_autoencoder.predict(samples)

# write each sample and its prediction on consecutive comma-separated lines
result = ""
for x, y in zip(samples.tolist(), predictions.tolist()):
    str_sample = ", ".join(str(val) for val in x)
    str_prediction = ", ".join(str(val) for val in y)
    result += str_sample + "\n" + str_prediction + "\n"

with open(user_path + "/Documents/dnn/results/implementation.txt", 'w+') as output_file:
    output_file.write(result)