# Example 1
# get image data
# -----

# Parse data from files using get_digits.
# NOTE(review): get_mnist and FLAGS are defined elsewhere in the file (not
# visible in this snippet); FLAGS.n_occluders presumably controls how many
# occluding objects are rendered over each MNIST digit -- confirm against
# the get_mnist.MNIST constructor.
dataset = get_mnist.MNIST(n_occ=FLAGS.n_occluders)

# initialize classes with parameters
# -----

# Recurrent convolutional network ("ReCoNNet"). All hyperparameter lists
# (activations, conv_filter_shapes, bias_shapes, ksizes, pool_strides,
# topdown_*) and the is_training / keep_prob placeholder modules are defined
# elsewhere in the file -- not visible in this snippet.
network = networks.ReCoNNet("ReCoNNet", is_training.placeholder, activations,
                            conv_filter_shapes, bias_shapes, ksizes,
                            pool_strides, topdown_filter_shapes,
                            topdown_output_shapes, keep_prob.placeholder,
                            FLAGS)

# Graph building blocks: per-timestep cross-entropy loss, a module that
# accumulates that loss over time, an Adam-based optimizer wrapper, and a
# batch-accuracy metric. CROSSENTROPY_FN and lrate are defined elsewhere.
one_time_error = m.ErrorModule("cross_entropy", CROSSENTROPY_FN)
error = m.TimeAddModule("add_error")
optimizer = m.OptimizerModule("adam", tf.train.AdamOptimizer(lrate))
accuracy = m.BatchAccuracyModule("accuracy")

# Wire the computation graph: input -> network -> per-timestep cross entropy.
# NOTE(review): in this file as shown, `inp` and `labels` are only assigned
# further down (see the ConstantPlaceholderModule definitions below) -- this
# looks like concatenated example snippets; confirm the intended ordering.
network.add_input(inp)
one_time_error.add_input(network)
one_time_error.add_input(labels)
# Accumulate the per-step loss through time: the TimeAddModule receives the
# current step's error at time offset 0 and its own output at offset -1
# (a recurrent self-connection -- presumably error(t) = step_error(t) + error(t-1);
# verify against TimeAddModule).
error.add_input(one_time_error, 0)
error.add_input(error, -1)
optimizer.add_input(error)
accuracy.add_input(network)
accuracy.add_input(labels)

# Unroll the graph outputs. TIME_DEPTH and TIME_DEPTH_BEYOND are defined
# elsewhere; the error is unrolled for extra steps beyond the optimized depth.
error.create_output(TIME_DEPTH + TIME_DEPTH_BEYOND)
optimizer.create_output(TIME_DEPTH)
# Placeholders for a batch of 28x28 single-channel images (NHWC) and
# 10-class one-hot labels. BATCH_SIZE is defined elsewhere in the file.
inp = mod.ConstantPlaceholderModule("input", shape=(BATCH_SIZE, 28, 28, 1))
labels = mod.ConstantPlaceholderModule("input_labels", shape=(BATCH_SIZE, 10))
# Scalar dropout keep-probability, fed in at run time.
keep_prob = mod.ConstantPlaceholderModule("keep_prob",
                                          shape=(),
                                          dtype=tf.float32)

# LeNet-5-style hyperparameters: two conv layers (8x8 kernels, 1->6 and
# 6->16 channels) with 4x4 pooling windows at stride 2, followed by two
# fully connected layers (120 then 10 units); the final layer is linear.
activations = [tf.nn.relu, tf.nn.relu, tf.nn.relu, tf.identity]
filter_shapes = [[8, 8, 1, 6], [8, 8, 6, 16]]
bias_shapes = [[1, 28, 28, 6], [1, 14, 14, 16], [1, 120], [1, 10]]
ksizes = [[1, 4, 4, 1], [1, 4, 4, 1]]
pool_strides = [[1, 2, 2, 1], [1, 2, 2, 1]]
# recurrentLenet5 is defined elsewhere in the file (not visible here).
network = recurrentLenet5("rlenet5", activations, filter_shapes, bias_shapes,
                          ksizes, pool_strides, keep_prob.placeholder)

# Loss, time-accumulated loss, accuracy metric and Adam optimizer for the
# recurrent LeNet-5. cross_entropy is a loss function defined elsewhere.
one_time_error = mod.ErrorModule("cross_entropy", cross_entropy)
error = mod.TimeAddModule("add_error")
accuracy = mod.BatchAccuracyModule("accuracy")
optimizer = mod.OptimizerModule("adam", tf.train.AdamOptimizer())

# Wire the graph: input -> network -> per-step loss, then accumulate the
# loss over time via the TimeAddModule's recurrent self-connection
# (offset 0 = current step's loss, offset -1 = previous accumulated value).
network.add_input(inp)
one_time_error.add_input(network)
one_time_error.add_input(labels)
error.add_input(one_time_error, 0)
error.add_input(error, -1)
accuracy.add_input(network)
accuracy.add_input(labels)
optimizer.add_input(error)
# Unroll optimizer and accuracy for TIME_DEPTH timesteps.
optimizer.create_output(TIME_DEPTH)
accuracy.create_output(TIME_DEPTH)
# Build test input/target pairs from the reformatted test data:
# X is everything up to the last WINDOW+delay rows, Y is the matching rows
# shifted forward by WINDOW+delay (first s_test.shape[1] columns only).
# NOTE(review): reformdata_test, WINDOW, delay and s_test are defined
# elsewhere in the file -- not visible in this snippet.
testX = reformdata_test[:-WINDOW-delay]
testY = reformdata_test[WINDOW+delay:, 0:s_test.shape[1]]

# Hyperparameters for the LSTM example.
CELL_SIZE = 300   # LSTM hidden-state size
TIME_DEPTH = 5    # unrolling depth used by optimizer.create_output below
BATCH_SIZE = 1
NFFT = 128        # presumably the FFT size of the spectrogram -- confirm
# Flattened input covers WINDOW frames of NFFT+1 bins; output is one frame.
in_size = (NFFT + 1) * WINDOW
out_size = NFFT + 1

# Placeholders for one batch of flattened input frames and target frames.
inp = mod.ConstantPlaceholderModule("input", shape=(BATCH_SIZE, in_size))
target = mod.ConstantPlaceholderModule("target", shape=(BATCH_SIZE, out_size))
# LSTM cell mapping in_size features to CELL_SIZE hidden units
# (lstm is a project module imported elsewhere).
cell = lstm.LSTM_Cell("lstm_cell", in_size, CELL_SIZE)

# Linear read-out layer from the LSTM state, MSE loss, Adam optimizer.
# mean_squared_error is a loss function defined elsewhere in the file.
out_prediction = mod.FullyConnectedLayerModule("out_prediction", tf.identity, CELL_SIZE, out_size)
err = mod.ErrorModule("mse", mean_squared_error)
opt = mod.OptimizerModule("adam", tf.train.AdamOptimizer())

#  Connect input
cell.add_input(inp)
out_prediction.add_input(cell)
err.add_input(target)
err.add_input(out_prediction)
opt.add_input(err)
# Unroll training for TIME_DEPTH steps; prediction output only needs depth 1.
opt.create_output(TIME_DEPTH)
out_prediction.create_output(1)

# Plotting helper (Plot is defined elsewhere in the file).
myplot = Plot()

# Number of training samples; trainX is built elsewhere (not visible here).
train_length = trainX.shape[0] #2000#