import tensorflow as tf  # TF 1.x graph-mode API; project-local imports (`mod`, the dataset, TIME_DEPTH, BATCH_SIZE) are elided in the source

# dropout keep probability as a feedable scalar placeholder
keep_prob = mod.ConstantPlaceholderModule("keep_prob", shape=(), dtype=tf.float32)

# architecture hyperparameters of the recurrent LeNet-5 variant
activations = [tf.nn.relu, tf.nn.relu, tf.nn.relu, tf.identity]
filter_shapes = [[8, 8, 1, 6], [8, 8, 6, 16]]
bias_shapes = [[1, 28, 28, 6], [1, 14, 14, 16], [1, 120], [1, 10]]
ksizes = [[1, 4, 4, 1], [1, 4, 4, 1]]
pool_strides = [[1, 2, 2, 1], [1, 2, 2, 1]]

network = recurrentLenet5("rlenet5", activations, filter_shapes, bias_shapes,
                          ksizes, pool_strides, keep_prob.placeholder)
one_time_error = mod.ErrorModule("cross_entropy", cross_entropy)
error = mod.TimeAddModule("add_error")
accuracy = mod.BatchAccuracyModule("accuracy")
optimizer = mod.OptimizerModule("adam", tf.train.AdamOptimizer())

# wire the graph: the per-timestep cross-entropy is accumulated over time by
# the TimeAddModule, whose second input (time offset -1) is its own output
# from the previous timestep
network.add_input(inp)
one_time_error.add_input(network)
one_time_error.add_input(labels)
error.add_input(one_time_error, 0)
error.add_input(error, -1)
accuracy.add_input(network)
accuracy.add_input(labels)
optimizer.add_input(error)

# unroll the graph for TIME_DEPTH timesteps
optimizer.create_output(TIME_DEPTH)
accuracy.create_output(TIME_DEPTH)


def train_batch(sess, i):
    batch = train_mnist[i * BATCH_SIZE:(i + 1) * BATCH_SIZE]
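    # The remainder of train_batch is truncated in the source; the lines below
    # are a hedged sketch of one training step, not the original code. They
    # assume that modules expose their per-timestep tensors via an `.outputs`
    # dict keyed by unroll step, that `inp` and `labels` are placeholder
    # modules fed like `keep_prob` above, and that `batch` splits into
    # hypothetical image/label arrays.
    images, targets = batch["images"], batch["labels"]  # hypothetical keys
    loss, _ = sess.run(
        [error.outputs[TIME_DEPTH], optimizer.outputs[TIME_DEPTH]],
        feed_dict={inp.placeholder: images,
                   labels.placeholder: targets,
                   keep_prob.placeholder: 0.5})
    return loss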
# parse data from files using the get_mnist helper
dataset = get_mnist.MNIST(n_occ=FLAGS.n_occluders)

# initialize classes with parameters
# -----
network = networks.ReCoNNet("ReCoNNet", is_training.placeholder, activations,
                            conv_filter_shapes, bias_shapes, ksizes,
                            pool_strides, topdown_filter_shapes,
                            topdown_output_shapes, keep_prob.placeholder,
                            FLAGS)
one_time_error = m.ErrorModule("cross_entropy", CROSSENTROPY_FN)
error = m.TimeAddModule("add_error")
optimizer = m.OptimizerModule("adam", tf.train.AdamOptimizer(lrate))
accuracy = m.BatchAccuracyModule("accuracy")

# wire the graph: same recurrent error-accumulation pattern as above
network.add_input(inp)
one_time_error.add_input(network)
one_time_error.add_input(labels)
error.add_input(one_time_error, 0)
error.add_input(error, -1)
optimizer.add_input(error)
accuracy.add_input(network)
accuracy.add_input(labels)

# unroll: optimize up to TIME_DEPTH, but keep accumulating the error for
# TIME_DEPTH_BEYOND additional timesteps for evaluation
error.create_output(TIME_DEPTH + TIME_DEPTH_BEYOND)
optimizer.create_output(TIME_DEPTH)

for time in range(0, (TIME_DEPTH + TIME_DEPTH_BEYOND + 1)):
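    # The loop body is truncated in the source; the line below is a hedged
    # guess, assuming create_output(t) registers a module's output tensor for
    # timestep t. Creating an accuracy output at every unroll step would make
    # performance observable at each timestep, including the TIME_DEPTH_BEYOND
    # steps past the training horizon.
    accuracy.create_output(time)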