import os

import numpy as np
import tensorflow as tf

# Initial-state placeholders for every cell: u, a, a_m and the accumulated
# filter gradient dF.
state = tuple(
    tuple((
        tf.placeholder(tf.float32, [batch_size, cell.layer_size], name="u"),
        tf.placeholder(tf.float32, [batch_size, cell.layer_size], name="a"),
        tf.placeholder(tf.float32, [batch_size, cell.layer_size], name="a_m"),
        tf.placeholder(
            tf.float32,
            [batch_size, cell.filter_len * cell.input_size, cell.layer_size],
            name="dF"),
    )) for cell in net._cells)

# Build zero-filled numpy arrays matching every state placeholder above.
get_zero_state = lambda: tuple(
    np.zeros((batch_size, ) + tuple(t.get_shape().as_list()[1:]))
    for tup in state for t in tup)

(u_ta, a_ta, a_m_ta, x_hat_flat_ta), finstate, _ = tf.nn.raw_rnn(
    net, rnn_with_hist_loop_fn(input, sequence_length, state, filter_len))

u, a, a_m, x_hat_flat = (
    u_ta.stack(), a_ta.stack(), a_m_ta.stack(), x_hat_flat_ta.stack())

x_hat = tf.reshape(x_hat_flat, (seq_size, batch_size, filter_len, input_size))

# Use the batch-averaged dF accumulated in the final state as the gradient
# for each cell's flattened filter matrix F_flat.
grads_and_vars = []
for li, s in enumerate(finstate):
    dF = s[-1]
    grads_and_vars.append((-tf.reduce_mean(dF, 0), net._cells[li].F_flat))

sess = tf.Session()
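
# A minimal sketch of applying the accumulated filter gradients, assuming a
# learning rate `lr` and numpy values `input_v`/`seq_len_v` for the `input`
# and `sequence_length` placeholders; these names are assumptions made for
# illustration, not part of the original snippet.
lr = 0.01  # assumed hyperparameter
apply_grads = tf.train.GradientDescentOptimizer(lr).apply_gradients(grads_and_vars)

sess.run(tf.global_variables_initializer())
state_placeholders = [t for tup in state for t in tup]
feed = dict(zip(state_placeholders, get_zero_state()))
feed[input] = input_v
feed[sequence_length] = seq_len_v
_, u_v, x_hat_v = sess.run([apply_grads, u, x_hat], feed_dict=feed)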
# Example #2
cell = GLMCell(filters_num, hidden_size, visible_size, filter)

net_input_tuple = RnnHistInputTuple(input_n, target)

# GLM state placeholders: u, s, r, spikes, an L-step spike history, and the
# gradient accumulators dW, dF and dR.
state = GLMStateTuple(
    tf.placeholder(tf.float32, [batch_size, net_size], name="u"),
    tf.placeholder(tf.float32, [batch_size, filters_num + net_size], name="s"),
    tf.placeholder(tf.float32, [batch_size, net_size], name="r"),
    tf.placeholder(tf.float32, [batch_size, net_size], name="spikes"),
    tf.placeholder(tf.float32, [batch_size, L, net_size], name="spike_history"),
    tf.placeholder(tf.float32, [batch_size, filters_num + net_size, net_size], name="dW"),
    tf.placeholder(tf.float32, [batch_size, L, filters_num], name="dF"),
    tf.placeholder(tf.float32, [batch_size, L, 1], name="dR"),
)

net_out, finstate, _ = tf.nn.raw_rnn(cell, rnn_with_hist_loop_fn(input_n, target_n, sequence_length, state, L))

spikes_ta, a_ta, output_ta, loss_ta = net_out
spikes = spikes_ta.stack()
a = a_ta.stack()
output = output_ta.stack()
loss = loss_ta.stack()
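
# A minimal sketch of evaluating the unrolled GLM graph with a zero initial
# state, assuming numpy values `input_v`, `target_v` and `seq_len_v` for the
# `input_n`, `target_n` and `sequence_length` placeholders; these names are
# assumptions made for illustration.
zero_state = [np.zeros(t.get_shape().as_list(), dtype=np.float32) for t in state]

sess = tf.Session()
sess.run(tf.global_variables_initializer())
feed = dict(zip(state, zero_state))
feed.update({input_n: input_v, target_n: target_v, sequence_length: seq_len_v})
spikes_v, loss_v = sess.run([spikes, loss], feed_dict=feed)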


# l2_loss = tf.nn.l2_loss(input_n - result)


## RUNNING

# Toy input: smoothed Gaussian noise shaped (seq_size, batch_size, 1); this
# assumes seq_size == max_time and input_depth == 1.
inputs_v = moving_average(np.random.randn(seq_size), 10).reshape(seq_size, batch_size, 1)

sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)

inputs = tf.placeholder(tf.float32,
                        shape=(max_time, batch_size, input_depth),
                        name="Input")
targets = tf.placeholder(tf.float32,
                         shape=(max_time, batch_size, input_depth),
                         name="Targets")

cell = BasicRNNCellWitHist(num_units)
state = StateTuple(
    tf.placeholder(tf.float32, [batch_size, L, num_units],
                   name="output_history"),
    tf.placeholder(tf.float32, [batch_size, num_units], name="output"),
)

output_tuple, final_state, _ = tf.nn.raw_rnn(
    cell, rnn_with_hist_loop_fn(inputs, targets, sequence_length, state, L))
outputs_ta, outputs_inner_ta, inputs_ta, outputs_hist_ta = output_tuple

outputs = outputs_ta.stack()
outputs_inner = outputs_inner_ta.stack()

outputs_n = tf.nn.l2_normalize(outputs, 0)
inputs_n = tf.nn.l2_normalize(inputs, 0)
loss = tf.nn.l2_loss(outputs_n - inputs_n)

train_step = tf.train.AdadeltaOptimizer(0.05).minimize(loss)

input_history = inputs_ta.stack()
output_history = outputs_hist_ta.stack()

sess = tf.Session()
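
# A minimal training-loop sketch for the graph above, reusing the toy signal
# as its own target (an autoencoding setup); the epoch count and full-length
# sequence lengths are assumptions made for illustration.
sess.run(tf.global_variables_initializer())
zero_state = StateTuple(
    np.zeros((batch_size, L, num_units), dtype=np.float32),
    np.zeros((batch_size, num_units), dtype=np.float32),
)
seq_len_v = np.asarray([max_time] * batch_size, dtype=np.int32)
for epoch in range(100):
    feed = dict(zip(state, zero_state))
    feed.update({inputs: inputs_v, targets: inputs_v, sequence_length: seq_len_v})
    loss_v, _ = sess.run([loss, train_step], feed_dict=feed)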



# Example #5
# Sinusoidal test signal over a time grid T (assumed to be defined elsewhere).
x_vec = 1.0 * np.sin(T / 10.0)


# LCA hyperparameters (lam: sparsity penalty, tau: time constant).
c = Config()
c.lam = 1.0
c.weight_init_factor = 1.0
c.epsilon = 1.0
c.tau = 5.0


input = tf.placeholder(tf.float32, shape=(seq_size, batch_size, input_size), name="Input")
sequence_length = tf.placeholder(shape=(batch_size,), dtype=tf.int32)


cell = LCACell(layer_size, filter_size, c)

# `state` and the history length `L` are assumed to be defined as in the
# first example above (u, a, a_m and dF placeholders for the cell).
net_out, finstate, _ = tf.nn.raw_rnn(cell, rnn_with_hist_loop_fn(input, sequence_length, state, L))


sess = tf.Session()
saver = tf.train.Saver()

model_fname = env.run("glm_model.ckpt")
if os.path.exists(model_fname):
    print "Restoring from {}".format(model_fname)
    saver.restore(sess, model_fname)
    epochs = 0
else:
    sess.run(tf.global_variables_initializer())
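
# A minimal sketch of the train-then-checkpoint flow that typically follows
# this restore/init branch; `run_epoch` is a hypothetical helper standing in
# for whatever feeds the placeholders and runs the training op.
for epoch in range(epochs):
    loss_v = run_epoch(sess)  # hypothetical: one pass over the data
    print("epoch {}, loss {:.4f}".format(epoch, loss_v))

saver.save(sess, model_fname)
print("Saved model to {}".format(model_fname))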