Example #1
    z_v, decode_out_v, kl_v, error_v, loss_v, _ = sess_out[0:6]
    z_mu_v, z_sigma_v = sess_out[6:8]
    
    output_data.append(decode_out_v)
    input_data.append(data)
    perf.append((
        loss_v,
        np.mean(kl_v),
        np.mean(error_v),
        np.mean(z_mu_v),
        np.mean(z_sigma_v)
    ))

    shs(z_mu_v, file=pj(tmp_dir, "z_{}.png".format(e)), labels=[str(c) for c in classes])
    # shm(sigmoid(input_data[0][:,0,:]), sigmoid(output_data[0][:,0,:]), file=pj(tmp_dir, "rec_{}.png".format(e)))
    shl(input_data[0][:,(0,50,100),0], output_data[0][:,(0,50,100),0], file=pj(tmp_dir, "rec_{}.png".format(e)), labels=["input","output"])

    # average the per-batch statistics collected over this epoch
    loss_v = np.mean([p[0] for p in perf])
    kl_v = np.mean([p[1] for p in perf])
    error_v = np.mean([p[2] for p in perf])
    mu_diff_v = np.mean([p[3] for p in perf])
    sigma_diff_v = np.mean([p[4] for p in perf])

    # for g, gr, tv in zip(grads_v, grads_raw_v, tvars):
    #     vname = tv.name.replace("/", "_").replace(":","_")

    #     f = pj(tmp_grad_dir, "{}_{}.png".format(vname, e))
    #     fr = pj(tmp_grad_dir, "{}_raw_{}.png".format(vname, e))

    #     work_dims = [s for s in g.shape if s > 1]
    #     if len(work_dims) == 1:
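
Note: the plotting helpers shl, shm and shs used throughout these examples are not defined in any of the excerpts; they appear to come from the author's own utility module. The stand-ins below are only a guess at their behaviour (shl plots line series, shm shows matrices as images, shs draws an annotated scatter); the names, signatures and defaults are assumptions, not the real library.

# Hypothetical stand-ins for the undefined helpers shl / shm / shs (assumption,
# not the author's actual plotting utilities).
import matplotlib.pyplot as plt

def _finish(file=None, show=True, title=None):
    if title is not None:
        plt.suptitle(title)
    if file is not None:
        plt.savefig(file)
        plt.close()
    elif show:
        plt.show()

def shl(*series, **kwargs):
    # plot each array as a line (or a bundle of lines, one per column)
    labels = kwargs.pop("labels", None)
    plt.figure()
    for i, s in enumerate(series):
        plt.plot(s, label=labels[i] if labels else None)
    if labels:
        plt.legend()
    _finish(**kwargs)

def shm(*mats, **kwargs):
    # show each matrix as an image, side by side
    plt.figure()
    for i, m in enumerate(mats):
        plt.subplot(1, len(mats), i + 1)
        plt.imshow(m, aspect="auto")
    _finish(**kwargs)

def shs(points, labels=None, **kwargs):
    # scatter of the first two columns, optionally annotated per point
    plt.figure()
    plt.scatter(points[:, 0], points[:, 1])
    if labels is not None:
        for p, l in zip(points, labels):
            plt.annotate(l, (p[0], p[1]))
    _finish(**kwargs)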
Example #2
tvars = tf.trainable_variables()
grads_raw = tf.gradients(loss, tvars)
grads, _ = tf.clip_by_global_norm(grads_raw, 5.0)

apply_grads = optimizer.apply_gradients(zip(grads, tvars))



sess = tf.Session()
sess.run(tf.global_variables_initializer())


input_v = np.random.randn(batch_size, dim_size)

target_v = np.sin(np.linspace(0, 20, num=seq_size)).reshape((seq_size, 1, 1))


for e in xrange(epochs):
	outputs_v, finstate_v, loss_v, _ = sess.run(
		[outputs, finstate, loss, apply_grads], 
		{
			input: input_v, 
			state: np.zeros((batch_size, net_size)),
			outputs_target: target_v,
		}
	)

	shl(outputs_v, target_v, file=pj("{}/{}_out.png".format(tmp_dir, e)))
	print "Epoch {}, loss {}".format(e, loss_v)
Example #3
        # u += epsilon * (- u + np.dot(r, F) - 0.1*Sd(a))/tau

        u += epsilon * (-u + np.dot(x, F) - np.dot(a, Fc)) / tau

        # a_entry = gamma * (u - lam)
        # a = (u - alpha * lam) * act(np.clip(a_entry, -100.0, 100.0))

        a = relu(u - lam)  # or just the simple rectified version of the clipped activation above

        dF += (1.0 / Tmax) * oja_rule(x, a, F)

        # dF += (1.0/Tmax) * hebb_rule(x, a, F)

        a_acc = np.concatenate([np.expand_dims(a, 0), a_acc[:-1]])

        u_vec[ti] = u.copy()
        a_vec[ti] = a.copy()
        x_hat_vec[(xi - filter_size):xi] += np.dot(F, a.T) / tau
        fb_vec[ti] = np.dot(a, Fc)

    # shm(dF)
    F += 0.1 * dF
    # F = norm(F)
    print "Epoch {}, MSE {}".format(
        e, np.mean(np.square(x_hat_vec[50:-50] - x_vec_pad[50:-100])))

# x_hat_vec = np.pad(x_hat_vec, (filter_size, 0), 'constant')

shl(x_hat_vec, x_vec_pad, show=False)
shm(a_vec)
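
relu, oja_rule and hebb_rule are not shown in the excerpt. Assuming x has shape (filter_size,), a has shape (layer_size,) and F has shape (filter_size, layer_size), which matches the np.dot calls above, one plausible reading is the per-neuron Oja update and a plain Hebbian outer product. These definitions are assumptions, not necessarily the author's.

import numpy as np

def relu(x):
    # rectify: keep only non-negative activity
    return np.maximum(x, 0.0)

def hebb_rule(x, a, F):
    # plain Hebbian term: correlation of input and activity
    return np.outer(x, a)

def oja_rule(x, a, F):
    # per-neuron Oja update: Hebbian term plus a decay that keeps
    # each column of F bounded
    return np.outer(x, a) - F * (a ** 2)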
Example #4
u_vec = np.zeros((Tsize, layer_size))
a_vec = np.zeros((Tsize, layer_size))
u_hat_vec = np.zeros((Tsize, layer_size))
u_hat_vec2 = np.zeros((Tsize + filter_size, ))
a_acc = np.zeros((filter_size, layer_size))

Fc = np.dot(F.T, F) - np.eye(layer_size)
for ti, t in enumerate(T):
    xi = ti + filter_size

    x = x_vec_pad[(xi - filter_size):xi]

    b = np.dot(x, F)

    u += epsilon * (b - u - np.dot(Fc, a)) / tau

    # a_entry = gamma * (u - lam)
    # a = (u - alpha * lam) * act(np.clip(a_entry, -100.0, 100.0))

    a = relu(u - lam)  # or just the simple rectified version of the clipped activation above

    a_acc = np.concatenate([np.expand_dims(a, 0), a_acc[:-1]])

    u_vec[ti] = u
    a_vec[ti] = a
    u_hat_vec[ti] = np.sum(batch_inner(F.T, a_acc.T)) / tau
    u_hat_vec2[(xi - filter_size):xi] += np.dot(F, a.T) / tau

shl(u_hat_vec2, x_vec_pad, show=False)
shm(a_vec)
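
batch_inner is not defined in the excerpt. Given that F.T and a_acc.T both have shape (layer_size, filter_size) here, a plausible definition (an assumption) is a row-wise inner product:

import numpy as np

def batch_inner(A, B):
    # row-wise inner products: result[i] = <A[i], B[i]>
    return np.sum(A * B, axis=-1)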
Example #5
            print "Epoch {}, {}".format(e, rule.__name__)

    ww = np.asarray(ww)
    a0mm = np.asarray(a0mm)

    vv = np.asarray(vv)

    wd.append(("{}{}".format(
        rule.__name__,
        "_{}".format(threshold) if not threshold is None else ""), ww.copy()))
    vd.append(("recc_{}{}".format(
        rule.__name__,
        "_{}".format(threshold) if not threshold is None else ""), vv.copy()))

for wname, ww in wd:
    shl(*[ww[:, i, :] for i in xrange(ww.shape[1])], title=wname, show=False)

for wname, vv in vd:
    shl(*[vv[:, i, :] for i in xrange(vv.shape[1])], title=wname, show=False)

plt.show()

ev, em = np.linalg.eig(np.cov(x_v.T))
em = em[:, list(reversed(np.argsort(ev)))]

cols = np.asarray(len(y_v) * ["#FF0000"])
cols[np.where(y_v == 1)] = "#0000FF"

pd.scatter_matrix(pd.DataFrame(a0), c=cols)
plt.show()
Example #6
	nc2.p.W += lrate * dW2

	# y_hat = np.abs(np.ceil(y_hat-0.5))

	info.append([
		mi(h0, x), 
		mi(h1, x), 
		mi(h0, y),
		mi(h1, y), 
		#mi(np.abs(np.ceil(y_hat-0.5)), y), 
		# mi(y_hat, y),
	])
	grad_stat.append([
		np.mean(dW0), 
		np.mean(dW1),
		np.mean(dW2),
		np.var(dW0),
		np.var(dW1),
		np.var(dW2),
	])
	print "Epoch {}, error {}".format(e, error)



i = np.array(info)
shl(i[:,0], i[:,1], i[:,2], i[:,3], labels=["xh0", "xh1", "h0y", "h1y"], file=pj(tmp_dir, "info.png"))

gs = np.array(grad_stat)
shl(gs[:,0], gs[:,1], gs[:,2], labels=["m0", "m1", "m2"], file=pj(tmp_dir, "grad_mean.png"))
shl(gs[:,3], gs[:,4], gs[:,5], labels=["v0", "v1", "v2"], file=pj(tmp_dir, "grad_var.png"))
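
mi above is some estimate of mutual information between layer activity and the data; its definition is not part of the excerpt. A crude histogram-based stand-in, assuming both arguments flatten to equal-length arrays, could look like the following. It is only an illustration, not the author's estimator.

import numpy as np

def mi(a, b, bins=16):
    # crude histogram estimate of mutual information I(a; b), in nats;
    # assumes a and b flatten to arrays of the same length
    a = a.ravel()
    b = b.ravel()
    joint, _, _ = np.histogram2d(a, b, bins=bins)
    p = joint / joint.sum()
    pa = p.sum(axis=1, keepdims=True)
    pb = p.sum(axis=0, keepdims=True)
    nz = p > 0
    return np.sum(p[nz] * np.log(p[nz] / np.dot(pa, pb)[nz]))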
Example #7
        # mi(y_hat, y),
    ])
    grad_stat.append([
        np.mean(dW0),
        np.mean(dW1),
        np.mean(dW2),
        np.var(dW0),
        np.var(dW1),
        np.var(dW2),
    ])
    print "Epoch {}, error {}".format(e, error)

i = np.array(info)
shl(i[:, 0],
    i[:, 1],
    i[:, 2],
    i[:, 3],
    labels=["xh0", "xh1", "h0y", "h1y"],
    file=pj(tmp_dir, "info.png"))

gs = np.array(grad_stat)
shl(gs[:, 0],
    gs[:, 1],
    gs[:, 2],
    labels=["m0", "m1", "m2"],
    file=pj(tmp_dir, "grad_mean.png"))
shl(gs[:, 3],
    gs[:, 4],
    gs[:, 5],
    labels=["v0", "v1", "v2"],
    file=pj(tmp_dir, "grad_var.png"))
Example #8
    ww.append(W_vals)
    wr.append(Wr_vals)

    osize = len(cell._cells[0].output_size)

    shm(*[spikes_v[li * osize] for li in xrange(layers_num)],
        file=env.run("spikes_{}.png".format(e)))

    shm(*W_vals, file=env.run("W_{}.png".format(e)))
    shm(*Wr_vals, file=env.run("Wr_{}.png".format(e)))
    print "Epoch {}".format(e)

ww = np.concatenate(ww)
wr = np.concatenate(wr)
for syn_i in xrange(input_size):
    shl(ww[:, syn_i, :], file=env.run("w_hist", "syn_{}.png".format(syn_i)))

for ni in xrange(net_size):
    shl(wr[:, ni, :], file=env.run("wr_hist", "n_{}.png".format(ni)))

# pre_stdp = np.squeeze(spikes_v[3])
# pre_spikes = np.squeeze(inputs_v)
# post_stdp = np.squeeze(spikes_v[4])
# post_spikes = np.squeeze(spikes_v[0])

# dw = np.zeros(W.shape)
# for ti in xrange(seq_size):
#     # for ni in xrange(net_size):
#     #     for syn_i in xrange(input_size):
#     #         dw[syn_i, ni] += pre_stdp[ti, syn_i] * post_spikes[ti, ni] - post_stdp[ti, ni] * pre_spikes[ti, syn_i]
Example #9
    wr.append(Wr_vals)


    osize = len(cell._cells[0].output_size)
    
    shm(*[spikes_v[li*osize] for li in xrange(layers_num)], file=env.run("spikes_{}.png".format(e)))
    

    shm(*W_vals, file=env.run("W_{}.png".format(e)))
    shm(*Wr_vals, file=env.run("Wr_{}.png".format(e)))
    print "Epoch {}".format(e)

ww = np.concatenate(ww)
wr = np.concatenate(wr)
for syn_i in xrange(input_size):
    shl(ww[:,syn_i,:], file=env.run("w_hist", "syn_{}.png".format(syn_i)))

for ni in xrange(net_size):
    shl(wr[:,ni,:], file=env.run("wr_hist", "n_{}.png".format(ni)))


# pre_stdp = np.squeeze(spikes_v[3])
# pre_spikes = np.squeeze(inputs_v)
# post_stdp = np.squeeze(spikes_v[4])
# post_spikes = np.squeeze(spikes_v[0])

# dw = np.zeros(W.shape)
# for ti in xrange(seq_size):
#     # for ni in xrange(net_size):
#     #     for syn_i in xrange(input_size):
#     #         dw[syn_i, ni] += pre_stdp[ti, syn_i] * post_spikes[ti, ni] - post_stdp[ti, ni] * pre_spikes[ti, syn_i]
Example #10
			vv.append(v.copy())
			a0mm.append(a0m.copy())
	
		if e % 25 == 0:
			print "Epoch {}, {}".format(e, rule.__name__)

	ww = np.asarray(ww)
	a0mm = np.asarray(a0mm)

	vv = np.asarray(vv)

	wd.append(("{}{}".format(rule.__name__, "_{}".format(threshold) if not threshold is None else ""), ww.copy()))
	vd.append(("recc_{}{}".format(rule.__name__, "_{}".format(threshold) if not threshold is None else ""), vv.copy()))


for wname, ww in wd:
	shl(*[ww[:,i,:] for i in xrange(ww.shape[1])], title=wname, show=False)

for wname, vv in vd:
	shl(*[vv[:,i,:] for i in xrange(vv.shape[1])], title=wname, show=False)

plt.show()

ev, em = np.linalg.eig(np.cov(x_v.T))
em = em[:, list(reversed(np.argsort(ev)))]

cols = np.asarray(len(y_v)*["#FF0000"])
cols[np.where(y_v == 1)] = "#0000FF"

pd.scatter_matrix(pd.DataFrame(a0), c=cols)
plt.show()
Example #11
    return step(t) * (np.exp(-t / tau_l) - np.exp(-t / tau_s)) * k1


g = 0
h = 0
i = 0
h0 = 1.0
dt = 1.0

t = np.linspace(0, 100, 100)

stat = np.zeros((
    2,
    100,
))
for ti, _ in enumerate(t):
    if ti == 0:
        i = 1.0
    else:
        i = 0.0

    g += -dt * g / tau_l + 1.5 * k1 * h
    h += -dt * h / tau_s + k1 * i

    stat[0, ti] = g
    stat[1, ti] = h

ans = epsp_kernel(t, tau_s, tau_l)

shl(ans, stat[0, :])
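
This example checks the closed-form double-exponential EPSP kernel against its realization as two coupled leaky integrators driven by a unit impulse. The constants tau_s, tau_l, k1 and the helper step are not defined in the excerpt; one plausible set of definitions consistent with the return statement at the top is given below (assumed values).

import numpy as np

tau_s, tau_l = 3.0, 10.0  # synaptic and membrane time constants (assumed values)
k1 = 1.0                  # kernel gain (assumed value)

def step(t):
    # Heaviside step: 1 for t >= 0, else 0
    return (np.asarray(t) >= 0.0).astype(np.float64)

def epsp_kernel(t, tau_s, tau_l):
    # closed-form double-exponential EPSP kernel, as in the return statement above
    return step(t) * (np.exp(-t / tau_l) - np.exp(-t / tau_s)) * k1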
Example #12
u_vec = np.zeros((Tsize, layer_size))
a_vec = np.zeros((Tsize, layer_size))
u_hat_vec = np.zeros((Tsize, layer_size))
u_hat_vec2 = np.zeros((Tsize+filter_size,))
a_acc = np.zeros((filter_size, layer_size))

Fc = np.dot(F.T, F) - np.eye(layer_size)
for ti, t in enumerate(T):
    xi = ti + filter_size

    x = x_vec_pad[(xi-filter_size):xi]

    b = np.dot(x, F)

    u += epsilon * (b - u - np.dot(Fc, a))/tau
    
    # a_entry = gamma * (u - lam)
    # a = (u - alpha * lam) * act(np.clip(a_entry, -100.0, 100.0))
    
    a = relu(u - lam)  # or just the simple rectified version of the clipped activation above
    
    a_acc = np.concatenate([np.expand_dims(a, 0), a_acc[:-1]])

    u_vec[ti] = u
    a_vec[ti] = a
    u_hat_vec[ti] = np.sum(batch_inner(F.T, a_acc.T))/tau
    u_hat_vec2[(xi-filter_size):xi] += np.dot(F, a.T)/tau

shl(u_hat_vec2, x_vec_pad, show=False)
shm(a_vec)