Example #1
def simulate(seed, na=2, rate=100, predict=False):
    # build the network and the discounted weight matrix
    net = NetPopPredict() if predict else NetPop(na)
    W = np.copy(net.W)
    W[:-1, :-1] *= gamma
    W -= (1 - gamma) * net.competition
    u0 = np.linspace(-2, .04, len(W))
    np.random.seed(seed)
    s, u = runpopU(W * 1. / popsize, u0, step, popsize, rate)
    # 250 ms pre-period: a strong pulse to one group of units, weaker pulses to another
    pre = np.zeros((int(250 / step), len(W)))
    for t in [-40]:
        pre[int(t / step), range(-2, -2 - na, -1)] = 1
    for t in [-240, -200, -160, -120, -80, -10, -2, -1]:
        pre[int(t / step), range(-2 - na, -2 - 2 * na, -1)] = .1
    pres = np.vstack((pre, s))
    # population-averaged, smoothed firing rates of two groups of units,
    # restricted to the 250-750 ms window
    tmp = np.array([np.mean(np.array([smooth_spikes(pres[:, j * popsize + i], 40, .2)
                                      for i in range(popsize)]), axis=0)
                    for j in range(len(W) - na, len(W) - 8 * na - 1, -na)
                    ])[:, int(250 / step):int(750 / step)]
    tmp2 = np.array([np.mean(np.array([smooth_spikes(pres[:, j * popsize + i], 40, .2)
                                       for i in range(popsize)]), axis=0)
                     for j in range(len(W) - na - 1, len(W) - 8 * na - 2, -na)
                     ])[:, int(250 / step):int(750 / step)]
    # peak time (bin index times step) of each trace, the two sets of traces, and the raw spikes
    return [np.argmax(tmp, axis=1) * step, tmp, tmp2, s]
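# Usage sketch for simulate(): unpack the four return values. The variable names
# below are illustrative, not from the original code; the structure follows the
# return statement above.
peak_times, rates, rates2, spikes = simulate(seed=0, na=2, rate=100)
# peak_times: argmax of each smoothed trace, multiplied by step
# rates, rates2: population-averaged smoothed rate traces; spikes: raw spike trains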
Example #2
def sim(u0, rate, reset, tm, th, rb, r0, r1, r2, rp, tmean, tsigma, ir, runs):
    step = .2
    net = NetPop(1)
    np.random.seed(0)
    tmp = u0 * np.ones(len(net.W))
    tmp[-1] = .5  # note: tmp is prepared here but not used below
    # `runs` repetitions with parameter r1 ...
    s = np.array([run(net.W, u0 * np.ones(len(net.W)), step, 1,
                      rate=rate, T=1700, tm=tm, reset=reset, seed=i,
                      th=th, rbase=rb, r0=r0, r1=r1, rp=rp, tmean=tmean, tsigma=tsigma)
                  for i in range(runs)])
    # ... and the same repetitions with r1 replaced by r2
    s3 = np.array([run(net.W, u0 * np.ones(len(net.W)), step, 1,
                       rate=rate, T=1700, tm=tm, reset=reset, seed=i,
                       th=th, rbase=rb, r0=r0, r1=r2, rp=rp, tmean=tmean, tsigma=tsigma)
                   for i in range(runs)])
    # 250 ms pre-period of constant input proportional to ir; none to the last unit
    pre = np.ones((int(250 / step), len(net.W))) * step * ir * 100 / 1000.
    pre[:, -1] = 0
    # smoothed, trial-averaged traces: units 1 and 0 for the r2 runs,
    # units 1, 0 and 2 for the r1 runs
    mean_s = np.vstack((pre, np.mean(s, axis=0)))
    mean_s3 = np.vstack((pre, np.mean(s3, axis=0)))
    return np.array([smooth_spikes(mean_s3[:, 1], 40, step),
                     smooth_spikes(mean_s3[:, 0], 40, step),
                     smooth_spikes(mean_s[:, 1], 40, step),
                     smooth_spikes(mean_s[:, 0], 40, step),
                     smooth_spikes(mean_s[:, 2], 40, step)])
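# smooth_spikes() is used throughout these examples but is not defined in them.
# A minimal sketch, assuming it Gaussian-smooths a binned spike train (bin width
# `step` ms, kernel width `sigma` ms) into a firing rate in Hz; the actual helper
# in the original code base may differ. Assumes numpy is imported as np.
def smooth_spikes_sketch(spikes, sigma, step):
    t = np.arange(-3 * sigma, 3 * sigma + step, step)
    kernel = np.exp(-t ** 2 / (2 * sigma ** 2))
    kernel /= kernel.sum() * step / 1000.  # normalise so the output is in spikes/s
    return np.convolve(spikes, kernel, mode='same')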
Ratels4 = np.array([a[1] for a in res4])


### plot neural activities from simulation ###

pl.figure()
for c in range(8):
    pl.plot(Ratels.mean(axis=0)[c], color=col[c])
pl.xlabel('Time from target [ms]', labelpad=0)
pl.ylabel('Firing rate [Hz]', labelpad=10)
pl.xticks([0, 250 / step, 500 / step], [0, 250, 500])
pl.yticks([0, 20, 40, 60, 80], [0, 20, 40, 60, 80])
pl.xlim(0, 500 / step)
pl.ylim(0, 64)
pl.plot(smooth_spikes([step * 100 / 1000. *
                       (.1 + .65 * np.exp(-(t * step - 500) ** 2 / tau_r ** 2))
                       for t in range(int(1000 / step))], 40, .2)[int(250 / step):int(750 / step)],
        color='black', zorder=-1, lw=2)
pl.subplots_adjust(.18, .21, .945, .99)
simpleaxis(pl.gca())
if savefig:
    pl.savefig('Sohn.pdf', dpi=600)
else:
    pl.show()
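# simpleaxis() is not defined in these snippets. Given the explicit spine hiding
# done for ax2 further below, it presumably removes the top and right spines;
# a minimal sketch under that assumption:
def simpleaxis_sketch(ax):
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()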


pl.figure()
pl.plot(Ratels.mean(axis=0)[3], color=col[3])
l, = pl.plot(Ratels3.mean(axis=0)[3], '--', color=col[3])
l.set_dashes([10, 10])
l, = pl.plot(Ratels4.mean(axis=0)[3], ':', color=col[3])
pl.subplots_adjust(.17, .22, .99, .99)
if savefig:
    pl.savefig('Delta(t).pdf', dpi=600)
else:
    pl.show()


### activity profiles ###

errx = -5.2
fac = .022

res = sim(.3, 1000, 0., 25., np.array([[.97, 1], [1.07, 1], [1.3, 1]]),
          1, tdec, trise, tdly, 30).mean(1)
pre = np.zeros((int(250 / step), len(net.W)))
res = np.array([[smooth_spikes(x, 40, step) for x in np.vstack((pre, res[j])).T]
                for j in range(3)])
res2 = res[:, 0, 250 * 5:1250 * 5]

fig = pl.figure()
ax = fig.add_subplot(111)
p3, = ax.plot(np.arange(0, 1000, step), fac * res2[0], color=col[6])
p2, = ax.plot(np.arange(0, 1000, step), fac * res2[1], color=col[1])
p1, = ax.plot(np.arange(0, 1000, step), fac * res2[2], color=col[2])
# labels & layout
pl.xlabel('Time from offer [s]')
pl.ylabel('Relative firing rate [Hz]', labelpad=15, y=.38)
pl.xticks([0, 500], [0, 0.5])
pl.xlim(0, 1000)
pl.yticks([0, 5], [0, 5])
pl.ylim(-1.5, 9.5)
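# The factors of the form step * rate * ... / 1000. used above convert a firing
# rate in Hz into an expected spike count per time bin of width `step` (ms).
# A quick check with hypothetical numbers:
step_ms = .2                                # bin width in ms
rate_hz = 100.                              # example rate (hypothetical)
spikes_per_bin = rate_hz * step_ms / 1000.  # 0.02 expected spikes per bin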
Example #7
pl.subplots_adjust(.17, .22, .99, .99)
if savefig:
    pl.savefig('Delta(t).pdf', dpi=600)
else:
    pl.show()

### activity profiles ###

errx = -5.2
fac = .022

res = sim(.3, 1000, 0., 25., np.array([[.97, 1], [1.07, 1], [1.3, 1]]), 1,
          tdec, trise, tdly, 30).mean(1)
pre = np.zeros((int(250 / step), len(net.W)))
res = np.array([[smooth_spikes(x, 40, step) for x in np.vstack((pre, res[j])).T]
                for j in range(3)])
res2 = res[:, 0, 250 * 5:1250 * 5]

fig = pl.figure()
ax = fig.add_subplot(111)
p3, = ax.plot(np.arange(0, 1000, step), fac * res2[0], color=col[6])
p2, = ax.plot(np.arange(0, 1000, step), fac * res2[1], color=col[1])
p1, = ax.plot(np.arange(0, 1000, step), fac * res2[2], color=col[2])
# labels & layout
pl.xlabel('Time from offer [s]')
pl.ylabel('Relative firing rate [Hz]', labelpad=15, y=.38)
pl.xticks([0, 500], [0, 0.5])
pl.xlim(0, 1000)
pl.yticks([0, 5], [0, 5])
Example #8
res4 = [simulate(a, 4) for a in range(30)]
Ratels4 = np.array([a[1] for a in res4])

### plot neural activities from simulation ###

pl.figure()
for c in range(8):
    pl.plot(Ratels.mean(axis=0)[c], color=col[c])
pl.xlabel('Time from target [ms]', labelpad=0)
pl.ylabel('Firing rate [Hz]', labelpad=10)
pl.xticks([0, 250 / step, 500 / step], [0, 250, 500])
pl.yticks([0, 20, 40, 60, 80], [0, 20, 40, 60, 80])
pl.xlim(0, 500 / step)
pl.ylim(0, 64)
pl.plot(smooth_spikes([step * 100 / 1000. *
                       (.1 + .65 * np.exp(-(t * step - 500)**2 / tau_r**2))
                       for t in range(int(1000 / step))], 40, .2)[int(250 / step):int(750 / step)],
        color='black', zorder=-1, lw=2)
pl.subplots_adjust(.18, .21, .945, .99)
simpleaxis(pl.gca())
if savefig:
    pl.savefig('Sohn.pdf', dpi=600)
else:
    pl.show()

pl.figure()
pl.plot(Ratels.mean(axis=0)[3], color=col[3])
l, = pl.plot(Ratels3.mean(axis=0)[3], '--', color=col[3])
l.set_dashes([10, 10])
### simulation ###
net = NetPop()
W = np.copy(net.W)
W[:-1, :-1] *= gamma
W -= (1 - gamma) * net.competition
if deval:
    W[2, -1] /= 2.
s = []
for seed in range(30):
    u0 = np.zeros(len(W))
    np.random.seed(seed)
    s += [runU(W, u0, step, rate)]
s = np.array(s)
pre = np.zeros((int(250 / step), len(W)))
pres = np.array([np.vstack((pre, ss)) for ss in s])
Ratels = [np.array([np.mean(np.array([smooth_spikes(p[:, j * popsize + i], 40, .2)
                                      for i in range(popsize)]), axis=0)
                    for j in range(len(W))])[:, int(250 / step):int(750 / step)] for p in pres]


# plot neural activities:
pl.figure()
for c in range(6):
    pl.plot(np.array(Ratels).mean(axis=0)[c], color=col[c], zorder=5)
pl.xlabel('Time [ms]', labelpad=0)
pl.ylabel('Firing rate [Hz]', labelpad=10)
pl.xticks([0, 250 / step, 500 / step], [0, 250, 500])
pl.yticks(*[[0, 20, 40, 60, 80]] * 2)
pl.xlim(0, 500 / step)
pl.ylim(0, 64)
pl.plot(smooth_spikes([step * 100 / 1000. *
                       (.1 + .65 * np.exp(-(t * step - 500) ** 2 / tau_r ** 2))
                       for t in range(int(1000 / step))], 40, .2)[int(250 / step):int(750 / step)],
        color='black', zorder=-1, lw=2)
pl.yticks([15, 20, 25], [15, 20, 25])
pl.ylim(15, 25.8)
simpleaxis(pl.gca())
# reward neuron
ax2 = ax.twinx()
# fake outside plotrange for legend
p3, = ax2.plot([0, 100], [-10, -10], c='grey', lw=6, zorder=-5)
# fake outside plotrange for legend
p4, = ax2.plot([0, 100], [-10, -10], c='grey', lw=2, zorder=-5)
# fake outside plotrange for legend
p5, = ax2.plot([0, 100], [-10, -10], c=col[1], lw=4, zorder=-5)
# fake outside plotrange for legend
p6, = ax2.plot([0, 100], [-10, -10], c=col[6], lw=4, zorder=-5)
rp, tmean, tsigma = 5.2, 95, 30
ax2.plot(np.arange(0, 1500, .2), smooth_spikes(
    [.2 * 450 / 1000. / 100 * (1 + rp * np.exp(-(t * .2 - tmean)**2 / tsigma**2))
     for t in range(int(1750 / .2))], 40, .2)[:1500 * 5], c='black', lw=2, zorder=-5)
ax2.set_ylim(2, 60)
ax2.set_yticks([0, 10])
ax2.set_yticklabels([0, 10])
ax.set_zorder(ax2.get_zorder() + 1)  # put ax in front of ax2
ax.patch.set_visible(False)  # hide the 'canvas'
# right axis & layout
pl.plot([1500, 1500], [0, 15], c='black', lw=2, clip_on=False)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
fig.subplots_adjust(.17, .22, .92, .95)
lg2 = pl.legend([p3, p4, p5, p6], [r'$r_A = 3$', r'$r_A = 1$', 'A', 'B'],
                bbox_to_anchor=(1.07, 1.14), labelspacing=.23, handletextpad=.15, handlelength=1.6)
lg2.draw_frame(False)
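# The lines plotted far outside the axis limits above exist only to provide
# legend handles (a proxy-artist pattern). A standalone illustration of the same
# trick with hypothetical labels; plotting empty data works just as well:
fig2 = pl.figure()
axl = fig2.add_subplot(111)
proxy_thick, = axl.plot([], [], c='grey', lw=6)
proxy_thin, = axl.plot([], [], c='grey', lw=2)
lg = axl.legend([proxy_thick, proxy_thin], ['condition 1', 'condition 2'])
lg.draw_frame(False)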
Example #11
simpleaxis(pl.gca())
# reward neuron
ax2 = ax.twinx()
# fake outside plotrange for legend
p3, = ax2.plot([0, 100], [-10, -10], c='grey', lw=6, zorder=-5)
# fake outside plotrange for legend
p4, = ax2.plot([0, 100], [-10, -10], c='grey', lw=2, zorder=-5)
# fake outside plotrange for legend
p5, = ax2.plot([0, 100], [-10, -10], c=col[1], lw=4, zorder=-5)
# fake outside plotrange for legend
p6, = ax2.plot([0, 100], [-10, -10], c=col[6], lw=4, zorder=-5)
rp, tmean, tsigma = 5.2, 95, 30
ax2.plot(np.arange(0, 1500, .2), smooth_spikes(
    [.2 * 450 / 1000. / 100 * (1 + rp * np.exp(-(t * .2 - tmean)**2 / tsigma**2))
     for t in range(int(1750 / .2))], 40, .2)[:1500 * 5], c='black', lw=2, zorder=-5)
ax2.set_ylim(2, 60)
ax2.set_yticks([0, 10])
ax2.set_yticklabels([0, 10])
ax.set_zorder(ax2.get_zorder() + 1)  # put ax in front of ax2
ax.patch.set_visible(False)  # hide the 'canvas'
# right axis & layout
pl.plot([1500, 1500], [0, 15], c='black', lw=2, clip_on=False)
ax2.spines['top'].set_visible(False)
ax2.spines['right'].set_visible(False)
fig.subplots_adjust(.17, .22, .92, .95)
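# The black reference curve on ax2 is built from a Gaussian bump on a constant
# baseline, rate(t) = base * (1 + rp * exp(-(t - tmean)**2 / tsigma**2)).
# A small helper capturing that formula (name and packaging are illustrative,
# not part of the original code):
def reward_rate_profile(base, rp, tmean, tsigma, T, step):
    t = np.arange(0, T, step)  # time in ms
    return base * (1 + rp * np.exp(-(t - tmean) ** 2 / tsigma ** 2))

# e.g. the values used above: reward_rate_profile(.2 * 450 / 1000. / 100, 5.2, 95, 30, 1750, .2)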