Example #1
def compare_data(leftfiles, rightfiles, leftheader, rightheader):
    """Compare two simulations side by side"""
    figs = []
    axeslist = []
    psthaxlist = []
    for left, right in zip(leftfiles, rightfiles):
        fig, axes = plt.subplots(nrows=6, ncols=2, sharey='row')
        psth_axes = []
        
        for ii, fname in enumerate([left, right]):
            fpath = os.path.join(datadir, fname)                         
            with h5.File(fpath, 'r') as fd:
                config = nda.load_config(fd)
                bins = np.arange(0, nda.get_simtime(fd)+0.5, 50.0)
                try:
                    pns = list(fd[nda.pn_st_path].keys())
                except KeyError:
                    print('Could not find PNs in', fname)
                    return figs, axeslist, psthaxlist
                pns = sorted(pns, key=lambda x: int(x.split('_')[-1]))
                pn_st, pn_y = nda.get_event_times(fd[nda.pn_st_path], pns)
                axes[0, ii].plot(np.concatenate(pn_st), np.concatenate(pn_y), ',')
                psth_ax = axes[0, ii].twinx()
                psth_axes.append(psth_ax)
                plot_population_psth(psth_ax, pn_st, config['pn']['number'], bins)
                lines, kc_st, kc_y = plot_kc_spikes_by_cluster(axes[1, ii], fd, 'LCA')
                plot_population_psth(axes[2, ii], kc_st, len(kc_st), bins, rate_sym='b^', cell_sym='rv')
                stiminfo = nda.get_stimtime(fd)
                stimend = stiminfo['onset'] + stiminfo['duration'] + stiminfo['offdur']
                rates = [len(st[(st > stiminfo['onset']) & (st < stimend)]) * 1e3 
                         / (stimend - stiminfo['onset']) for st in kc_st]
                print(rates[:5])
                axes[3, ii].hist(rates, bins=np.arange(21))
                axes[3, ii].set_xlabel('Firing rate')
                plot_kc_vm(axes[4, ii], fd, 'LCA', 5)
                plot_ggn_vm(axes[5, ii], fd,
                            fd['/data/uniform/ggn_output/GGN_output_Vm'],
                            'LCA', 5, color='r')
                plot_ggn_vm(axes[5, ii], fd,
                            fd['/data/uniform/ggn_basal/GGN_basal_Vm'],
                            'basal', 5, color='g')
                axes[5, ii].set_ylim((-53, -35))
                axes[0, ii].set_title('{}\nFAKE? {}'.format(
                    fname, config['kc']['fake_clusters']))
        time_axes = [axes[ii, jj] for ii in [0, 1, 2, 4, 5] for jj in [0, 1]]
        for ax in time_axes[:-1]:
            ax.set_xticks([])
        axes[0, 0].get_shared_x_axes().join(*time_axes)
        axes[2, 0].get_shared_x_axes().join(*axes[2, :])
        # psth_axes[0].get_shared_y_axes().join(*psth_axes)
        psth_axes[0].autoscale()
        # axes[-1, -1].autoscale()
        fig.text(0.1, 0.95, leftheader, ha='left', va='bottom')
        fig.text(0.6, 0.95, rightheader, ha='left', va='bottom')
        fig.set_size_inches(15, 10)
        # fig.tight_layout()
        figs.append(fig)
        axeslist.append(axes)
        psthaxlist.append(psth_axes)
    return figs, axeslist, psthaxlist
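
# Usage sketch (not part of the original script; file names and headers are
# hypothetical): compare_data() joins each file name with `datadir`.
# figs, axlist, psthlist = compare_data(
#     ['run_clustered_JID11111111.h5'], ['run_diffuse_JID22222222.h5'],
#     'clustered PN->KC', 'diffuse PN->KC')
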
def get_dv_kc_data(fname):
    """Create a dataset where each row contains a KC, its presynaptic GGN
    section, maximum conductance from GGN, peak depolarization of that
    section and the number of spikes generated by that KC"""
    with h5.File(fname, 'r') as fd:
        print(fname, len(nda.get_spiking_kcs(fd)))
        print(yaml.dump(nda.load_config(fd), default_style=''))
        ggn_kc_gbar = pd.DataFrame(fd[nda.ggn_kc_syn_path]['pre', 'post',
                                                           'gbar'][:, 0])
        ggn_peak_vm = fd['/data/uniform/ggn_output/GGN_output_Vm'][()].max(
            axis=1)
        ggn_sec = fd['/data/uniform/ggn_output/GGN_output_Vm'].dims[0][
            'source']
        ggn_sec_vm = pd.DataFrame(data={'sec': ggn_sec, 'vm': ggn_peak_vm})
        pn_kc_gmax = pd.DataFrame(fd[nda.pn_kc_syn_path]['post', 'gmax'][:, 0])
        pn_kc_gmax_by_kc = pn_kc_gmax.groupby('post').sum().reset_index()
        kc_sc = {
            kc_st.attrs['source']: len(kc_st)
            for kc_st in fd[nda.kc_st_path].values()
        }
    df_kc_sc = pd.DataFrame(data={
        'kc': list(kc_sc.keys()),
        'spike_count': list(kc_sc.values())
    })
    combined = pd.merge(ggn_kc_gbar, df_kc_sc, left_on='post', right_on='kc')
    combined = pd.merge(combined, ggn_sec_vm, left_on='pre', right_on='sec')
    combined.drop(columns=['pre', 'post'], inplace=True)
    return combined
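
# Usage sketch (illustrative; the jid below is hypothetical): relate GGN->KC
# conductance and local GGN depolarization to each KC's spike count.
# dv_kc = get_dv_kc_data(nda.find_h5_file('12345678', datadir))
# dv_kc.plot.scatter(x='gbar', y='spike_count')
# dv_kc.plot.scatter(x='vm', y='spike_count')
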
_ur = pint.UnitRegistry()
Q_ = _ur.Quantity
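# Illustrative only: the registry makes unit handling explicit, e.g. for
# synaptic conductances quoted in pS vs nS.
# Q_(2.4, 'pS').to('nS')   # -> 0.0024 nanosiemens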
# datadir = 'Z:/Subhasis/ggn_model_data/olfactory_network'
# datadir = '/data/rays3/ggn/olfactory_network/'
datadir = 'D:/biowulf_stage/olfactory_network'

#** jid_sc: shifting PN only, with constant GGN->KC inhibition - note that this
# run did not have clustered PN->KC connectivity

jid_sc = '22087969'
fname_sc = nda.find_h5_file(jid_sc, datadir)
fd_sc = h5.File(fname_sc, 'r')
print('shifting PN, jid: {}, spiking KCs {}'.format(
    jid_sc, len(nda.get_spiking_kcs(fd_sc))))
print(yaml.dump(nda.load_config(fd_sc), default_style=''))
print('-' * 20)

stiminfo = nda.get_stimtime(fd_sc)
pn_st = []
pn_id = []
for pn in fd_sc[nda.pn_st_path].values():
    pn_st.append(pn[:])
    pn_id.append([int(pn.name.rpartition('_')[-1])] * len(pn))

kc_st = []
kc_id = []
for kc, st in fd_sc[nda.kc_st_path].items():
    kc_st.append(st[:])
    kc_id.append([int(kc)] * len(st))
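
# Raster sketch from the spike-time lists above (added for illustration; it
# mirrors the plotting pattern used in the later examples).
fig_raster, (ax_pn, ax_kc) = plt.subplots(nrows=2, ncols=1, sharex='all')
ax_pn.plot(np.concatenate(pn_st), np.concatenate(pn_id), ',')
ax_kc.plot(np.concatenate(kc_st), np.concatenate(kc_id), ',')
ax_pn.set_ylabel('PN #')
ax_kc.set_ylabel('KC #')
ax_kc.set_xlabel('Time (ms)')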
Example #4
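# NOTE: this excerpt begins inside nested loops over odor, connection and
# template_jid; `common`, `counts` and `jid_spiking_kcs` are defined in the
# omitted outer code.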
                if common is None:
                    common = set(jid_spiking_kcs[jid])
                else:
                    common = common.intersection(jid_spiking_kcs[jid])
            pos = ((common_kcs.odor == odor)
                   & (common_kcs.connection == conn)
                   & (common_kcs.template_jid == template))
            common_kcs.loc[pos, 'common_kcs'] = len(common)
            common_kcs.loc[pos, 'avg_kcs'] = np.mean(counts)

templates = odor_trials_data.template_jid.unique()
for t in templates:
    fname = nda.find_h5_file(str(t), datadir)
    print(fname)
    with h5.File(fname, 'r') as fd:
        print(yaml.dump(nda.load_config(fd), default_flow_style=False))

tmp_color_map = {
    10829002: '#e66101',  # iid
    10829014: '#b2abd2',  # clus
    9932209: '#fdb863',  # iid
    9932198: '#5e3c99'  # clus
}

tmp_label_map = {
    10829002: 'Diffuse 2',  # iid
    10829014: 'Clustered 2',  # clus
    9932209: 'Diffuse 1',  # iid
    9932198: 'Clustered 1'  # clus
}
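
# Sketch (assumption): the maps are keyed by template_jid and can be used to
# build consistent legend entries for per-template plots.
import matplotlib.lines as mlines
legend_handles = [mlines.Line2D([], [], marker='s', ls='none',
                                color=tmp_color_map[t],
                                label=tmp_label_map[t])
                  for t in sorted(tmp_color_map)]
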
Example #5
datadir = '/data/rays3/ggn/fixed_net'
os.chdir(analysis_dir)

fig, ax = plt.subplots(nrows=3, ncols=2, sharex='all', sharey='row')
# jid = '13081213'   # this reproduces GGN Vm well, PN->KC clustered, same KC cluster receives coactive PNs
jid = '21841553'   # new simulation 
fname = nda.find_h5_file(jid, datadir)
originals = []
with h5.File(fname, 'r') as fd:
    ax[0, 0].set_title(f'{jid}')
    print(fname)
    orig = fd.attrs['original']
    originals.append(orig)
    print(f'original:{orig}')
    with h5.File(orig, 'r') as forig:
        print(yaml.dump(nda.load_config(forig), default_flow_style=False))
    # fig, ax = plt.subplots(nrows=2, ncols=1, sharex='all')
    myplot.plot_ggn_vm(ax[1, 0], fd, fd['/data/uniform/ggn_output/GGN_output_Vm'], 'LCA', 1, color='black', alpha=1.0)
    ax[1, 0].set_ylim(-60, -45)
    ax[1, 0].set_yticks([-55, -50])
    ig_vm = fd['/data/uniform/ig/IG_Vm']
    dt = ig_vm.attrs['dt']
    ig_vm = ig_vm[0, :]
    t = np.arange(len(ig_vm)) * dt
    ax[2, 0].plot(t, ig_vm, color='black')
    ax[2, 0].hlines(y=-60.0, xmin=500, xmax=1500, color='gray', lw=10)
    ax[2, 0].hlines(y=-60.0, xmin=1500, xmax=2000, color='lightgray', lw=10)
    for axis in ax.flat:
        for sp in axis.spines.values():
            sp.set_visible(False)
        axis.tick_params(right=False, top=False)
    ax[1, 0].tick_params(bottom=False)
fig = plt.figure()
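# NOTE: `gs` below is assumed to be a GridSpec created earlier in the original
# script (not shown in this excerpt); cf. the gridspec.GridSpec call in Example #7.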
ax0 = fig.add_subplot(gs[0])
ax1 = fig.add_subplot(gs[1], sharex=ax0)
ax2 = fig.add_subplot(gs[2], sharex=ax0, sharey=ax1)

#* open file descriptors
#** jid_lognorm: shifting PNs with lognormal distribution of synaptic strengths
# jid = '3047110'  # this was old version
# jid_lognorm = '16240941'  # newer simulation with 0.2 s off response
# jid_lognorm = '22087970'   # This is with lognorm distribution of all synaptic strengths, pn-kc gmax=2.4pS, 5K KCs spiked
jid_lognorm = '22087969'
fname_lognorm = nda.find_h5_file(jid_lognorm, datadir)
fd_lognorm = h5.File(fname_lognorm, 'r')
print('shifting PN, jid: {}, spiking KCs {}'.format(
    jid_lognorm, len(nda.get_spiking_kcs(fd_lognorm))))
print(yaml.dump(nda.load_config(fd_lognorm), default_style=''))
print('-' * 20)
#** jid_constant: shifting PN only, with constant GGN->KC inhibition - note that
# this run did not have clustered PN->KC connectivity
# jid_constant = '16562835'   # Thu Mar 14 13:49:49 EDT 2019 - this file was somehow lost from backup.
# jid_constant = '22295183'   # Redid simulations with constant synaptic conductances on 2019-03-13 PN-KC gmax=3 pS, spiking KCs 1707
jid_constant = '24211204'  # The jid '22295183' in fig 5 had off-time 0.2 s, this one has 0.5s
fname_constant = nda.find_h5_file(jid_constant, datadir)
fd_constant = h5.File(fname_constant, 'r')
print('shifting PN, jid: {}, spiking KCs {}'.format(
    jid_constant, len(nda.get_spiking_kcs(fd_constant))))
print(yaml.dump(nda.load_config(fd_constant), default_style=''))
print('-' * 20)

#* plot PN activity from jid_lognorm
stiminfo = nda.get_stimtime(fd_lognorm)
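
# Sketch (not in the original excerpt): the PN raster for fd_lognorm follows the
# same pattern as the other examples; the stimulus window derived from stiminfo
# can be used to mark the time axis.
stim_end = stiminfo['onset'] + stiminfo['duration'] + stiminfo['offdur']
pn_st_lognorm, pn_y_lognorm = nda.get_event_times(
    fd_lognorm[nda.pn_st_path],
    sorted(fd_lognorm[nda.pn_st_path].keys(),
           key=lambda x: int(x.split('_')[-1])))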
Example #7
# jid = '16034794'
jid = '22295165'
fname = nda.find_h5_file(jid, datadir)

gs = gridspec.GridSpec(nrows=3, ncols=1, height_ratios=[2, 2, 1], hspace=0.05)
fig = plt.figure()
ax0 = fig.add_subplot(gs[0])
ax1 = fig.add_subplot(gs[1], sharex=ax0)
ax2 = fig.add_subplot(gs[2], sharex=ax0)

axes = [ax0, ax1, ax2]

with h5.File(fname, 'r') as fd:
    print(jid, len(nda.get_spiking_kcs(fd)))
    config = nda.load_config(fd)
    print(yaml.dump(config, default_style=''))
    pn_st = []
    pn_id = []
    for pn in fd[nda.pn_st_path].values():
        pn_st.append(pn[:])
        pn_id.append([int(pn.name.rpartition('_')[-1])] * len(pn))
    ax0.plot(np.concatenate(pn_st[::10]),
             np.concatenate(pn_id[::10]),
             marker='s',
             ms=1,
             color='#fdb863',
             ls='none')
    kc_x, kc_y = nda.get_event_times(fd[nda.kc_st_path])
    ax1.plot(np.concatenate(kc_x[::10]),
             np.concatenate(kc_y[::10]),
             marker='s',
             ms=1,
             ls='none')   # assumed completion: the excerpt was cut off mid-call