Example #1
def plot_spike_counts(ax, fname=None):
    if fname is None:
        for ii in range(len(SIX_SPIKES)):
            spike_counts = []
            for jj, sim_set in enumerate([SIX_SPIKES, THREE_SPIKES, ALL_SPIKES]):
                sim_list = sim_set[ii][1:-1]
                for kk, jid in enumerate(sim_list):
                    try:
                        fname = nda.find_h5_file(jid, DATA_DIR)
                    except Exception:
                        # First entry is old data, moved to backup.
                        # The template should still have all the data.
                        print(f'JID {jid} not in datadir. Looking in template dir')
                        fname = nda.find_h5_file(jid, TEMPLATE_DIR)
                    with h5.File(fname, 'r') as fd:
                        kc_st, kc_id = nda.get_event_times(fd[nda.kc_st_path])
                        kc_sc = sum([len(st) for st in kc_st])
                        print('JID:', jid, 'total spikes:', kc_sc)
                        spike_counts.append(kc_sc)
                # Mark the last point of each simulation series
                ax.plot(len(spike_counts) - 1, spike_counts[-1], 'k|')
            ax.plot(spike_counts, 'o-', fillstyle='none')
    else:
        total_spike_count = pd.read_csv(fname)
        for ii, (series_id, simgrp) in enumerate(total_spike_count.groupby('series')):
            ax.plot(simgrp['total_spikes'].values, 'o-', fillstyle='none')
            series_df = simgrp.reset_index()
            for removal, remgrp in series_df.groupby('removal'):
                print('#', removal)
                print(remgrp)
                # Mark the last point of each removal group
                ax.plot(remgrp.index.values[-1], remgrp['total_spikes'].values[-1], 'k.')
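# A minimal usage sketch for plot_spike_counts. It assumes matplotlib's
# pyplot is imported as `plt` and the SIX_SPIKES/THREE_SPIKES/ALL_SPIKES
# globals are set up as elsewhere in this document. The CSV file name is
# hypothetical; the columns it must contain ('series', 'removal',
# 'total_spikes') are the ones the function reads above.
fig, ax = plt.subplots()
plot_spike_counts(ax)  # scan the HDF5 files directly
# plot_spike_counts(ax, fname='total_spike_counts.csv')  # or use precomputed totals
ax.set_xlabel('Simulation index')
ax.set_ylabel('Total KC spikes')
plt.show()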
Example #2
def make_psth_and_vm(ax_psth, ax_vm, ax_kc_hist):
    binwidth = 100
    datalist = (SIX_SPIKES[0][1:-1], THREE_SPIKES[0][1:-1],
                ALL_SPIKES[0][1:-1])
    colors = ['#e66101', '#5e3c99', '#009292']
    ls = ['-', ':']
    for ii, group in enumerate(datalist):
        print(group)
        for jj, jid in enumerate((group[0], group[-1])):
            print(jid)
            try:
                fname = nda.find_h5_file(jid, DATA_DIR)
            except Exception:
                # First entry is old data, moved to backup.
                # The template should still have all the data.
                print(f'JID {jid} not in datadir. Looking in template dir')
                fname = nda.find_h5_file(jid, TEMPLATE_DIR)
            with h5.File(fname, 'r') as fd:
                kc_st, kc_id = nda.get_event_times(fd[nda.kc_st_path])
                kc_sc = np.array([len(st) for st in kc_st])
                try:
                    ax_kc_hist.hist(kc_sc,
                                    bins=np.arange(1,
                                                   max(kc_sc) + 0.5, 1),
                                    color=colors[ii],
                                    ls=ls[jj],
                                    label=f'{ii}: {jj}: {jid}',
                                    histtype='step',
                                    linewidth=1)
                except (IndexError, ValueError):
                    # max() of an empty array or empty bins: no KC spiked
                    print(jid, ':', kc_sc, '|')

                pop_st = np.concatenate(kc_st)
                try:
                    ax_psth.hist(pop_st,
                                 bins=np.arange(500, 2100, binwidth),
                                 color=colors[ii],
                                 ls=ls[jj],
                                 histtype='step',
                                 label=jid)
                except IndexError:
                    print(jid, pop_st)
                ggn_vm, t = nda.get_ggn_vm(fd, 'basal')
                ax_vm.plot(t,
                           ggn_vm[0, :],
                           label=jid,
                           color=colors[ii],
                           ls=ls[jj])
    ax_psth.legend()
    ax_vm.legend()
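# Hypothetical usage: build the three panels that make_psth_and_vm fills
# (population PSTH, GGN Vm, and per-KC spike-count histogram). Assumes
# matplotlib's pyplot is imported as `plt` as elsewhere in this document.
fig, (ax_psth, ax_vm, ax_kc_hist) = plt.subplots(nrows=3, ncols=1)
make_psth_and_vm(ax_psth, ax_vm, ax_kc_hist)
plt.show()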
Example #3
def main():
    plt.close('all')
    fig_ax = []
    fig_ax_hist = []
    for ii in range(len(SIX_SPIKES)):
        fig, ax = plt.subplots(nrows=4, ncols=3, sharex='row', sharey='row')
        fig_ax.append((fig, ax))
        fig_hist, ax_hist = plt.subplots()
        fig_ax_hist.append((fig_hist, ax_hist))
        for jj, sim_set in enumerate([SIX_SPIKES, THREE_SPIKES, ALL_SPIKES]):
            for kk, jid in enumerate(sim_set[ii][:-1]):
                try:
                    fname = nda.find_h5_file(jid, DATA_DIR)
                except Exception:
                    # First entry is old data, moved to backup.
                    # The template should still have all the data.
                    print(f'JID {jid} not in datadir. Looking in template dir')
                    fname = nda.find_h5_file(jid, TEMPLATE_DIR)
                with h5.File(fname, 'r') as fd:
                    kc_st, kc_id = nda.get_event_times(fd[nda.kc_st_path])
                    kc_sc = np.array([len(st) for st in kc_st])
                    spiking_kc_count = len(np.flatnonzero(kc_sc))
                    spike_count = kc_sc.sum()
                    print('JID:', jid, ', spiking KCs:', spiking_kc_count, 'total spikes:', spike_count)
                    # For all cases, plot the result from the last successful KC removal, and first
                    if (kk == len(sim_set[ii]) - 2) or (kk == 1):
                        pn_st, pn_id = nda.get_event_times(fd[nda.pn_st_path])
                        ggn_vm, t = nda.get_ggn_vm(fd, 'basal')
                        ax[0, jj].plot(np.concatenate(pn_st), np.concatenate(pn_id), ',')
                        ax[1, jj].plot(np.concatenate(kc_st), np.concatenate(kc_id), ',')
                        ax[2, jj].plot(t, ggn_vm[0, :], label=f'{ii}: {jj}: {jid}')
                        kc_pop_st = np.concatenate(kc_st)
                        kc_pop_st.sort()
                        ax[3, jj].hist(kc_pop_st, bins=np.arange(0, t[-1], 100e-3),
                                       alpha=0.5, label=f'{ii}: {jj}: {jid}',
                                       histtype='step', linewidth=2)
                        if kc_sc.max() == 0:
                            # no KC spiked: nothing to histogram
                            continue
                        # bins start at 1 to exclude nonspiking KCs
                        ax_hist.hist(kc_sc, bins=np.arange(1, max(kc_sc) + 0.5, 1),
                                     alpha=0.5, label=f'{ii}: {jj}: {jid}',
                                     histtype='step', linewidth=2)
        fig.set_size_inches(210/25.4, 290/25.4)
        fig.savefig(f'fig_kc_removal_jid_{SIX_SPIKES[ii][0]}.svg', transparent=True)
        fig_hist.savefig(f'fig_kc_hist_kc_removal_jid_{SIX_SPIKES[ii][0]}.svg', transparent=True)

    plt.show()
Example #4
def find_jid_spiking_kcs(jid_list, datadir):
    ret = []
    for jid in jid_list:
        fname = nda.find_h5_file(str(jid), datadir)
        with h5.File(fname, 'r') as fd:
            ret.append(nda.get_spiking_kcs(fd))
    return ret
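# Hypothetical usage: collect the spiking-KC sets for a list of job IDs and
# count the KCs that spike in every trial. The JIDs are examples taken from
# elsewhere in this document; `datadir` is assumed to be set as in the
# snippets below.
spiking = find_jid_spiking_kcs([22087969, 22087970], datadir)
common = set(spiking[0]).intersection(*spiking[1:])
print('KCs spiking in all trials:', len(common))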
Example #5
def remove_high_firing_kcs(jid, limit, sdir, tdir):
    """Remove KCs firing more than `limit` from dataset of `jid`. `sdir`
    points to directory containing the data file, `tdir` is where the
    output file will be written. If source file is called `x.h5`,
    output file will be `x_kc{limit}.h5`

    """
    fpath = nda.find_h5_file(jid, sdir)
    kc_spike_count = []
    with h5.File(fpath, 'r') as fd:
        for kc, spikes in fd[nda.kc_st_path].items():
            kc_spike_count.append((spikes.attrs['source'], len(spikes)))
        try:
            forig_path = fd.attrs['original']
            print(jid, ': original template:', forig_path)
        except KeyError:
            forig_path = fpath
        syn = fd[nda.pn_kc_syn_path]
        if len(syn.shape) == 2:
            syn_orig = pd.DataFrame(data=syn[:, 0])
        else:
            syn_orig = pd.DataFrame(data=syn[:])
    kc_spikes = pd.DataFrame(kc_spike_count, columns=['kc', 'spikes'])
    print('# kcs > 5 spikes:', len(kc_spikes[kc_spikes['spikes'] > 5]))
    print('# kcs > 10 spikes:', len(kc_spikes[kc_spikes['spikes'] > 10]))
    print('# spiking kcs:', len(kc_spikes[kc_spikes['spikes'] > 0]))
    over = kc_spikes[kc_spikes['spikes'] > limit]
    print('{} kcs spiked more than {} spikes'.format(len(over), limit))
    if len(over) == 0:
        print('No KCs meet criterion. Nothing to do')
        return 0
    over_syn = pd.merge(syn_orig, over, left_on='post', right_on='kc')
    print('Synapses onto KCs above limit:', len(over_syn))
    print('Of these, synapses already at 0 conductance:',
          len(np.flatnonzero(over_syn['gmax'].values == 0)))
    # This is tricky: simulations based on a template used to generate data
    # files whose synapse datasets were external references into the
    # template. So an attempt to generate another template by updating the
    # synapses in the produced data file would refer back to the original
    # template.
    fname = os.path.basename(fpath).rpartition('.')[0]
    out_fname = '{}_kc{}.h5'.format(fname, limit)
    outfile = os.path.join(tdir, out_fname)
    if os.path.exists(outfile):
        print(f'File already exists: {outfile}')
        return 0
    # 2019-07-15 - copying forig_path resulted in losing KC spike
    # information from new simulation (fpath). Now that I save the
    # synapses directly, instead of an external ref, there is no need
    # to copy forig.
    print('Copying {} to {}; will update it using KC spiking from {}'.format(
        forig_path, outfile, fpath))
    shutil.copyfile(forig_path, outfile)
    print('Disabling PN synapses to KCs firing > {} spikes'.format(limit))
    changed_syn_count = 0
    with h5.File(outfile, 'a') as ofd:
        syndf = ofd[nda.pn_kc_syn_path]
        # Set the conductance of every synapse onto a KC spiking more than
        # `limit` spikes to 0
        for row in over.itertuples():
            idx = np.where(syn_orig['post'] == row.kc)[0]
            changed_syn_count += len(idx)
            syndf[idx, 0, 'gmax'] = 0.0
        print('Modified: synapses set to 0 conductance:', changed_syn_count)
    print('Original: synapses with 0 conductance:',
          len(syn_orig[syn_orig['gmax'] == 0.0]))

    with h5.File(outfile, 'r') as o2:
        syn_new = pd.DataFrame(data=o2[nda.pn_kc_syn_path][:, 0])
        print('# synapses in updated file', len(syn_new))
        print('# shape of synapse data in updated file', syn_new.shape)
        print('# synapses with 0 conductance',
              len(syn_new[syn_new['gmax'] == 0.0]))
        assert (len(syn_new[syn_new['gmax'] == 0.0]) -
                len(syn_orig[syn_orig['gmax'] == 0.0])) == changed_syn_count
        print('Finished checking updated file')
    return changed_syn_count
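# Hypothetical driver for remove_high_firing_kcs: prune KCs firing more than
# 5 spikes. A return value of 0 means nothing changed (no KC exceeded the
# limit, or the output file already existed). DATA_DIR and TEMPLATE_DIR are
# assumed to be set as in the earlier examples.
changed = remove_high_firing_kcs('22087969', limit=5, sdir=DATA_DIR, tdir=TEMPLATE_DIR)
print('Synapses disabled:', changed)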
Example #6
# imports assumed for this excerpt (aliases match usage elsewhere in this document)
import h5py as h5
import yaml
import pint
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import network_data_analysis as nda
import pn_kc_ggn_plot_mpl as myplot

plt.rc('font', size=8)

_ur = pint.UnitRegistry()
Q_ = _ur.Quantity
# datadir = 'Z:/Subhasis/ggn_model_data/olfactory_network'
# datadir = '/data/rays3/ggn/olfactory_network/'
datadir = 'D:/biowulf_stage/olfactory_network'

#** jid_sc: shifting PNs only, with constant GGN->KC inhibition - but this
# did not have clustered PN->KC connectivity

jid_sc = '22087969'
fname_sc = nda.find_h5_file(jid_sc, datadir)
fd_sc = h5.File(fname_sc, 'r')
print('shifting PN, jid: {}, spiking KCs {}'.format(
    jid_sc, len(nda.get_spiking_kcs(fd_sc))))
print(yaml.dump(nda.load_config(fd_sc), default_style=''))
print('-' * 20)

stiminfo = nda.get_stimtime(fd_sc)
pn_st = []
pn_id = []
for pn in fd_sc[nda.pn_st_path].values():
    pn_st.append(pn[:])
    pn_id.append([int(pn.name.rpartition('_')[-1])] * len(pn))

kc_st = []
kc_id = []
# the excerpt is truncated here; presumably the KC spike times are collected
# the same way as the PN spike times above
for kc in fd_sc[nda.kc_st_path].values():
    kc_st.append(kc[:])
    kc_id.append([int(kc.name.rpartition('_')[-1])] * len(kc))
Example #7
jids = [
    30184869,
    30184873,
    30184874,
    30184876,
    30184878,
    30184880,
    30184882,
    30184884,
    30184886,
    30184888,
]

for jid in jids:
    fpath = nda.find_h5_file(str(jid), datadir)  # find_h5_file expects a string JID
    kc_spike_count = []
    with h5.File(fpath, 'r') as fd:
        for kc, spikes in fd[nda.kc_st_path].items():
            kc_spike_count.append((spikes.attrs['source'], len(spikes)))
        try:
            forig_path = fd.attrs['original']
        except KeyError:
            forig_path = fpath
        syn_orig = pd.DataFrame(data=fd[nda.pn_kc_syn_path][:, 0])

    kc_spikes = pd.DataFrame(kc_spike_count, columns=['kc', 'spikes'])
    #kc_spikes['spikes'].plot('hist')
    print('# kcs > 5 spikes:', len(kc_spikes[kc_spikes['spikes'] > 5]))
    print('# kcs > 10 spikes:', len(kc_spikes[kc_spikes['spikes'] > 10]))
Example #8
# Fragment: the inner body of nested loops over odor, connection type, and
# template JID; `jid_spiking_kcs`, `jid_kc_count`, `common_kcs`, `counts`,
# and `common` are defined in the elided enclosing code.
                counts.append(len(jid_spiking_kcs[jid]))
                jid_kc_count.loc[jid_kc_count.jid == jid, 'spiking_kcs'] = counts[-1]
                if common is None:
                    common = set(jid_spiking_kcs[jid])
                else:
                    common = common.intersection(jid_spiking_kcs[jid])
            pos = ((common_kcs.odor == odor)
                   & (common_kcs.connection == conn)
                   & (common_kcs.template_jid == template))
            common_kcs.loc[pos, 'common_kcs'] = len(common)
            common_kcs.loc[pos, 'avg_kcs'] = np.mean(counts)

templates = odor_trials_data.template_jid.unique()
for t in templates:
    fname = nda.find_h5_file(str(t), datadir)
    print(fname)
    with h5.File(fname, 'r') as fd:
        print(yaml.dump(nda.load_config(fd), default_flow_style=False))

tmp_color_map = {
    10829002: '#e66101',  # iid
    10829014: '#b2abd2',  # clus
    9932209: '#fdb863',  # iid
    9932198: '#5e3c99'  # clus
}

tmp_label_map = {
    10829002: 'Diffuse 2',  # iid
    10829014: 'Clustered 2',  # clus
    9932209: 'Diffuse 1',  # iid
    9932198: 'Clustered 1'  # clus (entry restored; the excerpt was cut off here)
}
Example #9
# imports assumed for this excerpt (aliases match usage elsewhere in this document)
import os
import h5py as h5
import numpy as np
import yaml
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import network_data_analysis as nda
# from matplotlib.backends.backend_pdf import PdfPages
import pn_kc_ggn_plot_mpl as myplot
import clust_fixed_net_multi_trial as cl

plt.rc('font', size=11)

analysis_dir = '/home/rays3/projects/ggn/analysis'
# datadir = '/data/rays3/ggn/olfactory_network'
datadir = '/data/rays3/ggn/fixed_net'
os.chdir(analysis_dir)

fig, ax = plt.subplots(nrows=3, ncols=2, sharex='all', sharey='row')
# jid = '13081213'   # this reproduces GGN Vm well, PN->KC clustered, same KC cluster receives coactive PNs
jid = '21841553'   # new simulation
fname = nda.find_h5_file(jid, datadir)
originals = []
with h5.File(fname, 'r') as fd:
    ax[0, 0].set_title(f'{jid}')
    print(fname)
    orig = fd.attrs['original']
    originals.append(orig)
    print(f'original:{orig}')
    with h5.File(orig, 'r') as forig:
        print(yaml.dump(nda.load_config(forig), default_flow_style=False))
    # fig, ax = plt.subplots(nrows=2, ncols=1, sharex='all')
    myplot.plot_ggn_vm(ax[1, 0], fd, fd['/data/uniform/ggn_output/GGN_output_Vm'], 'LCA', 1, color='black', alpha=1.0)
    ax[1, 0].set_ylim(-60, -45)
    ax[1, 0].set_yticks([-55, -50])
    ig_vm = fd['/data/uniform/ig/IG_Vm']
    dt = ig_vm.attrs['dt']
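    # The excerpt ends here; presumably the IG Vm trace is plotted next.
    # A minimal sketch, assuming IG_Vm is laid out like GGN_output_Vm above
    # (rows = sections, columns = time points sampled at `dt`):
    t_ig = np.arange(ig_vm.shape[-1]) * dt
    ax[2, 0].plot(t_ig, ig_vm[0, :], color='gray', label='IG Vm')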
Example #10
datadir = '/data/rays3/ggn/olfactory_network/'

#* setup figure and axes
gs = gridspec.GridSpec(nrows=3, ncols=1, height_ratios=[2, 1, 1], hspace=0.05)
fig = plt.figure()
ax0 = fig.add_subplot(gs[0])
ax1 = fig.add_subplot(gs[1], sharex=ax0)
ax2 = fig.add_subplot(gs[2], sharex=ax0, sharey=ax1)

#* open file descriptors
#** jid_shifting_lognorm for shifting PNs with lognorm distribution of synapses
# jid = '3047110'  # this was old version
# jid_lognorm = '16240941'  # newer simulation with 0.2 s off response
# jid_lognorm = '22087970'   # This is with lognorm distribution of all synaptic strengths, pn-kc gmax=2.4pS, 5K KCs spiked
jid_lognorm = '22087969'
fname_lognorm = nda.find_h5_file(jid_lognorm, datadir)
fd_lognorm = h5.File(fname_lognorm, 'r')
print('shifting PN, jid: {}, spiking KCs {}'.format(
    jid_lognorm, len(nda.get_spiking_kcs(fd_lognorm))))
print(yaml.dump(nda.load_config(fd_lognorm), default_style=''))
print('-' * 20)
#** jid_constant: shifting PNs only, with constant GGN->KC inhibition - but
# this did not have clustered PN->KC connectivity
# jid_constant = '16562835'   # Thu Mar 14 13:49:49 EDT 2019 - this file was somehow lost from backup.
# jid_constant = '22295183'   # Redid simulations with constant synaptic conductances on 2019-03-13 PN-KC gmax=3 pS, spiking KCs 1707
jid_constant = '24211204'  # The jid '22295183' in fig 5 had off-time 0.2 s, this one has 0.5s
fname_constant = nda.find_h5_file(jid_constant, datadir)
fd_constant = h5.File(fname_constant, 'r')
print('shifting PN, jid: {}, spiking KCs {}'.format(
    jid_constant, len(nda.get_spiking_kcs(fd_constant))))
print(yaml.dump(nda.load_config(fd_constant), default_style=''))
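# The excerpt ends here; presumably the two simulations are compared on the
# shared axes set up above. A minimal sketch, assuming nda.get_ggn_vm as used
# in the earlier examples:
for fd, axis, label in ((fd_lognorm, ax1, 'lognorm'), (fd_constant, ax2, 'constant')):
    ggn_vm, t = nda.get_ggn_vm(fd, 'basal')
    axis.plot(t, ggn_vm[0, :], label=label)
    axis.legend()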
Example #11
from io import StringIO  # needed for the inline CSV below

siminfo = pd.read_csv(
    StringIO("""jid,pn_kc_gmax,shifting,lognorm,spiking_kcs
22072442,2.5pS,y,all,6627
22087964,2.5pS,y,all,6497
22087965,2.5pS,y,all,7933
22087966,2.5pS,y,all,6833
22087967,2.5pS,y,all,7591
22087969,2.4pS,y,all,
22087970,2.4pS,y,all,
22087971,2.4pS,y,all,
22087972,2.4pS,y,all,
22087973,2.4pS,y,all,
"""))

for row in siminfo.itertuples():
    filename = nda.find_h5_file(row.jid, datadir)
    # fig, ax = plt.subplots()
    with h5.File(filename, 'r') as fd:
        # PyYAML >= 5 requires an explicit Loader; SafeLoader is assumed to
        # suffice for the stored config string
        config = yaml.load(fd.attrs['config'], Loader=yaml.SafeLoader)

        # ax0 = fig.add_subplot(rows, cols, nax)
        spiking_kcs = 0
        hyper_kcs = 0
        spike_counts = []
        for kc, st in fd['/data/event/kc/kc_spiketime'].items():
            spike_counts.append(len(st))
            if len(st) > 0:
                spiking_kcs += 1
            if len(st) > 3:
                hyper_kcs += 1
        print('=' * 20)
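        # The excerpt ends here; presumably a per-JID summary follows, e.g.:
        print('JID:', row.jid, 'spiking KCs:', spiking_kcs,
              'KCs with > 3 spikes:', hyper_kcs)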