Example No. 1
def VSDI(sheet, folder, stimulus, parameter, num_stim=2, addon=""):
    import matplotlib as ml
    import quantities as pq
    print inspect.stack()[0][3]
    print "folder: ", folder
    print "sheet: ", sheet
    polarity = True  # exc
    c = 'red'
    if "Inh" in sheet:
        polarity = False
        c = 'blue'

    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet({
                                      'root_directory':
                                      folder,
                                      'store_stimuli':
                                      False
                                  }),
                                  replace=True)
    data_store.print_content(full_recordings=False)

    segs = sorted(
        param_filter_query(data_store, st_name=stimulus,
                           sheet_name=sheet).get_segments(),
        key=lambda x: getattr(
            MozaikParametrized.idd(x.annotations['stimulus']), parameter))
    spont_segs = sorted(
        param_filter_query(data_store, st_name=stimulus,
                           sheet_name=sheet).get_segments(
                               null=True),  # Init 150ms with no stimulus
        # param_filter_query(data_store, sheet_name=sheet, st_direct_stimulation_name="None", st_name='InternalStimulus').get_segments(),
        # param_filter_query(data_store, direct_stimulation_name='None', sheet_name=sheet).get_segments(), # 1029ms NoStimulation
        key=lambda x: getattr(
            MozaikParametrized.idd(x.annotations['stimulus']), parameter))
    # print segs
    print "spont_trials:", len(spont_segs)
    spont_trials = len(spont_segs) / num_stim
    print "spont_trials:", spont_trials
    trials = len(segs) / num_stim
    print "trials:", trials

    analog_ids = param_filter_query(
        data_store, sheet_name=sheet,
        st_name=stimulus).get_segments()[0].get_stored_vm_ids()
    if analog_ids is None or len(analog_ids) < 1:
        print "No Vm recorded.\n"
        return
    print "Recorded neurons:", len(analog_ids)
    # 900 neurons over 6000 micrometers, 200 micrometers interval

    # avg vm
    sheet_indexes = data_store.get_sheet_indexes(sheet_name=sheet,
                                                 neuron_ids=analog_ids)
    positions = data_store.get_neuron_postions()[sheet]
    print positions.shape  # all 10800

    ###############################
    # # Vm PLOTS
    ###############################
    # segs = spont_segs

    # the cortical surface is divided into annuli (beyond the current stimulus size);
    # the mean vm of each annulus is plotted as one row over time
    # annulus_radius = 0.3
    # start = 1.4
    # stop = 3. - annulus_radius
    # num = 5 # annuli

    annulus_radius = 0.3
    start = 0.0
    stop = 1.6 - annulus_radius
    num = 5  # annuli

    # open image
    fig = plt.figure(figsize=(8, 8))
    gs = gridspec.GridSpec(num, 1, hspace=0.3)
    arrival = []
    for n, r in enumerate(numpy.linspace(start, stop, num=num)):
        radius = [r, r + annulus_radius]
        annulus_ids = select_ids_by_position(positions,
                                             sheet_indexes,
                                             radius=radius)
        print "annulus:  ", radius, "(radii)  ", len(annulus_ids), "(#ids)"
        # print len(annulus_ids), annulus_ids

        trial_avg_prime_response = []
        trial_avg_annulus_mean_vm = []
        for s in segs:

            dist = eval(s.annotations['stimulus'])
            if dist['radius'] < 0.1:
                continue
            print "radius", dist['radius'], "trial", dist['trial']

            s.load_full()
            # print "s.analogsignalarrays", s.analogsignalarrays # if not pre-loaded, it results empty in loop

            # print gs, n
            ax = plt.subplot(gs[n])

            for a in s.analogsignalarrays:
                # print "a.name: ",a.name
                if a.name == 'v':
                    # print "a",a.shape # (10291, 900)  (vm instants t, cells)

                    # annulus population average
                    # print "annulus_ids",len(annulus_ids)
                    # print annulus_ids
                    # for aid in annulus_ids:
                    # 	print aid, numpy.nonzero(sheet_indexes == aid)[0][0]

                    # annulus_vms = numpy.array([a[:,numpy.nonzero(sheet_indexes == aid)[0]] for aid in annulus_ids])
                    annulus_mean_vm = numpy.array([
                        a[:, numpy.nonzero(sheet_indexes == aid)[0]]
                        for aid in annulus_ids
                    ]).mean(axis=0)[0:2000, :]
                    # print "annulus_vms",annulus_vms.shape
                    # only annulus ids in the mean
                    # annulus_mean_vm = numpy.mean( annulus_vms, axis=0)[0:2000,:]
                    # print "annulus_mean_vm", annulus_mean_vm.shape
                    trial_avg_annulus_mean_vm.append(annulus_mean_vm)
                    # print "annulus_mean_vm", annulus_mean_vm
                    # threshold = annulus_mean_vm.max() - (annulus_mean_vm.max()-annulus_mean_vm.min())/10 # threshold at: 90% of the max-min interval
                    # prime_response = numpy.argmax(annulus_mean_vm > threshold)
                    # trial_avg_prime_response.append(prime_response)

                    plt.axvline(x=numpy.argmax(annulus_mean_vm),
                                color=c,
                                alpha=0.5)
                    ax.plot(annulus_mean_vm, color=c, alpha=0.5)
                    ax.set_ylim([-75., -50.])

        # means
        # trial_avg_prime_response = numpy.mean(trial_avg_prime_response)
        trial_avg_annulus_mean_vm = numpy.mean(trial_avg_annulus_mean_vm,
                                               axis=0)

        from scipy.signal import argrelextrema
        peaks = argrelextrema(trial_avg_annulus_mean_vm,
                              numpy.greater,
                              order=200)[0]
        print peaks

        for peak in peaks:
            plt.axvline(x=peak, color=c, linewidth=3.)  #, linestyle=linestyle)

        ax.plot(trial_avg_annulus_mean_vm, color=c, linewidth=3.)
        ax.set_ylim([-75., -50.])
        fig.add_subplot(ax)
        # s.release()

    # close image
    # title = "propagation velocity {:f} SD {:f} m/s".format((annulus_radius*.001)/(numpy.mean(arrival)*.0001), numpy.std(arrival)) #
    plt.xlabel("time (0.1 ms) ")  #+title)
    plt.savefig(folder + "/VSDI_mean_vm_" + parameter + "_" + str(sheet) +
                "_radius" + str(dist['radius']) + "_" + addon + ".svg",
                dpi=300,
                transparent=True)
    plt.close()
    gc.collect()
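
# --- A minimal, self-contained sketch of the annulus-averaging idea above ---
# Synthetic positions and Vm traces stand in for the Mozaik data store and
# select_ids_by_position; every name below is illustrative, not Mozaik API.
import numpy
from scipy.signal import argrelextrema

numpy.random.seed(0)
positions = numpy.random.uniform(-1.6, 1.6, size=(2, 900))  # (x, y) per cell, degrees
vm = -70. + numpy.random.randn(2000, 900)                   # (time steps, cells)
dist = numpy.sqrt(positions[0]**2 + positions[1]**2)        # radial distance of each cell

annulus_radius = 0.3
for r in numpy.linspace(0.0, 1.3, num=5):
    # cells whose radial distance falls inside the annulus [r, r + annulus_radius]
    inside = numpy.nonzero((dist >= r) & (dist < r + annulus_radius))[0]
    annulus_mean_vm = vm[:, inside].mean(axis=1)            # population mean per time step
    peaks = argrelextrema(annulus_mean_vm, numpy.greater, order=200)[0]
    print r, len(inside), peaks[:3]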
Example No. 2
def trial_averaged_LFP_rate(sheet,
                            folder,
                            stimulus,
                            parameter,
                            start,
                            end,
                            xlabel="",
                            ylabel="",
                            color="black",
                            ylim=[0., 100.],
                            radius=None,
                            addon=""):
    print inspect.stack()[0][3]
    print "folder: ", folder
    print "sheet: ", sheet
    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet({
                                      'root_directory':
                                      folder,
                                      'store_stimuli':
                                      False
                                  }),
                                  replace=True)
    data_store.print_content(full_recordings=False)

    neurons = param_filter_query(
        data_store, sheet_name=sheet,
        st_name=stimulus).get_segments()[0].get_stored_spike_train_ids()
    print "Recorded neurons:", len(neurons)

    ### cascading requirements
    if radius:
        sheet_ids = data_store.get_sheet_indexes(sheet_name=sheet,
                                                 neuron_ids=neurons)
        positions = data_store.get_neuron_postions()[sheet]
        ids1 = select_ids_by_position(positions, sheet_ids, radius=radius)
        neurons = data_store.get_sheet_ids(sheet_name=sheet, indexes=ids1)
    ####
    # if orientation:
    #	NeuronAnnotationsToPerNeuronValues(data_store,ParameterSet({})).analyse()
    # 	l4_or = data_store.get_analysis_result(identifier='PerNeuronValue',value_name='LGNAfferentOrientation', sheet_name=sheet)
    # 	l4_phase = data_store.get_analysis_result(identifier='PerNeuronValue',value_name='LGNAfferentPhase', sheet_name=sheet)
    # 	# print "l4_phase", l4_phase
    # 	neurons = numpy.array([neurons[numpy.argmin([circular_dist(o,numpy.pi/2,numpy.pi) for (o,p) in zip(l4_or[0].get_value_by_id(neurons),l4_phase[0].get_value_by_id(neurons))])] ])

    print "Selected neurons:", len(neurons)  #, neurons
    if len(neurons) < 1:
        return

    SpikeCount(
        param_filter_query(data_store, sheet_name=sheet, st_name=stimulus),
        ParameterSet({
            'bin_length': 5,
            'neurons': list(neurons),
            'null': False
        })
        # ParameterSet({'bin_length':bin, 'neurons':list(neurons), 'null':False})
    ).analyse()
    # datastore.save()
    TrialMean(
        param_filter_query(data_store,
                           name='AnalogSignalList',
                           analysis_algorithm='SpikeCount'),
        ParameterSet({
            'vm': False,
            'cond_exc': False,
            'cond_inh': False
        })).analyse()

    dsvTM = param_filter_query(data_store,
                               sheet_name=sheet,
                               st_name=stimulus,
                               analysis_algorithm='TrialMean')
    # dsvTM.print_content(full_recordings=False)
    pnvsTM = [dsvTM.get_analysis_result()]
    # print pnvsTM
    # get stimuli from PerNeuronValues
    st = [MozaikParametrized.idd(s.stimulus_id) for s in pnvsTM[-1]]

    asl_id = numpy.array([z.get_asl_by_id(neurons) for z in pnvsTM[-1]])
    print asl_id.shape
    # Example:
    # (8, 133, 1029)
    # 8 stimuli
    # 133 cells
    # 1029 bins

    dic = colapse_to_dictionary([z.get_asl_by_id(neurons) for z in pnvsTM[-1]],
                                st, parameter)
    for k in dic:
        (b, a) = dic[k]
        par, val = zip(*sorted(zip(b, numpy.array(a))))
        dic[k] = (par, numpy.array(val))

    stimuli = dic.values()[0][0]
    means = asl_id.mean(axis=1)  # mean over cells
    print means.shape
    # print "means", means, "stimuli", stimuli

    #plot the LFP for each stimulus
    for s in range(0, len(means)):
        # for each stimulus plot the average conductance per cell over time
        matplotlib.rcParams.update({'font.size': 22})
        fig, ax = plt.subplots()

        ax.plot(range(0, len(means[s])), means[s], color=color, linewidth=3)

        # ax.set_ylim([lfp.min(), lfp.max()])
        # ax.set_ylim(ylim)
        ax.set_ylabel("LFP (uV)")
        ax.set_xlabel("Time (us)")
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)

        # text
        plt.tight_layout()
        plt.savefig(folder + "/TimecourseLFPrate_" + sheet + "_" + parameter +
                    "_" + str(s) + "_" + addon + ".svg",
                    dpi=200,
                    transparent=True)
        fig.clf()
        plt.close()
        # garbage
        gc.collect()
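
# --- A self-contained sketch of the SpikeCount + TrialMean step above ---
# Bin each trial's spike times and average the counts across trials; synthetic
# spike times stand in for the data store (illustrative names only).
import numpy

numpy.random.seed(1)
t_stop, bin_length = 1000., 5.                                # ms
edges = numpy.arange(0., t_stop + bin_length, bin_length)
trials = [numpy.sort(numpy.random.uniform(0., t_stop, size=50))
          for _ in range(8)]                                  # 8 trials of spike times

counts = numpy.array([numpy.histogram(st, bins=edges)[0] for st in trials])
trial_mean = counts.mean(axis=0)                              # mean spike count per bin
print counts.shape, trial_mean.shape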
Example No. 3
def trial_averaged_tuning_curve_errorbar(sheet,
                                         folder,
                                         stimulus,
                                         parameter,
                                         start,
                                         end,
                                         xlabel="",
                                         ylabel="",
                                         color="black",
                                         percentile=False,
                                         useXlog=False,
                                         useYlog=False,
                                         ylim=[0., 100.],
                                         xlim=False,
                                         opposite=False,
                                         box=None,
                                         radius=None,
                                         addon="",
                                         data=None,
                                         data_curve=True):
    print inspect.stack()[0][3]
    print "folder: ", folder
    print "sheet: ", sheet
    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet({
                                      'root_directory':
                                      folder,
                                      'store_stimuli':
                                      False
                                  }),
                                  replace=True)
    data_store.print_content(full_recordings=False)

    neurons = param_filter_query(
        data_store, sheet_name=sheet,
        st_name=stimulus).get_segments()[0].get_stored_spike_train_ids()
    print "Recorded neurons:", len(neurons)

    if radius:
        sheet_ids = data_store.get_sheet_indexes(sheet_name=sheet,
                                                 neuron_ids=neurons)
        positions = data_store.get_neuron_postions()[sheet]
        ids1 = select_ids_by_position(positions, sheet_ids, radius=radius)
        neurons = data_store.get_sheet_ids(sheet_name=sheet, indexes=ids1)

    NeuronAnnotationsToPerNeuronValues(data_store, ParameterSet({})).analyse()
    l4_exc_or = data_store.get_analysis_result(
        identifier='PerNeuronValue',
        value_name='LGNAfferentOrientation',
        sheet_name=sheet)[0]
    l4_exc_or_many = numpy.array(neurons)[numpy.nonzero(
        numpy.array([
            circular_dist(l4_exc_or.get_value_by_id(i), 0, numpy.pi)
            for i in neurons
        ]) < .1)[0]]
    neurons = list(l4_exc_or_many)

    print "Selected neurons:", len(neurons)  #, neurons
    if len(neurons) < 1:
        return

    TrialAveragedFiringRate(
        param_filter_query(data_store, sheet_name=sheet, st_name=stimulus),
        ParameterSet({'neurons': list(neurons)})).analyse()

    PlotTuningCurve(
        param_filter_query(data_store,
                           st_name=stimulus,
                           analysis_algorithm=['TrialAveragedFiringRate']),
        ParameterSet({
            'polar': False,
            'pool': False,
            'centered': False,
            'percent': False,
            'mean': True,
            'parameter_name': parameter,
            'neurons': list(neurons),
            'sheet_name': sheet
        }),
        fig_param={
            'dpi': 200
        },
        plot_file_name=folder + "/TrialAveragedSensitivityNew_" + stimulus +
        "_" + parameter + "_" + str(sheet) + "_" + addon + "_mean.svg"
    ).plot({
        # '*.y_lim':(0,30),
        # '*.x_lim':(-10,100),
        # '*.x_scale':'log', '*.x_scale_base':10,
        '*.fontsize': 17
    })
    return
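
# --- A sketch of the orientation-based selection used above ---
# Keep only neurons whose preferred orientation is within 0.1 rad of the
# target, using a plain reimplementation of circular_dist for illustration.
import numpy

def circular_dist(a, b, period):
    # shortest distance between two angles on a circle with the given period
    d = numpy.abs(a - b) % period
    return numpy.minimum(d, period - d)

numpy.random.seed(2)
neurons = numpy.arange(100)
preferred_or = numpy.random.uniform(0., numpy.pi, size=100)  # one angle per neuron

target = 0.0  # horizontal; numpy.pi / 2 would select the orthogonal group
selected = neurons[circular_dist(preferred_or, target, numpy.pi) < .1]
print len(selected), selected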
Example No. 4
def LFP(sheet,
        folder,
        stimulus,
        parameter,
        tip=[.0, .0, .0],
        sigma=0.300,
        ylim=[0., -1.],
        addon="",
        color='black'):
    import matplotlib as ml
    import quantities as pq
    print inspect.stack()[0][3]
    print "folder: ", folder
    print "sheet: ", sheet

    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet({
                                      'root_directory':
                                      folder,
                                      'store_stimuli':
                                      False
                                  }),
                                  replace=True)
    data_store.print_content(full_recordings=False)

    ids = param_filter_query(
        data_store, sheet_name=sheet,
        st_name=stimulus).get_segments()[0].get_stored_esyn_ids()
    if ids is None or len(ids) < 1:
        print "No gesyn recorded.\n"
        return
    print "Recorded gesyn:", len(ids), ids

    ids = param_filter_query(
        data_store, sheet_name=sheet,
        st_name=stimulus).get_segments()[0].get_stored_vm_ids()
    if ids is None or len(ids) < 1:
        print "No Vm recorded.\n"
        return
    print "Recorded Vm:", len(ids), ids

    NeuronAnnotationsToPerNeuronValues(data_store, ParameterSet({})).analyse()
    l4_exc_or = data_store.get_analysis_result(
        identifier='PerNeuronValue',
        value_name='LGNAfferentOrientation',
        sheet_name=sheet)[0]
    l4_exc_or_many = numpy.array(ids)[numpy.nonzero(
        numpy.array([
            circular_dist(l4_exc_or.get_value_by_id(i), 0, numpy.pi)
            for i in ids
        ]) < .1)[0]]
    ids = list(l4_exc_or_many)

    print "Recorded neurons:", len(ids), ids
    # 900 neurons over 6000 micrometers, 200 micrometers interval

    sheet_indexes = data_store.get_sheet_indexes(sheet_name=sheet,
                                                 neuron_ids=ids)

    positions = data_store.get_neuron_postions()[sheet]
    print positions.shape  # all 10800

    # take the positions of the ids
    ids_positions = numpy.transpose(positions)[sheet_indexes, :]
    print ids_positions.shape
    print ids_positions

    # Pre-compute distances from the LFP tip
    distances = []
    for i in range(len(ids)):
        distances.append(
            numpy.linalg.norm(
                numpy.array(ids_positions[i][0]) - numpy.array(tip)))
    distances = numpy.array(distances)
    print "distances:", len(distances), distances

    # ##############################
    # LFP
    # tip = [[x],[y],[.0]]
    # For each recorded cell:
    # Gaussianly weight it by its distance from tip
    # produce the currents
    # Divide the whole by the norm factor (area): 4 * numpy.pi * sigma

    # 95% of the LFP signal results from the conductances of all exc and inh cells within a 250um radius of the electrode tip (Katzner et al. 2009).
    # Mostly excitatory neurons are relevant for the LFP, because of their geometry (Bartos).
    # Therefore we include all recorded cells but account for the distance-dependent contribution by weighting currents by 1/r^2.
    # We assume that the electrode has been placed at the cortical coordinates <tip>.
    # Given that the current V1 orientation map has one pixel per 100 um, a reasonable neighborhood to look at is a radius of 300 um.

    print "LFP electrode tip location (x,y) in degrees:", tip

    # Gather vm and conductances
    segs = sorted(
        param_filter_query(data_store, st_name=stimulus,
                           sheet_name=sheet).get_segments(),
        key=lambda x: getattr(
            MozaikParametrized.idd(x.annotations['stimulus']), parameter))
    ticks = set([])
    for x in segs:
        ticks.add(
            getattr(MozaikParametrized.idd(x.annotations['stimulus']),
                    parameter))
    ticks = sorted(ticks)
    num_ticks = len(ticks)
    print ticks
    trials = len(segs) / num_ticks
    print "trials:", trials

    pop_vm = []
    pop_gsyn_e = []
    pop_gsyn_i = []
    for n, idd in enumerate(ids):
        print "idd", idd
        full_vm = [s.get_vm(idd) for s in segs]  # all segments
        full_gsyn_es = [s.get_esyn(idd) for s in segs]
        full_gsyn_is = [s.get_isyn(idd) for s in segs]
        print "len full_gsyn_e", len(
            full_gsyn_es)  # segments = stimuli * trials
        print "shape gsyn_e[0]", full_gsyn_es[0].shape  # stimulus lenght
        # mean input over trials
        mean_full_vm = numpy.zeros((num_ticks, full_vm[0].shape[0]))  # init
        mean_full_gsyn_e = numpy.zeros(
            (num_ticks, full_gsyn_es[0].shape[0]))  # init
        mean_full_gsyn_i = numpy.zeros((num_ticks, full_gsyn_es[0].shape[0]))
        # print "shape mean_full_gsyn_e/i", mean_full_gsyn_e.shape
        sampling_period = full_gsyn_es[0].sampling_period
        t_stop = float(full_gsyn_es[0].t_stop - sampling_period)  # 200.0
        t_start = float(full_gsyn_es[0].t_start)
        time_axis = numpy.arange(0, len(full_gsyn_es[0]), 1) / float(
            len(full_gsyn_es[0])) * abs(t_start - t_stop) + t_start
        # sum by size
        t = 0
        for v, e, i in zip(full_vm, full_gsyn_es, full_gsyn_is):
            s = int(t / trials)
            v = v.rescale(mozaik.tools.units.mV)
            e = e.rescale(
                mozaik.tools.units.nS)  # NEST is in nS, PyNN is in uS
            i = i.rescale(
                mozaik.tools.units.nS)  # NEST is in nS, PyNN is in uS
            mean_full_vm[s] = mean_full_vm[s] + numpy.array(v.tolist())
            mean_full_gsyn_e[s] = mean_full_gsyn_e[s] + numpy.array(e.tolist())
            mean_full_gsyn_i[s] = mean_full_gsyn_i[s] + numpy.array(i.tolist())
            t = t + 1

        # average by trials
        for st in range(num_ticks):
            mean_full_vm[st] = mean_full_vm[st] / trials
            mean_full_gsyn_e[st] = mean_full_gsyn_e[st] / trials
            mean_full_gsyn_i[st] = mean_full_gsyn_i[st] / trials

        pop_vm.append(mean_full_vm)
        pop_gsyn_e.append(mean_full_gsyn_e)
        pop_gsyn_i.append(mean_full_gsyn_i)

    pop_v = numpy.array(pop_vm)
    pop_e = numpy.array(pop_gsyn_e)
    pop_i = numpy.array(pop_gsyn_i)

    # Produce the current for each cell for this time interval, with Ohm's law:
    # I = ge(V-Ee) + gi(V+Ei)
    # where
    # Ee is the equilibrium for exc, which is 0.0
    # Ei is the equilibrium for inh, which is -80.0
    i = pop_e * (pop_v - 0.0) + pop_i * (pop_v - 80.0)
    # i = pop_e*(pop_v-0.0) + 0.3*pop_i*(pop_v-80.0)
    # i = pop_e*(pop_v-0.0) # only exc
    # the LFP is the sum of the cells' currents, normalized by 4*pi*sigma
    sum_i = numpy.sum(i, axis=0)
    lfp = sum_i / (4 * numpy.pi * sigma)
    lfp /= 1000.  # from milli to micro
    print "LFP:", lfp.shape, lfp.mean(), lfp.min(), lfp.max()
    # print lfp
    # lfp = np.convolve(lfp, np.ones((10,))/10, mode='valid') # moving avg or running mean implemented as a convolution over steps of 10, divided by 10

    #plot the LFP for each stimulus
    for s in range(num_ticks):
        # for each stimulus plot the average conductance per cell over time
        matplotlib.rcParams.update({'font.size': 22})
        fig, ax = plt.subplots()

        ax.plot(range(0, len(lfp[s])), lfp[s], color=color, linewidth=3)

        # ax.set_ylim([lfp.min(), lfp.max()])
        ax.set_ylim(ylim)
        ax.set_ylabel("LFP (uV)")
        ax.set_xlabel("Time (us)")
        ax.spines['right'].set_visible(False)
        ax.spines['top'].set_visible(False)
        ax.xaxis.set_ticks_position('bottom')
        ax.xaxis.set_ticks(ticks, ticks)
        ax.yaxis.set_ticks_position('left')

        # text
        plt.tight_layout()
        plt.savefig(folder + "/TimecourseLFP_" + sheet + "_" + parameter +
                    "_" + str(ticks[s]) + "_" + addon + ".svg",
                    dpi=200,
                    transparent=True)
        fig.clf()
        plt.close()
        # garbage
        gc.collect()
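
# --- A sketch of the point-source LFP approximation described above ---
# Each cell's synaptic current is weighted by its distance r_n from the
# electrode tip and the sum is normalized by 4*pi*sigma; synthetic currents
# stand in for the recorded conductances (illustrative only).
import numpy

numpy.random.seed(3)
sigma = 0.300                                    # extracellular conductivity
tip = numpy.array([0., 0., 0.])
cell_positions = numpy.random.uniform(-1., 1., size=(100, 3))
currents = numpy.random.randn(100, 2000)         # (cells, time steps)

r = numpy.linalg.norm(cell_positions - tip, axis=1)        # distance of each cell
lfp = numpy.sum(currents / r[:, None], axis=0) / (4 * numpy.pi * sigma)
print lfp.shape, lfp.mean()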
Example No. 5
def trial_averaged_raster(sheet,
                          folder,
                          stimulus,
                          parameter,
                          opposite=False,
                          box=None,
                          radius=None,
                          addon=""):
    print inspect.stack()[0][3]
    print "folder: ", folder
    print "sheet: ", sheet
    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet({
                                      'root_directory':
                                      folder,
                                      'store_stimuli':
                                      False
                                  }),
                                  replace=True)
    data_store.print_content(full_recordings=False)

    spike_ids = param_filter_query(
        data_store,
        sheet_name=sheet).get_segments()[0].get_stored_spike_train_ids()
    if spike_ids is None:
        print "No spikes recorded.\n"
        return
    print "Recorded neurons:", len(spike_ids)

    if sheet == 'V1_Exc_L4' or sheet == 'V1_Inh_L4':
        NeuronAnnotationsToPerNeuronValues(data_store,
                                           ParameterSet({})).analyse()
        l4_exc_or = data_store.get_analysis_result(
            identifier='PerNeuronValue',
            value_name='LGNAfferentOrientation',
            sheet_name=sheet)[0]
        if opposite:
            addon = addon + "_opposite"
            l4_exc_or_many = numpy.array(spike_ids)[numpy.nonzero(
                numpy.array([
                    circular_dist(l4_exc_or.get_value_by_id(i), numpy.pi /
                                  2, numpy.pi) for i in spike_ids
                ]) < .1)[0]]
        else:
            addon = addon + "_same"
            l4_exc_or_many = numpy.array(spike_ids)[numpy.nonzero(
                numpy.array([
                    circular_dist(l4_exc_or.get_value_by_id(i), 0, numpy.pi)
                    for i in spike_ids
                ]) < .1)[0]]
        spike_ids = list(l4_exc_or_many)

    if radius or box:
        sheet_ids = data_store.get_sheet_indexes(sheet_name=sheet,
                                                 neuron_ids=spike_ids)
        positions = data_store.get_neuron_postions()[sheet]
        if box:
            ids1 = select_ids_by_position(positions, sheet_ids, box=box)
        if radius:
            ids1 = select_ids_by_position(positions, sheet_ids, radius=radius)
        spike_ids = data_store.get_sheet_ids(sheet_name=sheet, indexes=ids1)

    print "Selected neurons:", len(spike_ids)
    if len(spike_ids) < 1:
        return

    dsv = param_filter_query(data_store, sheet_name=sheet, st_name=stimulus)
    dist = box if not radius else radius

    # Raster + Histogram
    RasterPlot(dsv,
               ParameterSet({
                   'sheet_name': sheet,
                   'neurons': list(spike_ids),
                   'trial_averaged_histogram': True,
                   'spontaneous': True
               }),
               fig_param={
                   'dpi': 100,
                   'figsize': (100, 50)
               },
               plot_file_name=folder + "/HistRaster_" + parameter + "_" +
               str(sheet) + "_radius" + str(dist) + "_" + addon + ".svg").plot(
                   {'SpikeRasterPlot.group_trials': True})
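
# --- A sketch of the radius/box position selection used above ---
# Plain numpy in place of select_ids_by_position; names are illustrative,
# not the Mozaik API.
import numpy

numpy.random.seed(4)
positions = numpy.random.uniform(-2., 2., size=(2, 1000))   # (x, y) per neuron
ids = numpy.arange(1000)

def select_by_radius(ids, positions, radius):
    # keep ids whose radial distance falls inside [radius[0], radius[1]]
    d = numpy.sqrt(positions[0]**2 + positions[1]**2)
    return ids[(d >= radius[0]) & (d < radius[1])]

def select_by_box(ids, positions, box):
    # keep ids inside the box [[x1, y1], [x2, y2]]
    (x1, y1), (x2, y2) = box
    inside = ((positions[0] >= x1) & (positions[0] <= x2) &
              (positions[1] >= y1) & (positions[1] <= y2))
    return ids[inside]

print len(select_by_radius(ids, positions, [0., .5]))
print len(select_by_box(ids, positions, [[-.5, -.5], [.5, .5]]))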
Example No. 6
def trial_averaged_Vm(sheet,
                      folder,
                      stimulus,
                      parameter,
                      opposite=False,
                      box=None,
                      radius=None,
                      addon=""):
    print inspect.stack()[0][3]
    print "folder: ", folder
    print "sheet: ", sheet
    data_store = PickledDataStore(load=True,
                                  parameters=ParameterSet({
                                      'root_directory':
                                      folder,
                                      'store_stimuli':
                                      False
                                  }),
                                  replace=True)
    data_store.print_content(full_recordings=False)

    analog_ids = param_filter_query(
        data_store, sheet_name=sheet).get_segments()[0].get_stored_vm_ids()
    if analog_ids is None:
        print "No Vm recorded.\n"
        return
    print "Recorded neurons:", len(analog_ids)

    if sheet == 'V1_Exc_L4' or sheet == 'V1_Inh_L4':
        NeuronAnnotationsToPerNeuronValues(data_store,
                                           ParameterSet({})).analyse()
        l4_exc_or = data_store.get_analysis_result(
            identifier='PerNeuronValue',
            value_name='LGNAfferentOrientation',
            sheet_name=sheet)[0]
        if opposite:
            addon = addon + "_opposite"
            l4_exc_or_many = numpy.array(analog_ids)[numpy.nonzero(
                numpy.array([
                    circular_dist(l4_exc_or.get_value_by_id(i), numpy.pi /
                                  2, numpy.pi) for i in analog_ids
                ]) < .1)[0]]
        else:
            addon = addon + "_same"
            l4_exc_or_many = numpy.array(analog_ids)[numpy.nonzero(
                numpy.array([
                    circular_dist(l4_exc_or.get_value_by_id(i), 0, numpy.pi)
                    for i in analog_ids
                ]) < .1)[0]]
        analog_ids = list(l4_exc_or_many)

    if radius or box:
        sheet_ids = data_store.get_sheet_indexes(sheet_name=sheet,
                                                 neuron_ids=analog_ids)
        positions = data_store.get_neuron_postions()[sheet]
        if box:
            ids1 = select_ids_by_position(positions, sheet_ids, box=box)
        if radius:
            ids1 = select_ids_by_position(positions, sheet_ids, radius=radius)
        analog_ids = data_store.get_sheet_ids(sheet_name=sheet, indexes=ids1)

    print "Selected neurons:", len(analog_ids)
    if len(analog_ids) < 1:
        return

    dsv = param_filter_query(data_store, sheet_name=sheet, st_name=stimulus)

    dist = box if not radius else radius
    for n in analog_ids:
        VmPlot(
            dsv,
            ParameterSet({
                'neuron': n,
                'sheet_name': sheet,
                'spontaneous': True,
            }),
            fig_param={
                'dpi': 300,
                'figsize': (40, 5)
            },
            # plot_file_name=folder+"/Vm_"+parameter+"_"+str(sheet)+"_"+str(dist)+"_"+str(n)+"_"+addon+".png"
            plot_file_name=folder + "/Vm_" + parameter + "_" + str(sheet) +
            "_radius" + str(dist) + "_" + str(n) + "_" + addon + ".svg"
        ).plot({
            # '*.y_lim':(0,60),
            # '*.x_scale':'log', '*.x_scale_base':2,
            # '*.y_ticks':[5, 10, 25, 50, 60],
            # # '*.y_scale':'linear',
            # '*.y_scale':'log', '*.y_scale_base':2,
            # '*.fontsize':24
        })
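
# --- A sketch of the trial-averaging pattern shared by these examples ---
# When segments are sorted by stimulus (trials of the same stimulus are
# consecutive), stacking and reshaping averages each stimulus over its
# trials; synthetic traces, illustrative only.
import numpy

numpy.random.seed(5)
num_ticks, trials, T = 4, 3, 2000                 # stimuli, trials, time steps
segs = [numpy.random.randn(T) for _ in range(num_ticks * trials)]

stacked = numpy.array(segs).reshape(num_ticks, trials, T)
mean_per_stimulus = stacked.mean(axis=1)          # (stimuli, time steps)
print mean_per_stimulus.shape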
Example No. 7
def perform_comparison_size_tuning(sheet,
                                   reference_position,
                                   step,
                                   sizes,
                                   folder_full,
                                   folder_inactive,
                                   reverse=False,
                                   Ismaller=[2, 3],
                                   Iequal=[4, 5],
                                   Ilarger=[6, 8],
                                   box=[],
                                   csvfile=None):
    print folder_full
    data_store_full = PickledDataStore(load=True,
                                       parameters=ParameterSet({
                                           'root_directory':
                                           folder_full,
                                           'store_stimuli':
                                           False
                                       }),
                                       replace=True)
    data_store_full.print_content(full_recordings=False)
    print folder_inactive
    data_store_inac = PickledDataStore(load=True,
                                       parameters=ParameterSet({
                                           'root_directory':
                                           folder_inactive,
                                           'store_stimuli':
                                           False
                                       }),
                                       replace=True)
    data_store_inac.print_content(full_recordings=False)

    print "Checking data..."
    # Full
    dsv1 = queries.param_filter_query(data_store_full,
                                      identifier='PerNeuronValue',
                                      sheet_name=sheet)
    # dsv1.print_content(full_recordings=False)
    pnvs1 = [dsv1.get_analysis_result()]
    # get stimuli
    st1 = [MozaikParametrized.idd(s.stimulus_id) for s in pnvs1[-1]]
    # print st1

    # Inactivated
    dsv2 = queries.param_filter_query(data_store_inac,
                                      identifier='PerNeuronValue',
                                      sheet_name=sheet)
    pnvs2 = [dsv2.get_analysis_result()]
    # get stimuli
    st2 = [MozaikParametrized.idd(s.stimulus_id) for s in pnvs2[-1]]

    # rings analysis
    neurons_full = []
    neurons_inac = []
    rowplots = 0
    max_size = 0.6

    # GET RECORDINGS BY POSITION (either by step or by box; when using box, the box selection is inefficiently repeated for each step)
    slice_ranges = numpy.arange(step, max_size + step, step)
    print "slice_ranges:", slice_ranges
    for col, cur_range in enumerate(slice_ranges):
        radius = [cur_range - step, cur_range]
        print col
        # get the list of all recorded neurons in X_ON
        # Full
        spike_ids1 = param_filter_query(
            data_store_full,
            sheet_name=sheet).get_segments()[0].get_stored_spike_train_ids()
        positions1 = data_store_full.get_neuron_postions()[sheet]
        # print numpy.min(positions1), numpy.max(positions1)
        sheet_ids1 = data_store_full.get_sheet_indexes(sheet_name=sheet,
                                                       neuron_ids=spike_ids1)
        radius_ids1 = select_ids_by_position(reference_position, radius,
                                             sheet_ids1, positions1, reverse,
                                             box)
        neurons1 = data_store_full.get_sheet_ids(sheet_name=sheet,
                                                 indexes=radius_ids1)

        # Inactivated
        spike_ids2 = param_filter_query(
            data_store_inac,
            sheet_name=sheet).get_segments()[0].get_stored_spike_train_ids()
        positions2 = data_store_inac.get_neuron_postions()[sheet]
        sheet_ids2 = data_store_inac.get_sheet_indexes(sheet_name=sheet,
                                                       neuron_ids=spike_ids2)
        radius_ids2 = select_ids_by_position(reference_position, radius,
                                             sheet_ids2, positions2, reverse,
                                             box)
        neurons2 = data_store_inac.get_sheet_ids(sheet_name=sheet,
                                                 indexes=radius_ids2)

        print neurons1
        print neurons2
        if not set(neurons1) == set(neurons2):
            neurons1 = numpy.intersect1d(neurons1, neurons2)
            neurons2 = neurons1

        if len(neurons1) > rowplots:
            rowplots = len(neurons1)

        neurons_full.append(neurons1)
        neurons_inac.append(neurons2)

        print "radius_ids", radius_ids2
        print "neurons_full:", len(neurons_full[col]), neurons_full[col]
        print "neurons_inac:", len(neurons_inac[col]), neurons_inac[col]

        assert len(neurons_full[col]
                   ) > 0, "ERROR: the number of recorded neurons is 0"

    # subplot figure creation
    plotOnlyPop = False
    print 'rowplots', rowplots
    print "Starting plotting ..."
    print "slice_ranges:", len(slice_ranges), slice_ranges
    if len(slice_ranges) > 1:
        fig, axes = plt.subplots(nrows=len(slice_ranges),
                                 ncols=rowplots + 1,
                                 figsize=(3 * rowplots, 3 * len(slice_ranges)),
                                 sharey=False)
    else:
        fig, axes = plt.subplots(nrows=2, ncols=2, sharey=False)
        plotOnlyPop = True
    print axes.shape

    p_significance = .02
    for col, cur_range in enumerate(slice_ranges):
        radius = [cur_range - step, cur_range]
        print col
        interval = str(radius[0]) + " - " + str(radius[1]) + " deg radius"
        print interval
        axes[col, 0].set_ylabel(interval + "\n\nResponse change (%)")
        print "range:", col
        if len(neurons_full[col]) < 1:
            continue

        tc_dict1 = []
        tc_dict2 = []

        # Full
        # group values
        dic = colapse_to_dictionary(
            [z.get_value_by_id(neurons_full[col]) for z in pnvs1[-1]], st1,
            'radius')
        for k in dic:
            (b, a) = dic[k]
            par, val = zip(*sorted(zip(b, numpy.array(a))))
            dic[k] = (par, numpy.array(val))
        tc_dict1.append(dic)

        # Inactivated
        # group values
        dic = colapse_to_dictionary(
            [z.get_value_by_id(neurons_inac[col]) for z in pnvs2[-1]], st2,
            'radius')
        for k in dic:
            (b, a) = dic[k]
            par, val = zip(*sorted(zip(b, numpy.array(a))))
            dic[k] = (par, numpy.array(val))
        tc_dict2.append(dic)

        print "(stimulus conditions, cells):", tc_dict1[0].values()[0][
            1].shape  # ex. (10, 32) firing rate for each stimulus condition (10) and each cell (32)

        # Population histogram
        diff_full_inac = []
        sem_full_inac = []
        num_cells = tc_dict1[0].values()[0][1].shape[1]
        smaller_pvalue = 0.
        equal_pvalue = 0.
        larger_pvalue = 0.

        # 1. SELECT ONLY CHANGING UNITS
        all_open_values = tc_dict2[0].values()[0][1]
        all_closed_values = tc_dict1[0].values()[0][1]

        # 1.1 Search for the units that are NOT changing (within a certain absolute tolerance)
        unchanged_units = numpy.isclose(all_closed_values,
                                        all_open_values,
                                        rtol=0.,
                                        atol=4.)
        # print unchanged_units.shape

        # 1.2 Reverse them into those that are changing
        changed_units = numpy.invert(unchanged_units)
        # print numpy.nonzero(changed_units)

        # 1.3 Get the indexes of all units that are changing
        changing_idxs = []
        for j in numpy.nonzero(changed_units)[1]:
            if j not in changing_idxs:
                changing_idxs.append(j)
        # print sorted(changing_idxs)

        # 1.4 Get the changing units
        open_values = [x[changing_idxs] for x in all_open_values]
        open_values = numpy.array(open_values)
        closed_values = [x[changing_idxs] for x in all_closed_values]
        closed_values = numpy.array(closed_values)
        print "chosen open units:", open_values.shape
        print "chosen closed units:", closed_values.shape
        num_cells = closed_values.shape[1]

        # 2. AUTOMATIC SEARCH FOR INTERVALS
        # peak = max(numpy.argmax(closed_values, axis=0 ))
        peaks = numpy.argmax(closed_values, axis=0)
        # peak = int( numpy.argmax( closed_values ) / closed_values.shape[1] ) # the returned single value is from the flattened array
        # print "numpy.argmax( closed_values ):", numpy.argmax( closed_values )
        print "peaks:", peaks
        # minimum = int( numpy.argmin( closed_values ) / closed_values.shape[1] )
        # minimum = min(numpy.argmin(closed_values, axis=0 ))
        minimums = numpy.argmin(
            closed_values,
            axis=0) + 1  # +1 to take the response just above the smallest
        # print "numpy.argmin( closed_values ):", numpy.argmin( closed_values )
        print "minimums:", minimums

        # -------------------------------------
        # DIFFERENCE BETWEEN INACTIVATED AND CONTROL
        # We want to have a summary measure of the population of cells with and without inactivation.
        # Our null-hypothesis is that the inactivation does not change the activity of cells.
        # A different result will tell us that the inactivation DOES something.
        # Therefore our null-hypothesis is the result obtained in the intact system.
        # Procedure:
        # We have several stimulus sizes
        # We want to group them in three: smaller than optimal, optimal, larger than optimal
        # We do the mean response for each cell for the grouped stimuli
        #    i.e. sum the responses for each cell across stimuli in the group, divided by the number of stimuli in the group
        # We repeat for each group

        # average of all trial-averaged response for each cell for grouped stimulus size
        # we want the difference / normalized by the highest value * expressed as percentage
        # print num_cells
        # print "inac",numpy.sum(tc_dict2[0].values()[0][1][2:3], axis=0)
        # print "full",numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0)
        # print "diff",(numpy.sum(tc_dict2[0].values()[0][1][2:3], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0))
        # print "diff_norm",((numpy.sum(tc_dict2[0].values()[0][1][2:3], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0)) / (numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0)))
        # print "diff_norm_perc",((numpy.sum(tc_dict2[0].values()[0][1][2:3], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0)) / (numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0))) * 100

        # USING PROVIDED INTERVALS
        # diff_smaller = ((numpy.sum(open_values[Ismaller[0]:Ismaller[1]], axis=0) - numpy.sum(closed_values[Ismaller[0]:Ismaller[1]], axis=0)) / numpy.sum(closed_values[Ismaller[0]:Ismaller[1]], axis=0)) * 100
        # diff_equal = ((numpy.sum(open_values[Iequal[0]:Iequal[1]], axis=0) - numpy.sum(closed_values[Iequal[0]:Iequal[1]], axis=0)) / numpy.sum(closed_values[Iequal[0]:Iequal[1]], axis=0)) * 100
        # diff_larger = ((numpy.sum(open_values[Ilarger[0]:Ilarger[1]], axis=0) - numpy.sum(closed_values[Ilarger[0]:Ilarger[1]], axis=0)) / numpy.sum(closed_values[Ilarger[0]:Ilarger[1]], axis=0)) * 100

        # USING AUTOMATIC SEARCH
        # print "open"
        # print open_values[minimums]
        # print "closed"
        # print closed_values[minimums]
        # print open_values[peaks]
        # print closed_values[peaks]

        diff_smaller = ((numpy.sum(open_values[minimums], axis=0) -
                         numpy.sum(closed_values[minimums], axis=0)) /
                        numpy.sum(closed_values[minimums], axis=0)) * 100
        diff_equal = ((numpy.sum(open_values[peaks], axis=0) -
                       numpy.sum(closed_values[peaks], axis=0)) /
                      numpy.sum(closed_values[peaks], axis=0)) * 100
        diff_larger = (
            (numpy.sum(open_values[Ilarger[0]:Ilarger[1]], axis=0) -
             numpy.sum(closed_values[Ilarger[0]:Ilarger[1]], axis=0)) /
            numpy.sum(closed_values[Ilarger[0]:Ilarger[1]], axis=0)) * 100
        # print "diff_smaller", diff_smaller
        # print "diff_equal", diff_smaller
        # print "diff_larger", diff_smaller

        # average of all cells
        smaller = sum(diff_smaller) / num_cells
        equal = sum(diff_equal) / num_cells
        larger = sum(diff_larger) / num_cells
        print "smaller", smaller
        print "equal", equal
        print "larger", larger

        if csvfile:
            csvfile.write("(" + str(smaller) + ", " + str(equal) + ", " +
                          str(larger) + "), ")

        # 0/0
        # Check using scipy
        # and we want to compare the responses of full and inactivated
        # smaller, smaller_pvalue = scipy.stats.ttest_rel( numpy.sum(tc_dict2[0].values()[0][1][0:3], axis=0)/3, numpy.sum(tc_dict1[0].values()[0][1][0:3], axis=0)/3 )
        # equal, equal_pvalue = scipy.stats.ttest_rel( numpy.sum(tc_dict2[0].values()[0][1][3:5], axis=0)/2, numpy.sum(tc_dict1[0].values()[0][1][3:5], axis=0)/2 )
        # larger, larger_pvalue = scipy.stats.ttest_rel( numpy.sum(tc_dict2[0].values()[0][1][5:], axis=0)/5, numpy.sum(tc_dict1[0].values()[0][1][5:], axis=0)/5 )
        # print "smaller, smaller_pvalue:", smaller, smaller_pvalue
        # print "equal, equal_pvalue:", equal, equal_pvalue
        # print "larger, larger_pvalue:", larger, larger_pvalue

        diff_full_inac.append(smaller)
        diff_full_inac.append(equal)
        diff_full_inac.append(larger)

        # -------------------------------------
        # Standard Error Mean calculated on the full sequence
        sem_full_inac.append(scipy.stats.sem(diff_smaller))
        sem_full_inac.append(scipy.stats.sem(diff_equal))
        sem_full_inac.append(scipy.stats.sem(diff_larger))

        # print diff_full_inac
        # print sem_full_inac
        barlist = axes[col, 0].bar([0.5, 1.5, 2.5],
                                   diff_full_inac,
                                   yerr=sem_full_inac,
                                   width=0.8)
        axes[col, 0].plot([0, 4], [0, 0], 'k-')  # horizontal 0 line
        for ba in barlist:
            ba.set_color('white')
        if smaller_pvalue < p_significance:
            barlist[0].set_color('brown')
        if equal_pvalue < p_significance:
            barlist[1].set_color('darkgreen')
        if larger_pvalue < p_significance:
            barlist[2].set_color('blue')

        # Plotting tuning curves
        x_full = tc_dict1[0].values()[0][0]
        x_inac = tc_dict2[0].values()[0][0]
        # each cell couple
        axes[col, 1].set_ylabel("Response (spikes/sec)", fontsize=10)
        for j, nid in enumerate(neurons_full[col][changing_idxs]):
            # print col,j,nid
            if len(neurons_full[col][changing_idxs]) > 1:
                y_full = closed_values[:, j]
                y_inac = open_values[:, j]
            else:  # case with just one neuron in the group
                y_full = closed_values
                y_inac = open_values
            if not plotOnlyPop:
                axes[col, j + 1].plot(x_full, y_full, linewidth=2, color='b')
                axes[col, j + 1].plot(x_inac, y_inac, linewidth=2, color='r')
                axes[col, j + 1].set_title(str(nid), fontsize=10)
                axes[col, j + 1].set_xscale("log")

    fig.subplots_adjust(hspace=0.4)
    # fig.suptitle("All recorded cells grouped by circular distance", size='xx-large')
    fig.text(0.5, 0.04, 'cells', ha='center', va='center')
    fig.text(0.06,
             0.5,
             'ranges',
             ha='center',
             va='center',
             rotation='vertical')
    for ax in axes.flatten():
        ax.set_ylim([0, 60])
        ax.set_xticks(sizes)
        ax.set_xticklabels([0.1, '', '', '', '', 1, '', 2, 4, 6])
        # ax.set_xticklabels([0.1, '', '', '', '', '', '', '', '', '', '', 1, '', '', 2, '', '', '', 4, '', 6])

    for col, _ in enumerate(slice_ranges):
        # axes[col,0].set_ylim([-.8,.8])
        axes[col, 0].set_ylim([-60, 60])
        axes[col, 0].set_yticks([-60, -40, -20, 0., 20, 40, 60])
        axes[col, 0].set_yticklabels([-60, -40, -20, 0, 20, 40, 60])
        axes[col, 0].set_xlim([0, 4])
        axes[col, 0].set_xticks([.9, 1.9, 2.9])
        axes[col, 0].set_xticklabels(['small', 'equal', 'larger'])
        axes[col, 0].spines['right'].set_visible(False)
        axes[col, 0].spines['top'].set_visible(False)
        axes[col, 0].spines['bottom'].set_visible(False)

    # plt.show()
    plt.savefig(folder_inactive + "/TrialAveragedSizeTuningComparison_" +
                sheet + "_step" + str(step) + "_box" + str(box) + ".png",
                dpi=100)
    # plt.savefig( folder_full+"/TrialAveragedSizeTuningComparison_"+sheet+"_"+interval+".png", dpi=100 )
    fig.clf()
    plt.close()
    # garbage
    gc.collect()
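
# --- A sketch of the grouped percent-change summary above ---
# For each cell, take its optimal (peak) stimulus size in the control
# condition and express the inactivated-minus-control difference as a
# percentage of the control response; synthetic rates, illustrative only.
import numpy

numpy.random.seed(6)
closed_values = numpy.random.uniform(5., 40., size=(10, 32))   # control: (sizes, cells)
open_values = closed_values + numpy.random.randn(10, 32) * 5.  # inactivated

peaks = numpy.argmax(closed_values, axis=0)                    # optimal size per cell
cells = numpy.arange(closed_values.shape[1])
diff_equal = ((open_values[peaks, cells] - closed_values[peaks, cells])
              / closed_values[peaks, cells]) * 100.
print diff_equal.mean(), "% mean change at the optimal size"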
Example No. 8
def perform_comparison_size_tuning( sheet, reference_position, step, sizes, folder_full, folder_inactive, reverse=False, Ssmaller=3, Sequal=4, SequalStop=5, Slarger=6, box=[] ):
	print folder_full
	data_store_full = PickledDataStore(load=True, parameters=ParameterSet({'root_directory':folder_full, 'store_stimuli' : False}),replace=True)
	data_store_full.print_content(full_recordings=False)
	print folder_inactive
	data_store_inac = PickledDataStore(load=True, parameters=ParameterSet({'root_directory':folder_inactive, 'store_stimuli' : False}),replace=True)
	data_store_inac.print_content(full_recordings=False)

	print "Checking data..."
	# Full
	dsv1 = queries.param_filter_query( data_store_full, identifier='PerNeuronValue', sheet_name=sheet )
	# dsv1.print_content(full_recordings=False)
	pnvs1 = [ dsv1.get_analysis_result() ]
	# get stimuli
	st1 = [MozaikParametrized.idd(s.stimulus_id) for s in pnvs1[-1]]
	# print st1

	# Inactivated
	dsv2 = queries.param_filter_query( data_store_inac, identifier='PerNeuronValue', sheet_name=sheet )
	pnvs2 = [ dsv2.get_analysis_result() ]
	# get stimuli
	st2 = [MozaikParametrized.idd(s.stimulus_id) for s in pnvs2[-1]]

	# rings analysis
	neurons_full = []
	neurons_inac = []
	rowplots = 0
	max_size = 0.6

	slice_ranges = numpy.arange(step, max_size+step, step)
	for col,cur_range in enumerate(slice_ranges):
		radius = [cur_range-step,cur_range]
		print col
		# get the list of all recorded neurons in X_ON
		# Full
		spike_ids1 = param_filter_query(data_store_full, sheet_name=sheet).get_segments()[0].get_stored_spike_train_ids()
		positions1 = data_store_full.get_neuron_postions()[sheet]
		# print numpy.min(positions1), numpy.max(positions1) 
		sheet_ids1 = data_store_full.get_sheet_indexes(sheet_name=sheet,neuron_ids=spike_ids1)
		radius_ids1 = select_ids_by_position(reference_position, radius, sheet_ids1, positions1, reverse, box)
		# 0/0
		neurons1 = data_store_full.get_sheet_ids(sheet_name=sheet, indexes=radius_ids1)
		if len(neurons1) > rowplots:
			rowplots = len(neurons1)
		neurons_full.append(neurons1)

		# Inactivated
		spike_ids2 = param_filter_query(data_store_inac, sheet_name=sheet).get_segments()[0].get_stored_spike_train_ids()
		positions2 = data_store_inac.get_neuron_postions()[sheet]
		sheet_ids2 = data_store_inac.get_sheet_indexes(sheet_name=sheet,neuron_ids=spike_ids2)
		radius_ids2 = select_ids_by_position(reference_position, radius, sheet_ids2, positions2, reverse, box)
		neurons2 = data_store_inac.get_sheet_ids(sheet_name=sheet, indexes=radius_ids2)
		neurons_inac.append(neurons2)

		print "radius_ids", radius_ids2
		print "neurons_full", neurons_full
		print "neurons_inac", neurons_inac

		assert len(neurons_full[col]) == len(neurons_inac[col]) , "ERROR: the number of recorded neurons is different"
		assert set(neurons_full[col]) == set(neurons_inac[col]) , "ERROR: the neurons in the two arrays are not the same"

	# to analyse old simulation it is necessary to choose corresponding ids,
	# do it by hand, running this script several times and noting them down here:
	# neurons_full = [numpy.array([2912, 3205, 1867, 2731, 2248])]
	# neurons_inac = [numpy.array([2912, 3205, 1867, 2731, 2248])]
	# neurons_full =[numpy.array([10921, 10024, 13851,  9855, 11648, 13277])]
	# neurons_inac =[numpy.array([10921, 10024, 13851,  9855, 11648, 13277])]

	# subplot figure creation
	print 'rowplots', rowplots
	print "Starting plotting ..."
	print len(slice_ranges), slice_ranges
	fig, axes = plt.subplots(nrows=len(slice_ranges), ncols=rowplots+1, figsize=(3*rowplots, 3*len(slice_ranges)), sharey=False)
	# fig, axes = plt.subplots(nrows=2, ncols=rowplots+1, figsize=(3*rowplots, 3*len(slice_ranges)), sharey=False)
	print axes.shape

	p_significance = .02
	for col,cur_range in enumerate(slice_ranges):
		radius = [cur_range-step,cur_range]
		print col
		interval = str(radius[0]) +" - "+ str(radius[1]) +" deg radius"
		print interval
		axes[col,0].set_ylabel(interval+"\n\nResponse change (%)")
		print "range:",col
		if len(neurons_full[col]) < 1:
			continue
		print "neurons_full:", len(neurons_full[col]), neurons_full[col]
		print "neurons_inac:", len(neurons_inac[col]), neurons_inac[col]

		tc_dict1 = []
		tc_dict2 = []

		# Full
		# group values 
		dic = colapse_to_dictionary([z.get_value_by_id(neurons_full[col]) for z in pnvs1[-1]], st1, 'radius')
		for k in dic:
		    (b, a) = dic[k]
		    par, val = zip( *sorted( zip(b, numpy.array(a)) ) )
		    dic[k] = (par,numpy.array(val))
		tc_dict1.append(dic)

		# Inactivated
		# group values 
		dic = colapse_to_dictionary([z.get_value_by_id(neurons_inac[col]) for z in pnvs2[-1]], st2, 'radius')
		for k in dic:
		    (b, a) = dic[k]
		    par, val = zip( *sorted( zip(b, numpy.array(a)) ) )
		    dic[k] = (par,numpy.array(val))
		tc_dict2.append(dic)

		# Plotting tuning curves
		x_full = tc_dict1[0].values()[0][0]
		x_inac = tc_dict2[0].values()[0][0]
		# each cell couple 
		print "(stimulus conditions, cells):", tc_dict1[0].values()[0][1].shape # ex. (10, 32) firing rate for each stimulus condition (10) and each cell (32)
		axes[col,1].set_ylabel("Response (spikes/sec)", fontsize=10)
		for j,nid in enumerate(neurons_full[col]):
			# print col,j,nid
			if len(neurons_full[col])>1:
				y_full = tc_dict1[0].values()[0][1][:,j]
				y_inac = tc_dict2[0].values()[0][1][:,j]
			else: # case with just one neuron in the group
				y_full = tc_dict1[0].values()[0][1]
				y_inac = tc_dict2[0].values()[0][1]
			axes[col,j+1].plot(x_full, y_full, linewidth=2, color='b')
			axes[col,j+1].plot(x_inac, y_inac, linewidth=2, color='r')
			axes[col,j+1].set_title(str(nid), fontsize=10)
			axes[col,j+1].set_xscale("log")

		# Population histogram
		diff_full_inac = []
		sem_full_inac = []
		num_cells = tc_dict1[0].values()[0][1].shape[1]
		smaller_pvalue = 0.
		equal_pvalue = 0.
		larger_pvalue = 0.

		# -------------------------------------
		# NON-PARAMETRIC TWO-TAILED TEST ON THE DIFFERENCE BETWEEN INACTIVATED AND CONTROL
		# We want to have a summary measure of the population of cells with and without inactivation.
		# Our null-hypothesis is that the inactivation does not change the activity of cells.
		# A different result will tell us that the inactivation DOES something.
		# Therefore our null-hypothesis is the result obtained in the intact system.
		# Procedure:
		# We have several stimulus sizes
		# We want to group them in three: smaller than optimal, optimal, larger than optimal
		# We do the mean response for each cell for the grouped stimuli
		#    i.e. sum the responses for each cell across stimuli in the group, divided by the number of stimuli in the group
		# We repeat for each group

		# average of all trial-averaged response for each cell for grouped stimulus size
		# we want the difference / normalized by the highest value * expressed as percentage
		# print num_cells
		# print "inac",numpy.sum(tc_dict2[0].values()[0][1][2:3], axis=0)
		# print "full",numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0)
		# print "diff",(numpy.sum(tc_dict2[0].values()[0][1][2:3], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0))
		# print "diff_norm",((numpy.sum(tc_dict2[0].values()[0][1][2:3], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0)) / (numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0)))
		# print "diff_norm_perc",((numpy.sum(tc_dict2[0].values()[0][1][2:3], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0)) / (numpy.sum(tc_dict1[0].values()[0][1][2:3], axis=0))) * 100

		# diff_smaller = ((numpy.sum(tc_dict2[0].values()[0][1][1:3], axis=0)/2 - numpy.sum(tc_dict1[0].values()[0][1][1:3], axis=0)/2) / (numpy.sum(tc_dict1[0].values()[0][1][1:3], axis=0)/2)) * 100
		# diff_equal = ((numpy.sum(tc_dict2[0].values()[0][1][3:5], axis=0)/2 - numpy.sum(tc_dict1[0].values()[0][1][3:5], axis=0)/2) / (numpy.sum(tc_dict1[0].values()[0][1][3:5], axis=0)/2)) * 100
		# diff_larger = ((numpy.sum(tc_dict2[0].values()[0][1][5:], axis=0)/5 - numpy.sum(tc_dict1[0].values()[0][1][5:], axis=0)/5) / (numpy.sum(tc_dict1[0].values()[0][1][5:], axis=0)/5)) * 100
		# diff_smaller = ((numpy.sum(tc_dict2[0].values()[0][1][1:3], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][1:3], axis=0)) / numpy.sum(tc_dict1[0].values()[0][1][1:3], axis=0)) * 100
		diff_smaller = ((numpy.sum(tc_dict2[0].values()[0][1][Ssmaller:Sequal], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][Ssmaller:Sequal], axis=0)) / numpy.sum(tc_dict1[0].values()[0][1][Ssmaller:Sequal], axis=0)) * 100
		diff_equal = ((numpy.sum(tc_dict2[0].values()[0][1][Sequal:SequalStop], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][Sequal:SequalStop], axis=0)) / numpy.sum(tc_dict1[0].values()[0][1][Sequal:SequalStop], axis=0)) * 100
		diff_larger = ((numpy.sum(tc_dict2[0].values()[0][1][Slarger:], axis=0) - numpy.sum(tc_dict1[0].values()[0][1][Slarger:], axis=0)) / numpy.sum(tc_dict1[0].values()[0][1][Slarger:], axis=0)) * 100
		# print "diff_smaller", diff_smaller
		# average of all cells
		smaller = sum(diff_smaller) / num_cells
		equal = sum(diff_equal) / num_cells
		larger = sum(diff_larger) / num_cells

		# Check using scipy
		# and we want to compare the responses of full and inactivated
		# smaller, smaller_pvalue = scipy.stats.ttest_rel( numpy.sum(tc_dict2[0].values()[0][1][0:3], axis=0)/3, numpy.sum(tc_dict1[0].values()[0][1][0:3], axis=0)/3 )
		# equal, equal_pvalue = scipy.stats.ttest_rel( numpy.sum(tc_dict2[0].values()[0][1][3:5], axis=0)/2, numpy.sum(tc_dict1[0].values()[0][1][3:5], axis=0)/2 )
		# larger, larger_pvalue = scipy.stats.ttest_rel( numpy.sum(tc_dict2[0].values()[0][1][5:], axis=0)/5, numpy.sum(tc_dict1[0].values()[0][1][5:], axis=0)/5 )
		# print "smaller, smaller_pvalue:", smaller, smaller_pvalue
		# print "equal, equal_pvalue:", equal, equal_pvalue
		# print "larger, larger_pvalue:", larger, larger_pvalue

		diff_full_inac.append( smaller )
		diff_full_inac.append( equal )
		diff_full_inac.append( larger )

		# -------------------------------------
		# Standard Error Mean calculated on the full sequence
		sem_full_inac.append( scipy.stats.sem(diff_smaller) )
		sem_full_inac.append( scipy.stats.sem(diff_equal) )
		sem_full_inac.append( scipy.stats.sem(diff_larger) )

		# print diff_full_inac
		# print sem_full_inac
		barlist = axes[col,0].bar([0.5,1.5,2.5], diff_full_inac, width=0.8)
		axes[col,0].plot([0,4], [0,0], 'k-') # horizontal 0 line
		for ba in barlist:
			ba.set_color('white')
		if smaller_pvalue < p_significance:
			barlist[0].set_color('brown')
		if equal_pvalue < p_significance:
			barlist[1].set_color('darkgreen')
		if larger_pvalue < p_significance:
			barlist[2].set_color('blue')
		# colors = ['brown', 'darkgreen', 'blue']
		# for patch, color in zip(bp['boxes'], colors):
		# 	patch.set_facecolor(color)

	fig.subplots_adjust(hspace=0.4)
	# fig.suptitle("All recorded cells grouped by circular distance", size='xx-large')
	fig.text(0.5, 0.04, 'cells', ha='center', va='center')
	fig.text(0.06, 0.5, 'ranges', ha='center', va='center', rotation='vertical')
	for ax in axes.flatten():
		ax.set_ylim([0,60])
		ax.set_xticks(sizes)
		# ax.set_xticklabels([0.1, '', '', '', '', 1, '', 2, 4, 6])
		ax.set_xticklabels([0.1, '', '', '', '', '', '', '', '', '', '', 1, '', '', 2, '', '', '', 4, '', 6])

	for col,_ in enumerate(slice_ranges):
		# axes[col,0].set_ylim([-.8,.8])
		axes[col,0].set_ylim([-60,60])
		axes[col,0].set_yticks([-60, -40, -20, 0., 20, 40, 60])
		axes[col,0].set_yticklabels([-60, -40, -20, 0, 20, 40, 60])
		axes[col,0].set_xlim([0,4])
		axes[col,0].set_xticks([.9,1.9,2.9])
		axes[col,0].set_xticklabels(['small', 'equal', 'larger'])
		axes[col,0].spines['right'].set_visible(False)
		axes[col,0].spines['top'].set_visible(False)
		axes[col,0].spines['bottom'].set_visible(False)

	# plt.show()
	plt.savefig( folder_inactive+"/TrialAveragedSizeTuningComparison_"+sheet+"_step"+str(step)+"_box"+str(box)+".png", dpi=100 )
	# plt.savefig( folder_full+"/TrialAveragedSizeTuningComparison_"+sheet+"_"+interval+".png", dpi=100 )
	fig.clf()
	plt.close()
	# garbage
	gc.collect()
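
# --- A sketch of the paired statistical check outlined in the comments ---
# scipy.stats.ttest_rel is the paired t-test used in the commented-out code;
# scipy.stats.wilcoxon would be a non-parametric two-tailed alternative.
# Synthetic per-cell responses, illustrative only.
import numpy
import scipy.stats

numpy.random.seed(7)
control = numpy.random.uniform(5., 40., size=32)       # one response per cell
inactivated = control + numpy.random.randn(32) * 2.

t, t_pvalue = scipy.stats.ttest_rel(inactivated, control)
w, w_pvalue = scipy.stats.wilcoxon(inactivated, control)
print "paired t-test p:", t_pvalue, "Wilcoxon p:", w_pvalue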