def get_epochs(self, epoch, nbins=100, filter=True):
        """The function return a list of windows, with the brain activity associated to a specific task/epoch and its
         respective labels"""
        print(f'Selecting epoch {epoch} from correct segments...')
        tasks = []
        targets = []
        duration = []
        for i, seg in enumerate(self.blk.segments):
            # Every run performed by the monkey is checked; runs not marked as correct are skipped
            if filter and seg.annotations['correct'] == 0:
                print(f'\tSegment {i} discarded')
                continue
            evt = seg.events[0]
            labels = [str(lab).strip().lower() for lab in evt.labels]

            limits = None
            # Given a run, check whether the epoch of interest occurs in it and, if so, get its time limits
            if epoch.lower() in labels:
                idx = labels.index(epoch.lower())
                limits = (evt[idx], evt[idx + 1], epoch)
            else:
                print(f'\tSegment {i}: epoch not present in this segment')
            spk = seg.spiketrains

            if limits is not None:
                # For each epoch found, bin the spike trains into a window
                sparse_array = BinnedSpikeTrain(spk,
                                                n_bins=nbins,
                                                t_start=limits[0],
                                                t_stop=limits[1])
                tasks.append(sparse_array.to_array().astype('float32'))
                targets.append(seg.annotations['obj'])
                duration.append(limits[1] - limits[0])
        return np.array(tasks), targets, duration
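# A minimal, self-contained sketch of the core step above: binning spike
# trains between two event times with elephant's BinnedSpikeTrain. All data
# and names below are synthetic stand-ins, not part of the original class.
import numpy as np
import quantities as pq
from neo import SpikeTrain
from elephant.conversion import BinnedSpikeTrain

spk = [SpikeTrain(np.sort(np.random.uniform(0, 2000, 40)) * pq.ms,
                  t_start=0 * pq.ms, t_stop=2000 * pq.ms) for _ in range(3)]
# stand-ins for the event times evt[idx] and evt[idx + 1]
t_on, t_off = 500 * pq.ms, 1500 * pq.ms
window = BinnedSpikeTrain(spk, n_bins=100, t_start=t_on, t_stop=t_off)
print(window.to_array().astype('float32').shape)  # (3, 100)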
Example #2
def get_seq(data, bin_size, use_sqrt=True):
    """
    Converts the data into a record array, internally using BinnedSpikeTrain.

    Parameters
    ----------

    data : list of tuples
        The outer list corresponds to trials; each entry is a tuple
        (trialId, spiketrains), where `spiketrains` is the list of
        neo.SpikeTrain objects recorded in that trial, such that
        data[l][1][n] is the SpikeTrain of neuron n in trial l. Note that
        the number and order of SpikeTrain objects per trial must be fixed,
        such that data[l][1][n] and data[k][1][n] refer to the same spike
        generator for any choice of l, k and n.
    bin_size : pq.Quantity
        Spike bin width

    use_sqrt : bool
        Boolean specifying whether or not to use a square-root transform on
        spike counts (see the original paper for motivation).
        Default is True

    Returns
    -------

    seq : np.recarray
        data structure whose nth entry (corresponding to the nth
        experimental trial) has fields
            * trialId: unique trial identifier
            * T: (1 x 1) number of timesteps
            * y: (yDim x T) neural data

    Raises
    ------
    ValueError
        if `bin_size` is not a pq.Quantity.

    """
    if not isinstance(bin_size, pq.Quantity):
        raise ValueError("'bin_size' must be of type pq.Quantity")

    seq = []
    for dat in data:
        trial_id = dat[0]
        sts = dat[1]
        binned_sts = BinnedSpikeTrain(sts, binsize=bin_size)
        if use_sqrt:
            binned = np.sqrt(binned_sts.to_array())
        else:
            binned = binned_sts.to_array()
        seq.append((trial_id, binned_sts.num_bins, binned))
    # NumPy >= 1.24 removed np.int; the builtin int is equivalent here
    seq = np.array(seq, dtype=[('trialId', int), ('T', int), ('y', 'O')])

    # Remove trials that are shorter than one bin width
    if len(seq) > 0:
        trials_to_keep = seq['T'] > 0
        seq = seq[trials_to_keep]

    return seq
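# Usage sketch for get_seq, following the data convention above: each entry
# of `data` is a (trial_id, list-of-SpikeTrains) pair. Synthetic Poisson
# trains are used here; note that get_seq itself relies on the pre-0.8
# elephant keyword `binsize` and attribute `num_bins`.
import quantities as pq
from elephant.spike_train_generation import homogeneous_poisson_process

data = [(trial_id,
         [homogeneous_poisson_process(rate=10 * pq.Hz,
                                      t_start=0 * pq.s, t_stop=1 * pq.s)
          for _ in range(5)])
        for trial_id in range(3)]
seq = get_seq(data, bin_size=20 * pq.ms)
print(seq['trialId'], seq['T'])  # trial ids and bins per trial
print(seq['y'][0].shape)         # (5, 50): 5 neurons x 50 bins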
Example #3
import numpy as np
from quantities import ms
from elephant.conversion import BinnedSpikeTrain
from elephant.spike_train_correlation import cross_correlation_histogram


def calcCCH(i, j, dataset):
    """Peak of the cross-correlation histogram between trains i and j."""
    # use elephant's cross_correlation_histogram on 1 ms-binned spike trains
    cch = cross_correlation_histogram(
        BinnedSpikeTrain(dataset[i], binsize=1 * ms),
        BinnedSpikeTrain(dataset[j], binsize=1 * ms),
        window=[-10, 10],
        border_correction=True,
        binary=False,
        kernel=None)
    cchArray1 = np.array(cch[0][:, 0].magnitude)
    return cchArray1.max()
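# Usage sketch: two synthetic Poisson trains stand in for entries of
# `dataset`; calcCCH returns the peak count of the +/-10-bin
# cross-correlogram.
import quantities as pq
from elephant.spike_train_generation import homogeneous_poisson_process

dataset = [homogeneous_poisson_process(rate=20 * pq.Hz, t_stop=10 * pq.s)
           for _ in range(2)]
print(calcCCH(0, 1, dataset))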
Example #4
def poisson_stationary_sample(rate=5 * Hz,
                              dt=1 * ms,
                              t_stop=1000 * ms,
                              binned=True,
                              num_bins=100,
                              num_sts=1):
    """
    Returns a stationary Poisson process with constant rate `rate`

    :param rate: pq.Quantity rate, e.g. in Hz
    :param dt: pq.Quantity Sampling period
    :param t_stop: pq.Quantity End time of the spike train
    :param binned: bool Whether the spike trains should be binned
    :param num_bins: int Number of bins
    :param num_sts: int Number of spike trains
    :return: if `binned` is **True**, returns binned spike trains, the
        corresponding spikes and the rate signal; if `binned` is
        **False**, returns only the spikes
    """
    if not isinstance(t_stop, pq.Quantity):
        t_stop = t_stop * ms
    if not isinstance(rate, pq.Quantity):
        rate = rate * Hz
    # constant rate profile sampled every `dt`
    rate_profile = [rate for _ in range(int(t_stop / dt))]
    rate_signal = neo.AnalogSignal(signal=rate_profile,
                                   units=Hz,
                                   sampling_period=dt)
    spikes = poisson_nonstat(rate_signal, N=num_sts)
    if binned:
        binned_sts = BinnedSpikeTrain(spikes, num_bins=num_bins)
        return binned_sts, spikes, rate_signal
    return spikes
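# The helper poisson_nonstat used above is not shown in this snippet; for a
# constant rate, the same kind of sample can be drawn directly with elephant
# (a sketch, not the original helper):
import quantities as pq
from elephant.spike_train_generation import homogeneous_poisson_process
from elephant.conversion import BinnedSpikeTrain

spikes = [homogeneous_poisson_process(rate=5 * pq.Hz, t_stop=1000 * pq.ms)]
binned_sts = BinnedSpikeTrain(spikes, num_bins=100)
print(binned_sts.to_array().sum())  # total spike count across bins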
Example #5
def gen_nonstat_sample(data_type=6,
                       t=10000 * ms,
                       sample_period=10 * ms,
                       num_bins=100,
                       num_sts=1,
                       binned=True):
    """
    :param data_type: int, An integer specifying the type of
        background activity
    :param t: quantity.Quantity, Simulation time. Default is 10000 * pq.ms
    :param sample_period: quantity.Quantity, Sampling period of the
        rate profile. Default is 10 * pq.ms
    :param num_bins: int, Number of bins
    :param num_sts: int, Number of spike trains
    :param binned: bool, Whether the spike trains should be binned
    :return: binned spike trains and the corresponding spike trains
    """
    sts = generate_sts(data_type,
                       T=t,
                       sampl_period=sample_period,
                       N=num_sts)[0]
    if binned:
        binned_sts = BinnedSpikeTrain(sts, num_bins=num_bins)
        return binned_sts, sts
    return sts
Example #6
    def compute_binned_spikes_trains(self,
                                     spikes_trains,
                                     binsize=None,
                                     num_bins=None):
        """Method to compute an elephant.conversion.BinnedSpikeTrain data type
            from a sequence (array, list, tuple) of arrays of spike times.
           Arguments:
            - spikes_trains: a sequence (array, list, tuple) of arrays of spike times
            - binsize: the size (float, in ms) of the bin to be used. Default=None.
            - num_bins: the number (integer > 0) of bins to be used. Default=None.
            If neither binsize nor num_bins is given, a bin size equal to the
            sampling period is used.
           Returns:
            - the elephant.conversion.BinnedSpikeTrain instance.
        """
        from quantities import ms
        from elephant.conversion import BinnedSpikeTrain
        from elephant.utils import get_common_start_stop_times
        # coerce each input array of spike times into a neo.SpikeTrain
        for i_spike_train, spikes_train in enumerate(spikes_trains):
            spikes_trains[i_spike_train] = self._assert_spike_train(
                spikes_train)
        t_start, t_stop = get_common_start_stop_times(spikes_trains)
        if binsize is not None:
            binsize = float(binsize) * ms
            num_bins = None
        elif num_bins is None:
            binsize = self.period * ms
        return BinnedSpikeTrain(spikes_trains,
                                binsize=binsize,
                                num_bins=num_bins,
                                t_start=t_start,
                                t_stop=t_stop)
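# A self-contained sketch of the same binning logic without the class
# context (synthetic spike trains; keyword names follow the pre-0.8
# elephant API used above):
import numpy as np
import quantities as pq
from neo import SpikeTrain
from elephant.conversion import BinnedSpikeTrain
from elephant.utils import get_common_start_stop_times

sts = [SpikeTrain(np.sort(np.random.uniform(0, 1000, 30)) * pq.ms,
                  t_stop=1000 * pq.ms) for _ in range(4)]
t_start, t_stop = get_common_start_stop_times(sts)
binned = BinnedSpikeTrain(sts, binsize=5.0 * pq.ms,
                          t_start=t_start, t_stop=t_stop)
print(binned.to_array().shape)  # (4, 200)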
Example #7
def poisson_nonstat_sample(rate1=5 * Hz,
                           rate2=10 * Hz,
                           dt=1 * ms,
                           t_stop=1000 * ms,
                           binned=True,
                           num_bins=100,
                           num_sts=1):
    """
    Returns a non-stationary Poisson process with a step rate profile given
    by `rate1` and `rate2`

    :param rate1: pq.Quantity First rate, e.g. in Hz
    :param rate2: pq.Quantity Second rate, e.g. in Hz
    :param dt: pq.Quantity Sampling period
    :param t_stop: pq.Quantity End time of the first rate step; the total
        duration is 2 * t_stop
    :param binned: bool Whether the spike trains should be binned
    :param num_bins: int Number of bins
    :param num_sts: int Number of spike trains
    :return: if `binned` is **True**, returns binned spike trains, the
        corresponding spikes and the rate signal; if `binned` is
        **False**, returns only the spikes
    """
    t1 = 2 * t_stop
    # step profile: `rate1` for the first half, `rate2` for the second
    rate_profile = [rate1 for _ in range(int(t_stop / dt))
                    ] + [rate2 for _ in range(int((t1 - t_stop) / dt))]
    rate_signal = neo.AnalogSignal(signal=rate_profile,
                                   units=Hz,
                                   sampling_period=dt)
    spikes = poisson_nonstat(rate_signal, N=num_sts)
    if binned:
        binned_sts = BinnedSpikeTrain(spikes, num_bins=num_bins)
        return binned_sts, spikes, rate_signal
    return spikes
Example #8
def robust_BinnedSpikeTrain(spiketrains,
                            binsize=2 * ms,
                            num_bins=None,
                            t_start=None,
                            t_stop=None,
                            **add_args):
    # thin wrapper; extra keyword arguments are accepted but ignored
    return BinnedSpikeTrain(spiketrains,
                            binsize=binsize,
                            num_bins=num_bins,
                            t_start=t_start,
                            t_stop=t_stop)
Example #9
    def setUp(self):
        # standard testsignals
        tlen0 = 100 * pq.s
        f0 = 20. * pq.Hz
        fs0 = 1 * pq.ms
        t0 = np.arange(
            0, tlen0.rescale(pq.s).magnitude,
            fs0.rescale(pq.s).magnitude) * pq.s
        self.anasig0 = AnalogSignal(
            np.sin(2 * np.pi * (f0 * t0).simplified.magnitude),
            units=pq.mV, t_start=0 * pq.ms, sampling_period=fs0)
        self.st0 = SpikeTrain(
            np.arange(0, tlen0.rescale(pq.ms).magnitude, 50) * pq.ms,
            t_start=0 * pq.ms, t_stop=tlen0)
        self.bst0 = BinnedSpikeTrain(self.st0, binsize=fs0)

        # shortened analogsignals
        self.anasig1 = self.anasig0.time_slice(1 * pq.s, None)
        self.anasig2 = self.anasig0.time_slice(None, 99 * pq.s)

        # increased sampling frequency
        fs1 = 0.1 * pq.ms
        self.anasig3 = AnalogSignal(
            np.sin(2 * np.pi * (f0 * t0).simplified.magnitude),
            units=pq.mV, t_start=0 * pq.ms, sampling_period=fs1)
        self.bst1 = BinnedSpikeTrain(
            self.st0.time_slice(self.anasig3.t_start, self.anasig3.t_stop),
            binsize=fs1)

        # analogsignal containing multiple traces
        self.anasig4 = AnalogSignal(
            np.array([
                np.sin(2 * np.pi * (f0 * t0).simplified.magnitude),
                np.sin(4 * np.pi * (f0 * t0).simplified.magnitude)]).
            transpose(),
            units=pq.mV, t_start=0 * pq.ms, sampling_period=fs0)

        # shortened spike train
        self.st3 = SpikeTrain(
            np.arange(
                (tlen0.rescale(pq.ms).magnitude * .25),
                (tlen0.rescale(pq.ms).magnitude * .75), 50) * pq.ms,
            t_start=0 * pq.ms, t_stop=tlen0)
        self.bst3 = BinnedSpikeTrain(self.st3, binsize=fs0)

        self.st4 = SpikeTrain(np.arange(
            (tlen0.rescale(pq.ms).magnitude * .25),
            (tlen0.rescale(pq.ms).magnitude * .75), 50) * pq.ms,
            t_start=5 * fs0, t_stop=tlen0 - 5 * fs0)
        self.bst4 = BinnedSpikeTrain(self.st4, binsize=fs0)

        # spike train with incompatible binsize
        self.bst5 = BinnedSpikeTrain(self.st3, binsize=fs0 * 2.)

        # spike train with same binsize as the analog signal, but with
        # bin edges not aligned to the time axis of the analog signal
        self.bst6 = BinnedSpikeTrain(
            self.st3, binsize=fs0, t_start=4.5 * fs0, t_stop=tlen0 - 4.5 * fs0)
Example #10
def get_default_corrcoef_matrix():
    # set the random seed explicitly (it is used inside
    # homogeneous_poisson_process) so that the target and the result
    # image are generated from the same spike trains
    np.random.seed(0)
    spike_train_1 = homogeneous_poisson_process(rate=10.0 * Hz,
                                                t_start=0.0 * s,
                                                t_stop=10.0 * s)
    spike_train_2 = homogeneous_poisson_process(rate=10.0 * Hz,
                                                t_start=0.0 * s,
                                                t_stop=10.0 * s)
    # the binsize of 0.1s is rather large so we might expect non-zero
    # cross-correlation
    corrcoef_matrix = stcorr.corrcoef(
        BinnedSpikeTrain([spike_train_1, spike_train_2], binsize=0.1 * s))
    return corrcoef_matrix
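# quick sanity check (uses the same imports as the function above: numpy as
# np, quantities' Hz and s, elephant.spike_train_correlation as stcorr, and
# homogeneous_poisson_process): corrcoef returns a symmetric 2x2 matrix
# with ones on the diagonal
m = get_default_corrcoef_matrix()
print(m.shape)           # (2, 2)
print(m[0, 0], m[1, 1])  # 1.0 1.0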
Example #11
    def robust_BinnedSpikeTrain(self,
                                spiketrains,
                                binsize=None,
                                num_bins=None,
                                t_start=None,
                                t_stop=None,
                                **add_args):
        # default to the earliest start and earliest stop across all trains
        if t_start is None:
            t_start = min([st.t_start for st in spiketrains])
        if t_stop is None:
            t_stop = min([st.t_stop for st in spiketrains])
        if binsize is None and num_bins is None:
            binsize = self.params['binsize']
        return BinnedSpikeTrain(spiketrains,
                                binsize=binsize,
                                num_bins=num_bins,
                                t_start=t_start,
                                t_stop=t_stop)
Example #12
    def setUp(self):
        # standard testsignals
        tlen0 = 100 * pq.s
        f0 = 20. * pq.Hz
        fs0 = 1 * pq.ms
        t0 = np.arange(
            0, tlen0.rescale(pq.s).magnitude,
            fs0.rescale(pq.s).magnitude) * pq.s
        self.anasig0 = AnalogSignal(
            np.sin(2 * np.pi * (f0 * t0).simplified.magnitude),
            units=pq.mV, t_start=0 * pq.ms, sampling_period=fs0)
        self.st0 = SpikeTrain(
            np.arange(0, tlen0.rescale(pq.ms).magnitude, 50) * pq.ms,
            t_start=0 * pq.ms, t_stop=tlen0)
        self.bst0 = BinnedSpikeTrain(self.st0, bin_size=fs0)

    def test_old_scipy_version(self):
        self.assertRaises(AttributeError, sta.spike_field_coherence,
                          self.anasig0, self.bst0)
Example #13
def correlation_matrix(fig, ax, tr, crun='run_00000000', N=50, nbin=None):

    if nbin is None:
        nbin = int(tr.T / (50 * ms))

    # `tr.crun` resolves pypet's current-run wildcard; the `crun` argument
    # is kept for reference but is not used directly here
    df = tr.crun.GExc_spks
    xt, xi = df.t, df.i

    sts = [
        neo.SpikeTrain(xt[xi == i] / second * pq.s,
                       t_stop=tr.T / second * pq.s) for i in range(N)
    ]

    x = corrcoef(BinnedSpikeTrain(sts, num_bins=nbin))

    x[np.diag_indices(N)] = 0

    divider = make_axes_locatable(ax)
    cax = divider.append_axes('right', size='5%', pad=0.05)

    im = ax.imshow(x)
    fig.colorbar(im, cax=cax, orientation='vertical')
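# A self-contained sketch of the core of correlation_matrix above: bin
# synthetic spike trains, compute the pairwise correlation matrix, zero the
# diagonal, and draw it with a colorbar. (corrcoef was renamed
# correlation_coefficient in newer elephant releases.)
import numpy as np
import quantities as pq
import neo
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from elephant.conversion import BinnedSpikeTrain
from elephant.spike_train_correlation import corrcoef

N = 20
sts = [neo.SpikeTrain(np.sort(np.random.uniform(0, 10, 50)) * pq.s,
                      t_stop=10 * pq.s) for _ in range(N)]
x = corrcoef(BinnedSpikeTrain(sts, num_bins=100))
x[np.diag_indices(N)] = 0

fig, ax = plt.subplots()
cax = make_axes_locatable(ax).append_axes('right', size='5%', pad=0.05)
im = ax.imshow(x)
fig.colorbar(im, cax=cax, orientation='vertical')
plt.show()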
Example #14
def calc_corellation(spike_times, spike_ids, num, duration, bin_x=None):
    # Create randomly shuffled indices
    neuron_indices = np.arange(num)
    np.random.shuffle(neuron_indices)

    # Loop through indices
    spike_trains = []
    for n in neuron_indices:
        # Extract spike times
        neuron_spike_times = spike_times[spike_ids == n]

        # If there are any spikes
        if len(neuron_spike_times) > 0:
            # Add neo SpikeTrain object
            spike_trains.append(
                SpikeTrain(neuron_spike_times * ms,
                           t_start=1 * s,
                           t_stop=10 * s))

            # If we have found our 200 spike trains, stop
            if len(spike_trains) == 200:
                break

    # Check that 200 spike trains containing spikes could be found
    assert len(spike_trains) == 200

    # Bin spikes using bins corresponding to 2ms refractory period
    binned_spike_trains = BinnedSpikeTrain(spike_trains, binsize=2.0 * ms)

    # Calculate correlation matrix
    correlation = corrcoef(binned_spike_trains)

    # Take lower triangle of matrix (minus diagonal)
    correlation_non_disjoint = correlation[np.tril_indices_from(correlation,
                                                                k=-1)]

    # Calculate histogram
    return calc_histogram(correlation_non_disjoint, 0.002, bin_x)
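# A compact, self-contained version of the correlation step above, using
# the same (older) elephant keyword `binsize`. Synthetic trains replace the
# recorded data, and calc_histogram is not shown, so the sketch just
# averages the lower-triangle coefficients:
import numpy as np
import quantities as pq
from neo import SpikeTrain
from elephant.conversion import BinnedSpikeTrain
from elephant.spike_train_correlation import corrcoef

spike_trains = [SpikeTrain(np.sort(np.random.uniform(1000, 10000, 100)) * pq.ms,
                           t_start=1000 * pq.ms, t_stop=10000 * pq.ms)
                for _ in range(50)]
correlation = corrcoef(BinnedSpikeTrain(spike_trains, binsize=2.0 * pq.ms))
correlation_non_disjoint = correlation[np.tril_indices_from(correlation, k=-1)]
print(correlation_non_disjoint.mean())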
Example #15
def run_net(tr):

    # prefs.codegen.target = 'numpy'
    # prefs.codegen.target = 'cython'
    if tr.n_threads > 1:
        prefs.devices.cpp_standalone.openmp_threads = tr.n_threads

    set_device('cpp_standalone',
               directory='./builds/%.4d' % (tr.v_idx),
               build_on_run=False)

    # set brian 2 and numpy random seeds
    seed(tr.random_seed)
    np.random.seed(tr.random_seed + 11)

    print("Started process with id ", str(tr.v_idx))

    T = tr.T1 + tr.T2 + tr.T3 + tr.T4 + tr.T5

    namespace = tr.netw.f_to_dict(short_names=True, fast_access=True)
    namespace['idx'] = tr.v_idx

    defaultclock.dt = tr.netw.sim.dt

    # collect all network components dependent on configuration
    # (e.g. poisson vs. memnoise) and add them to the Brian 2
    # network object later
    netw_objects = []

    if tr.external_mode == 'memnoise':
        neuron_model = tr.condlif_memnoise
    elif tr.external_mode == 'poisson':
        raise NotImplementedError
        #neuron_model = tr.condlif_poisson

    if tr.syn_cond_mode == 'exp':
        neuron_model += tr.syn_cond_EE_exp
        print("Using EE exp mode")
    elif tr.syn_cond_mode == 'alpha':
        neuron_model += tr.syn_cond_EE_alpha
        print("Using EE alpha mode")
    elif tr.syn_cond_mode == 'biexp':
        neuron_model += tr.syn_cond_EE_biexp
        namespace['invpeakEE'] = (tr.tau_e / tr.tau_e_rise) ** \
            (tr.tau_e_rise / (tr.tau_e - tr.tau_e_rise))
        print("Using EE biexp mode")

    if tr.syn_cond_mode_EI == 'exp':
        neuron_model += tr.syn_cond_EI_exp
        print("Using EI exp mode")
    elif tr.syn_cond_mode_EI == 'alpha':
        neuron_model += tr.syn_cond_EI_alpha
        print("Using EI alpha mode")
    elif tr.syn_cond_mode_EI == 'biexp':
        neuron_model += tr.syn_cond_EI_biexp
        namespace['invpeakEI'] = (tr.tau_i / tr.tau_i_rise) ** \
            (tr.tau_i_rise / (tr.tau_i - tr.tau_i_rise))
        print("Using EI biexp mode")

    GExc = NeuronGroup(
        N=tr.N_e,
        model=neuron_model,
        threshold=tr.nrnEE_thrshld,
        reset=tr.nrnEE_reset,  #method=tr.neuron_method,
        name='GExc',
        namespace=namespace)
    GInh = NeuronGroup(
        N=tr.N_i,
        model=neuron_model,
        threshold='V > Vt',
        reset='V=Vr_i',  #method=tr.neuron_method,
        name='GInh',
        namespace=namespace)

    if tr.external_mode == 'memnoise':
        # GExc.mu, GInh.mu = [0.*mV] + (tr.N_e-1)*[tr.mu_e], tr.mu_i
        # GExc.sigma, GInh.sigma = [0.*mV] + (tr.N_e-1)*[tr.sigma_e], tr.sigma_i
        GExc.mu, GInh.mu = tr.mu_e, tr.mu_i
        GExc.sigma, GInh.sigma = tr.sigma_e, tr.sigma_i

    GExc.Vt, GInh.Vt = tr.Vt_e, tr.Vt_i
    GExc.V , GInh.V  = np.random.uniform(tr.Vr_e/mV, tr.Vt_e/mV,
                                         size=tr.N_e)*mV, \
                       np.random.uniform(tr.Vr_i/mV, tr.Vt_i/mV,
                                         size=tr.N_i)*mV

    netw_objects.extend([GExc, GInh])

    if tr.external_mode == 'poisson':

        if tr.PInp_mode == 'pool':
            PInp = PoissonGroup(tr.NPInp,
                                rates=tr.PInp_rate,
                                namespace=namespace,
                                name='poissongroup_exc')
            sPN = Synapses(target=GExc,
                           source=PInp,
                           model=tr.poisson_mod,
                           on_pre='gfwd_post += a_EPoi',
                           namespace=namespace,
                           name='synPInpExc')

            sPN_src, sPN_tar = generate_N_connections(N_tar=tr.N_e,
                                                      N_src=tr.NPInp,
                                                      N=tr.NPInp_1n)

        elif tr.PInp_mode == 'indep':
            PInp = PoissonGroup(tr.N_e,
                                rates=tr.PInp_rate,
                                namespace=namespace)
            sPN = Synapses(target=GExc,
                           source=PInp,
                           model=tr.poisson_mod,
                           on_pre='gfwd_post += a_EPoi',
                           namespace=namespace,
                           name='synPInp_inhInh')
            sPN_src, sPN_tar = range(tr.N_e), range(tr.N_e)

        sPN.connect(i=sPN_src, j=sPN_tar)

        if tr.PInp_mode == 'pool':

            PInp_inh = PoissonGroup(tr.NPInp_inh,
                                    rates=tr.PInp_inh_rate,
                                    namespace=namespace,
                                    name='poissongroup_inh')

            sPNInh = Synapses(target=GInh,
                              source=PInp_inh,
                              model=tr.poisson_mod,
                              on_pre='gfwd_post += a_EPoi',
                              namespace=namespace)

            sPNInh_src, sPNInh_tar = generate_N_connections(N_tar=tr.N_i,
                                                            N_src=tr.NPInp_inh,
                                                            N=tr.NPInp_inh_1n)

        elif tr.PInp_mode == 'indep':

            PInp_inh = PoissonGroup(tr.N_i,
                                    rates=tr.PInp_inh_rate,
                                    namespace=namespace)

            sPNInh = Synapses(target=GInh,
                              source=PInp_inh,
                              model=tr.poisson_mod,
                              on_pre='gfwd_post += a_EPoi',
                              namespace=namespace)

            sPNInh_src, sPNInh_tar = range(tr.N_i), range(tr.N_i)

        sPNInh.connect(i=sPNInh_src, j=sPNInh_tar)

        netw_objects.extend([PInp, sPN, PInp_inh, sPNInh])

    if tr.syn_noise:

        if tr.syn_noise_type == 'additive':
            synEE_mod = '''%s 
                           %s''' % (tr.synEE_noise_add, tr.synEE_mod)

            synEI_mod = '''%s 
                           %s''' % (tr.synEE_noise_add, tr.synEE_mod)

        elif tr.syn_noise_type == 'multiplicative':
            synEE_mod = '''%s 
                           %s''' % (tr.synEE_noise_mult, tr.synEE_mod)

            synEI_mod = '''%s 
                           %s''' % (tr.synEE_noise_mult, tr.synEE_mod)

    else:
        synEE_mod = '''%s 
                       %s''' % (tr.synEE_static, tr.synEE_mod)

        synEI_mod = '''%s 
                       %s''' % (tr.synEE_static, tr.synEE_mod)

    if tr.scl_active:
        synEE_mod = '''%s
                       %s''' % (synEE_mod, tr.synEE_scl_mod)
        synEI_mod = '''%s
                       %s''' % (synEI_mod, tr.synEI_scl_mod)

    if tr.syn_cond_mode == 'exp':
        synEE_pre_mod = mod.synEE_pre_exp
    elif tr.syn_cond_mode == 'alpha':
        synEE_pre_mod = mod.synEE_pre_alpha
    elif tr.syn_cond_mode == 'biexp':
        synEE_pre_mod = mod.synEE_pre_biexp

    synEE_post_mod = mod.syn_post

    if tr.stdp_active:
        synEE_pre_mod = '''%s 
                            %s''' % (synEE_pre_mod, mod.syn_pre_STDP)
        synEE_post_mod = '''%s 
                            %s''' % (synEE_post_mod, mod.syn_post_STDP)

    if tr.synEE_rec:
        synEE_pre_mod = '''%s 
                            %s''' % (synEE_pre_mod, mod.synEE_pre_rec)
        synEE_post_mod = '''%s 
                            %s''' % (synEE_post_mod, mod.synEE_post_rec)

    # E<-E advanced synapse model
    SynEE = Synapses(target=GExc,
                     source=GExc,
                     model=synEE_mod,
                     on_pre=synEE_pre_mod,
                     on_post=synEE_post_mod,
                     namespace=namespace,
                     dt=tr.synEE_mod_dt)

    if tr.istdp_active and tr.istdp_type == 'dbexp':

        if tr.syn_cond_mode_EI == 'exp':
            EI_pre_mod = mod.synEI_pre_exp
        elif tr.syn_cond_mode_EI == 'alpha':
            EI_pre_mod = mod.synEI_pre_alpha
        elif tr.syn_cond_mode_EI == 'biexp':
            EI_pre_mod = mod.synEI_pre_biexp

        synEI_pre_mod = '''%s 
                            %s''' % (EI_pre_mod, mod.syn_pre_STDP)
        synEI_post_mod = '''%s 
                            %s''' % (mod.syn_post, mod.syn_post_STDP)

    elif tr.istdp_active and tr.istdp_type == 'sym':

        if tr.syn_cond_mode_EI == 'exp':
            EI_pre_mod = mod.synEI_pre_sym_exp
        elif tr.syn_cond_mode_EI == 'alpha':
            EI_pre_mod = mod.synEI_pre_sym_alpha
        elif tr.syn_cond_mode_EI == 'biexp':
            EI_pre_mod = mod.synEI_pre_sym_biexp

        synEI_pre_mod = '''%s 
                            %s''' % (EI_pre_mod, mod.syn_pre_STDP)
        synEI_post_mod = '''%s 
                            %s''' % (mod.synEI_post_sym, mod.syn_post_STDP)

    if tr.istdp_active and tr.synEI_rec:

        synEI_pre_mod = '''%s 
                            %s''' % (synEI_pre_mod, mod.synEI_pre_rec)
        synEI_post_mod = '''%s 
                            %s''' % (synEI_post_mod, mod.synEI_post_rec)

    if tr.istdp_active:
        SynEI = Synapses(target=GExc,
                         source=GInh,
                         model=synEI_mod,
                         on_pre=synEI_pre_mod,
                         on_post=synEI_post_mod,
                         namespace=namespace,
                         dt=tr.synEE_mod_dt)

    else:
        model = '''a : 1
                   syn_active : 1'''
        SynEI = Synapses(target=GExc,
                         source=GInh,
                         model=model,
                         on_pre='gi_post += a',
                         namespace=namespace)

    #other connections are simple static-weight synapses
    SynIE = Synapses(target=GInh,
                     source=GExc,
                     on_pre='ge_post += a_ie',
                     namespace=namespace)

    SynII = Synapses(target=GInh,
                     source=GInh,
                     on_pre='gi_post += a_ii',
                     namespace=namespace)

    sEE_src, sEE_tar = generate_full_connectivity(tr.N_e, same=True)
    SynEE.connect(i=sEE_src, j=sEE_tar)
    SynEE.syn_active = 0
    SynEE.taupre, SynEE.taupost = tr.taupre, tr.taupost

    if tr.istdp_active and tr.istrct_active:
        print('istrct active')
        sEI_src, sEI_tar = generate_full_connectivity(Nsrc=tr.N_i,
                                                      Ntar=tr.N_e,
                                                      same=False)
        SynEI.connect(i=sEI_src, j=sEI_tar)
        SynEI.syn_active = 0

    else:
        print('istrct not active')
        if tr.weight_mode == 'init':
            sEI_src, sEI_tar = generate_connections(tr.N_e, tr.N_i, tr.p_ei)
            # print('Index Zero will not get inhibition')
            # sEI_src, sEI_tar = np.array(sEI_src), np.array(sEI_tar)
            # sEI_src, sEI_tar = sEI_src[sEI_tar > 0],sEI_tar[sEI_tar > 0]

        elif tr.weight_mode == 'load':

            fpath = os.path.join(tr.basepath, tr.weight_path)

            with open(fpath + 'synei_a.p', 'rb') as pfile:
                synei_a_init = pickle.load(pfile)

            sEI_src, sEI_tar = synei_a_init['i'], synei_a_init['j']

        SynEI.connect(i=sEI_src, j=sEI_tar)

    if tr.istdp_active:
        SynEI.taupre, SynEI.taupost = tr.taupre_EI, tr.taupost_EI

    sIE_src, sIE_tar = generate_connections(tr.N_i, tr.N_e, tr.p_ie)
    sII_src, sII_tar = generate_connections(tr.N_i, tr.N_i, tr.p_ii, same=True)

    SynIE.connect(i=sIE_src, j=sIE_tar)
    SynII.connect(i=sII_src, j=sII_tar)

    tr.f_add_result('sEE_src', sEE_src)
    tr.f_add_result('sEE_tar', sEE_tar)
    tr.f_add_result('sIE_src', sIE_src)
    tr.f_add_result('sIE_tar', sIE_tar)
    tr.f_add_result('sEI_src', sEI_src)
    tr.f_add_result('sEI_tar', sEI_tar)
    tr.f_add_result('sII_src', sII_src)
    tr.f_add_result('sII_tar', sII_tar)

    if tr.syn_noise:
        SynEE.syn_sigma = tr.syn_sigma
        SynEE.run_regularly('a = clip(a,0,amax)',
                            when='after_groups',
                            name='SynEE_noise_clipper')

    if tr.syn_noise and tr.istdp_active:
        SynEI.syn_sigma = tr.syn_sigma
        SynEI.run_regularly('a = clip(a,0,amax)',
                            when='after_groups',
                            name='SynEI_noise_clipper')

    SynEE.insert_P = tr.insert_P
    SynEE.p_inactivate = tr.p_inactivate
    SynEE.stdp_active = 1
    print('Setting maximum EE weight threshold to ', tr.amax)
    SynEE.amax = tr.amax

    if tr.istdp_active:
        SynEI.insert_P = tr.insert_P_ei
        SynEI.p_inactivate = tr.p_inactivate_ei
        SynEI.stdp_active = 1
        SynEI.amax = tr.amax

    SynEE.syn_active, SynEE.a = init_synapses('EE', tr)
    SynEI.syn_active, SynEI.a = init_synapses('EI', tr)

    # recording of stdp in T4
    SynEE.stdp_rec_start = tr.T1 + tr.T2 + tr.T3
    SynEE.stdp_rec_max = tr.T1 + tr.T2 + tr.T3 + tr.stdp_rec_T

    if tr.istdp_active:
        SynEI.stdp_rec_start = tr.T1 + tr.T2 + tr.T3
        SynEI.stdp_rec_max = tr.T1 + tr.T2 + tr.T3 + tr.stdp_rec_T

    # synaptic scaling
    if tr.netw.config.scl_active:

        if tr.syn_scl_rec:
            SynEE.scl_rec_start = tr.T1 + tr.T2 + tr.T3
            SynEE.scl_rec_max = tr.T1 + tr.T2 + tr.T3 + tr.scl_rec_T
        else:
            SynEE.scl_rec_start = T + 10 * second
            SynEE.scl_rec_max = T

        if tr.sig_ATotalMax == 0.:
            GExc.ANormTar = tr.ATotalMax
        else:
            GExc.ANormTar = np.random.normal(loc=tr.ATotalMax,
                                             scale=tr.sig_ATotalMax,
                                             size=tr.N_e)

        SynEE.summed_updaters['AsumEE_post']._clock = Clock(
            dt=tr.dt_synEE_scaling)
        synee_scaling = SynEE.run_regularly(tr.synEE_scaling,
                                            dt=tr.dt_synEE_scaling,
                                            when='end',
                                            name='synEE_scaling')

    if tr.istdp_active and tr.netw.config.iscl_active:

        if tr.syn_iscl_rec:
            SynEI.scl_rec_start = tr.T1 + tr.T2 + tr.T3
            SynEI.scl_rec_max = tr.T1 + tr.T2 + tr.T3 + tr.scl_rec_T
        else:
            SynEI.scl_rec_start = T + 10 * second
            SynEI.scl_rec_max = T

        if tr.sig_iATotalMax == 0.:
            GExc.iANormTar = tr.iATotalMax
        else:
            GExc.iANormTar = np.random.normal(loc=tr.iATotalMax,
                                              scale=tr.sig_iATotalMax,
                                              size=tr.N_e)

        SynEI.summed_updaters['AsumEI_post']._clock = Clock(
            dt=tr.dt_synEE_scaling)

        synei_scaling = SynEI.run_regularly(tr.synEI_scaling,
                                            dt=tr.dt_synEE_scaling,
                                            when='end',
                                            name='synEI_scaling')

    # # intrinsic plasticity
    # if tr.netw.config.it_active:
    #     GExc.h_ip = tr.h_ip
    #     GExc.run_regularly(tr.intrinsic_mod, dt = tr.it_dt, when='end')

    # structural plasticity
    if tr.netw.config.strct_active:
        if tr.strct_mode == 'zero':
            if tr.turnover_rec:
                strct_mod = '''%s 
                                %s''' % (tr.strct_mod, tr.turnover_rec_mod)
            else:
                strct_mod = tr.strct_mod

            strctplst = SynEE.run_regularly(strct_mod,
                                            dt=tr.strct_dt,
                                            when='end',
                                            name='strct_plst_zero')

        elif tr.strct_mode == 'thrs':
            if tr.turnover_rec:
                strct_mod_thrs = '''%s 
                                %s''' % (tr.strct_mod_thrs,
                                         tr.turnover_rec_mod)
            else:
                strct_mod_thrs = tr.strct_mod_thrs

            strctplst = SynEE.run_regularly(strct_mod_thrs,
                                            dt=tr.strct_dt,
                                            when='end',
                                            name='strct_plst_thrs')

    if tr.istdp_active and tr.netw.config.istrct_active:
        if tr.strct_mode == 'zero':
            if tr.turnover_rec:
                strct_mod_EI = '''%s 
                                   %s''' % (tr.strct_mod,
                                            tr.turnoverEI_rec_mod)
            else:
                strct_mod_EI = tr.strct_mod

            strctplst_EI = SynEI.run_regularly(strct_mod_EI,
                                               dt=tr.strct_dt,
                                               when='end',
                                               name='strct_plst_EI')

        elif tr.strct_mode == 'thrs':
            raise NotImplementedError

    netw_objects.extend([SynEE, SynEI, SynIE, SynII])

    # keep track of the number of active synapses
    sum_target = NeuronGroup(1, 'c : 1 (shared)', dt=tr.csample_dt)

    sum_model = '''NSyn : 1 (constant)
                   c_post = (1.0*syn_active_pre)/NSyn : 1 (summed)'''
    sum_connection = Synapses(target=sum_target,
                              source=SynEE,
                              model=sum_model,
                              dt=tr.csample_dt,
                              name='get_active_synapse_count')
    sum_connection.connect()
    sum_connection.NSyn = tr.N_e * (tr.N_e - 1)

    if tr.adjust_insertP:
        # homeostatically adjust growth rate
        growth_updater = Synapses(sum_target, SynEE)
        growth_updater.run_regularly('insert_P_post *= 0.1/c_pre',
                                     when='after_groups',
                                     dt=tr.csample_dt,
                                     name='update_insP')
        growth_updater.connect(j='0')

        netw_objects.extend([sum_target, sum_connection, growth_updater])

    if tr.istdp_active and tr.istrct_active:

        # keep track of the number of active synapses
        sum_target_EI = NeuronGroup(1, 'c : 1 (shared)', dt=tr.csample_dt)

        sum_model_EI = '''NSyn : 1 (constant)
                          c_post = (1.0*syn_active_pre)/NSyn : 1 (summed)'''
        sum_connection_EI = Synapses(target=sum_target_EI,
                                     source=SynEI,
                                     model=sum_model_EI,
                                     dt=tr.csample_dt,
                                     name='get_active_synapse_count_EI')
        sum_connection_EI.connect()
        sum_connection_EI.NSyn = tr.N_e * tr.N_i

        if tr.adjust_EI_insertP:
            # homeostatically adjust growth rate
            growth_updater_EI = Synapses(sum_target_EI, SynEI)
            growth_updater_EI.run_regularly('insert_P_post *= 0.1/c_pre',
                                            when='after_groups',
                                            dt=tr.csample_dt,
                                            name='update_insP_EI')
            growth_updater_EI.connect(j='0')

            netw_objects.extend(
                [sum_target_EI, sum_connection_EI, growth_updater_EI])

    # -------------- recording ------------------

    GExc_recvars = []
    if tr.memtraces_rec:
        GExc_recvars.append('V')
    if tr.vttraces_rec:
        GExc_recvars.append('Vt')
    if tr.getraces_rec:
        GExc_recvars.append('ge')
    if tr.gitraces_rec:
        GExc_recvars.append('gi')
    if tr.gfwdtraces_rec and tr.external_mode == 'poisson':
        GExc_recvars.append('gfwd')

    GInh_recvars = GExc_recvars

    GExc_stat = StateMonitor(GExc,
                             GExc_recvars,
                             record=list(range(tr.nrec_GExc_stat)),
                             dt=tr.GExc_stat_dt)
    GInh_stat = StateMonitor(GInh,
                             GInh_recvars,
                             record=list(range(tr.nrec_GInh_stat)),
                             dt=tr.GInh_stat_dt)

    # SynEE stat
    SynEE_recvars = []
    if tr.synee_atraces_rec:
        SynEE_recvars.append('a')
    if tr.synee_activetraces_rec:
        SynEE_recvars.append('syn_active')
    if tr.synee_Apretraces_rec:
        SynEE_recvars.append('Apre')
    if tr.synee_Aposttraces_rec:
        SynEE_recvars.append('Apost')

    SynEE_stat = StateMonitor(SynEE,
                              SynEE_recvars,
                              record=range(tr.n_synee_traces_rec),
                              when='end',
                              dt=tr.synEE_stat_dt)

    if tr.istdp_active:
        # SynEI stat
        SynEI_recvars = []
        if tr.synei_atraces_rec:
            SynEI_recvars.append('a')
        if tr.synei_activetraces_rec:
            SynEI_recvars.append('syn_active')
        if tr.synei_Apretraces_rec:
            SynEI_recvars.append('Apre')
        if tr.synei_Aposttraces_rec:
            SynEI_recvars.append('Apost')

        SynEI_stat = StateMonitor(SynEI,
                                  SynEI_recvars,
                                  record=range(tr.n_synei_traces_rec),
                                  when='end',
                                  dt=tr.synEI_stat_dt)
        netw_objects.append(SynEI_stat)

    if tr.adjust_insertP:

        C_stat = StateMonitor(sum_target,
                              'c',
                              dt=tr.csample_dt,
                              record=[0],
                              when='end')
        insP_stat = StateMonitor(SynEE,
                                 'insert_P',
                                 dt=tr.csample_dt,
                                 record=[0],
                                 when='end')
        netw_objects.extend([C_stat, insP_stat])

    if tr.istdp_active and tr.adjust_EI_insertP:

        C_EI_stat = StateMonitor(sum_target_EI,
                                 'c',
                                 dt=tr.csample_dt,
                                 record=[0],
                                 when='end')
        insP_EI_stat = StateMonitor(SynEI,
                                    'insert_P',
                                    dt=tr.csample_dt,
                                    record=[0],
                                    when='end')
        netw_objects.extend([C_EI_stat, insP_EI_stat])

    GExc_spks = SpikeMonitor(GExc)
    GInh_spks = SpikeMonitor(GInh)

    GExc_rate = PopulationRateMonitor(GExc)
    GInh_rate = PopulationRateMonitor(GInh)

    if tr.external_mode == 'poisson':
        PInp_spks = SpikeMonitor(PInp)
        PInp_rate = PopulationRateMonitor(PInp)
        netw_objects.extend([PInp_spks, PInp_rate])

    if tr.synee_a_nrecpoints == 0 or tr.sim.T2 == 0 * second:
        SynEE_a_dt = 2 * (tr.T1 + tr.T2 + tr.T3 + tr.T4 + tr.T5)
    else:
        SynEE_a_dt = tr.sim.T2 / tr.synee_a_nrecpoints

        # make sure that the choice of SynEE_a_dt does not lead
        # to excessively many recordings - this can
        # happen if T1 >> T2.
        estm_nrecs = int(T / SynEE_a_dt)
        if estm_nrecs > 3 * tr.synee_a_nrecpoints:
            print('''Estimated number of EE weight recordings (%d)
            exceeds desired number (%d), increasing 
            SynEE_a_dt''' % (estm_nrecs, tr.synee_a_nrecpoints))

            SynEE_a_dt = T / tr.synee_a_nrecpoints

    SynEE_a = StateMonitor(SynEE, ['a', 'syn_active'],
                           record=range(tr.N_e * (tr.N_e - 1)),
                           dt=SynEE_a_dt,
                           when='end',
                           order=100)

    if tr.istrct_active:
        record_range = range(tr.N_e * tr.N_i)
    else:
        record_range = range(len(sEI_src))

    if tr.synei_a_nrecpoints > 0 and tr.sim.T2 > 0 * second:
        SynEI_a_dt = tr.sim.T2 / tr.synei_a_nrecpoints

        estm_nrecs = int(T / SynEI_a_dt)
        if estm_nrecs > 3 * tr.synei_a_nrecpoints:
            print('''Estimated number of EI weight recordings
            (%d) exceeds desired number (%d), increasing 
            SynEI_a_dt''' % (estm_nrecs, tr.synei_a_nrecpoints))

            SynEI_a_dt = T / tr.synei_a_nrecpoints

        SynEI_a = StateMonitor(SynEI, ['a', 'syn_active'],
                               record=record_range,
                               dt=SynEI_a_dt,
                               when='end',
                               order=100)

        netw_objects.append(SynEI_a)

    netw_objects.extend([
        GExc_stat, GInh_stat, SynEE_stat, SynEE_a, GExc_spks, GInh_spks,
        GExc_rate, GInh_rate
    ])

    if (tr.synEEdynrec
            and (2 * tr.syndynrec_npts * tr.syndynrec_dt < tr.sim.T2)):
        SynEE_dynrec = StateMonitor(SynEE, ['a'],
                                    record=range(tr.N_e * (tr.N_e - 1)),
                                    dt=tr.syndynrec_dt,
                                    name='SynEE_dynrec',
                                    when='end',
                                    order=100)
        SynEE_dynrec.active = False
        netw_objects.extend([SynEE_dynrec])

    if (tr.synEIdynrec
            and (2 * tr.syndynrec_npts * tr.syndynrec_dt < tr.sim.T2)):
        SynEI_dynrec = StateMonitor(SynEI, ['a'],
                                    record=record_range,
                                    dt=tr.syndynrec_dt,
                                    name='SynEI_dynrec',
                                    when='end',
                                    order=100)
        SynEI_dynrec.active = False
        netw_objects.extend([SynEI_dynrec])

    net = Network(*netw_objects)

    def set_active(*argv):
        for net_object in argv:
            net_object.active = True

    def set_inactive(*argv):
        for net_object in argv:
            net_object.active = False

    ### Simulation periods

    # --------- T1 ---------
    # initial recording period,
    # all recorders active

    T1T3_recorders = [
        GExc_spks, GInh_spks, SynEE_stat, GExc_stat, GInh_stat, GExc_rate,
        GInh_rate
    ]

    if tr.istdp_active:
        T1T3_recorders.append(SynEI_stat)

    set_active(*T1T3_recorders)

    if tr.external_mode == 'poisson':
        set_active(PInp_spks, PInp_rate)

    net.run(tr.sim.T1, report='text', report_period=300 * second, profile=True)

    # --------- T2 ---------
    # main simulation period
    # only active recordings are:
    #   1) turnover 2) C_stat 3) SynEE_a

    set_inactive(*T1T3_recorders)

    if tr.T2_spks_rec:
        set_active(GExc_spks, GInh_spks)

    if tr.external_mode == 'poisson':
        set_inactive(PInp_spks, PInp_rate)

    run_T2_syndynrec(net, tr, netw_objects)

    # --------- T3 ---------
    # second recording period,
    # all recorders active

    set_active(*T1T3_recorders)

    if tr.external_mode == 'poisson':
        set_active(PInp_spks, PInp_rate)

    run_T3_split(net, tr)

    # --------- T4 ---------
    # record STDP and scaling weight changes to file
    # through the cpp models

    set_inactive(*T1T3_recorders)

    if tr.external_mode == 'poisson':
        set_inactive(PInp_spks, PInp_rate)

    run_T4(net, tr)

    # --------- T5 ---------
    # freeze network and record Exc spikes
    # for cross correlations

    if tr.scl_active:
        synee_scaling.active = False
    if tr.istdp_active and tr.netw.config.iscl_active:
        synei_scaling.active = False
    if tr.strct_active:
        strctplst.active = False
    if tr.istdp_active and tr.istrct_active:
        strctplst_EI.active = False
    SynEE.stdp_active = 0
    if tr.istdp_active:
        SynEI.stdp_active = 0

    set_active(GExc_rate, GInh_rate)
    set_active(GExc_spks, GInh_spks)

    run_T5(net, tr)

    SynEE_a.record_single_timestep()
    if tr.synei_a_nrecpoints > 0 and tr.sim.T2 > 0. * second:
        SynEI_a.record_single_timestep()

    device.build(directory='builds/%.4d' % (tr.v_idx),
                 clean=True,
                 compile=True,
                 run=True,
                 debug=False)

    # -----------------------------------------

    # save monitors as raws in build directory
    raw_dir = 'builds/%.4d/raw/' % (tr.v_idx)

    if not os.path.exists(raw_dir):
        os.makedirs(raw_dir)

    with open(raw_dir + 'namespace.p', 'wb') as pfile:
        pickle.dump(namespace, pfile)

    with open(raw_dir + 'gexc_stat.p', 'wb') as pfile:
        pickle.dump(GExc_stat.get_states(), pfile)
    with open(raw_dir + 'ginh_stat.p', 'wb') as pfile:
        pickle.dump(GInh_stat.get_states(), pfile)

    with open(raw_dir + 'synee_stat.p', 'wb') as pfile:
        pickle.dump(SynEE_stat.get_states(), pfile)

    if tr.istdp_active:
        with open(raw_dir + 'synei_stat.p', 'wb') as pfile:
            pickle.dump(SynEI_stat.get_states(), pfile)

    if ((tr.synEEdynrec or tr.synEIdynrec)
            and (2 * tr.syndynrec_npts * tr.syndynrec_dt < tr.sim.T2)):

        if tr.synEEdynrec:
            with open(raw_dir + 'syneedynrec.p', 'wb') as pfile:
                pickle.dump(SynEE_dynrec.get_states(), pfile)
        if tr.synEIdynrec:
            with open(raw_dir + 'syneidynrec.p', 'wb') as pfile:
                pickle.dump(SynEI_dynrec.get_states(), pfile)

    with open(raw_dir + 'synee_a.p', 'wb') as pfile:
        SynEE_a_states = SynEE_a.get_states()
        if tr.crs_crrs_rec:
            SynEE_a_states['i'] = list(SynEE.i)
            SynEE_a_states['j'] = list(SynEE.j)
        pickle.dump(SynEE_a_states, pfile)

    if tr.synei_a_nrecpoints > 0 and tr.sim.T2 > 0. * second:
        with open(raw_dir + 'synei_a.p', 'wb') as pfile:
            SynEI_a_states = SynEI_a.get_states()
            if tr.crs_crrs_rec:
                SynEI_a_states['i'] = list(SynEI.i)
                SynEI_a_states['j'] = list(SynEI.j)
            pickle.dump(SynEI_a_states, pfile)

    if tr.adjust_insertP:
        with open(raw_dir + 'c_stat.p', 'wb') as pfile:
            pickle.dump(C_stat.get_states(), pfile)

        with open(raw_dir + 'insP_stat.p', 'wb') as pfile:
            pickle.dump(insP_stat.get_states(), pfile)

    if tr.istdp_active and tr.adjust_EI_insertP:
        with open(raw_dir + 'c_EI_stat.p', 'wb') as pfile:
            pickle.dump(C_EI_stat.get_states(), pfile)

        with open(raw_dir + 'insP_EI_stat.p', 'wb') as pfile:
            pickle.dump(insP_EI_stat.get_states(), pfile)

    with open(raw_dir + 'gexc_spks.p', 'wb') as pfile:
        pickle.dump(GExc_spks.get_states(), pfile)
    with open(raw_dir + 'ginh_spks.p', 'wb') as pfile:
        pickle.dump(GInh_spks.get_states(), pfile)

    if tr.external_mode == 'poisson':
        with open(raw_dir + 'pinp_spks.p', 'wb') as pfile:
            pickle.dump(PInp_spks.get_states(), pfile)

    with open(raw_dir + 'gexc_rate.p', 'wb') as pfile:
        pickle.dump(GExc_rate.get_states(), pfile)
        if tr.rates_rec:
            pickle.dump(GExc_rate.smooth_rate(width=25 * ms), pfile)
    with open(raw_dir + 'ginh_rate.p', 'wb') as pfile:
        pickle.dump(GInh_rate.get_states(), pfile)
        if tr.rates_rec:
            pickle.dump(GInh_rate.smooth_rate(width=25 * ms), pfile)

    if tr.external_mode == 'poisson':
        with open(raw_dir + 'pinp_rate.p', 'wb') as pfile:
            pickle.dump(PInp_rate.get_states(), pfile)
            if tr.rates_rec:
                pickle.dump(PInp_rate.smooth_rate(width=25 * ms), pfile)

    # ----------------- add raw data ------------------------
    fpath = 'builds/%.4d/' % (tr.v_idx)

    from pathlib import Path

    Path(fpath + 'turnover').touch()
    turnover_data = np.genfromtxt(fpath + 'turnover', delimiter=',')
    os.remove(fpath + 'turnover')

    with open(raw_dir + 'turnover.p', 'wb') as pfile:
        pickle.dump(turnover_data, pfile)

    Path(fpath + 'turnover_EI').touch()
    turnover_EI_data = np.genfromtxt(fpath + 'turnover_EI', delimiter=',')
    os.remove(fpath + 'turnover_EI')

    with open(raw_dir + 'turnover_EI.p', 'wb') as pfile:
        pickle.dump(turnover_EI_data, pfile)

    Path(fpath + 'spk_register').touch()
    spk_register_data = np.genfromtxt(fpath + 'spk_register', delimiter=',')
    os.remove(fpath + 'spk_register')

    with open(raw_dir + 'spk_register.p', 'wb') as pfile:
        pickle.dump(spk_register_data, pfile)

    Path(fpath + 'spk_register_EI').touch()
    spk_register_EI_data = np.genfromtxt(fpath + 'spk_register_EI',
                                         delimiter=',')
    os.remove(fpath + 'spk_register_EI')

    with open(raw_dir + 'spk_register_EI.p', 'wb') as pfile:
        pickle.dump(spk_register_EI_data, pfile)

    Path(fpath + 'scaling_deltas').touch()
    scaling_deltas_data = np.genfromtxt(fpath + 'scaling_deltas',
                                        delimiter=',')
    os.remove(fpath + 'scaling_deltas')

    with open(raw_dir + 'scaling_deltas.p', 'wb') as pfile:
        pickle.dump(scaling_deltas_data, pfile)

    Path(fpath + 'scaling_deltas_EI').touch()
    scaling_deltas_data = np.genfromtxt(fpath + 'scaling_deltas_EI',
                                        delimiter=',')
    os.remove(fpath + 'scaling_deltas_EI')

    with open(raw_dir + 'scaling_deltas_EI.p', 'wb') as pfile:
        pickle.dump(scaling_deltas_data, pfile)

    with open(raw_dir + 'profiling_summary.txt', 'w+') as tfile:
        tfile.write(str(profiling_summary(net)))

    # --------------- cross-correlations ---------------------

    if tr.crs_crrs_rec:

        GExc_spks = GExc_spks.get_states()
        synee_a = SynEE_a_states
        wsize = 100 * pq.ms

        for binsize in [1 * pq.ms, 2 * pq.ms, 5 * pq.ms]:

            wlen = int(wsize / binsize)

            ts, idxs = GExc_spks['t'], GExc_spks['i']
            idxs = idxs[ts > tr.T1 + tr.T2 + tr.T3 + tr.T4]
            ts = ts[ts > tr.T1 + tr.T2 + tr.T3 + tr.T4]
            ts = ts - (tr.T1 + tr.T2 + tr.T3 + tr.T4)

            sts = [
                neo.SpikeTrain(ts[idxs == i] / second * pq.s,
                               t_stop=tr.T5 / second * pq.s)
                for i in range(tr.N_e)
            ]

            crs_crrs, syn_a = [], []

            for f, (i, j) in enumerate(zip(synee_a['i'], synee_a['j'])):
                if synee_a['syn_active'][-1][f] == 1:

                    crs_crr, cbin = cch(BinnedSpikeTrain(sts[i],
                                                         binsize=binsize),
                                        BinnedSpikeTrain(sts[j],
                                                         binsize=binsize),
                                        cross_corr_coef=True,
                                        border_correction=True,
                                        window=(-1 * wlen, wlen))

                    crs_crrs.append(list(np.array(crs_crr).T[0]))
                    syn_a.append(synee_a['a'][-1][f])

            fname = 'crs_crrs_wsize%dms_binsize%fms_full' % (wsize / pq.ms,
                                                             binsize / pq.ms)

            df = {
                'cbin': cbin,
                'crs_crrs': np.array(crs_crrs),
                'syn_a': np.array(syn_a),
                'binsize': binsize,
                'wsize': wsize,
                'wlen': wlen
            }

            with open('builds/%.4d/raw/' % (tr.v_idx) + fname + '.p',
                      'wb') as pfile:
                pickle.dump(df, pfile)

        GInh_spks = GInh_spks.get_states()
        synei_a = SynEI_a_states
        wsize = 100 * pq.ms

        for binsize in [1 * pq.ms, 2 * pq.ms, 5 * pq.ms]:

            wlen = int(wsize / binsize)

            ts_E, idxs_E = GExc_spks['t'], GExc_spks['i']
            idxs_E = idxs_E[ts_E > tr.T1 + tr.T2 + tr.T3 + tr.T4]
            ts_E = ts_E[ts_E > tr.T1 + tr.T2 + tr.T3 + tr.T4]
            ts_E = ts_E - (tr.T1 + tr.T2 + tr.T3 + tr.T4)

            ts_I, idxs_I = GInh_spks['t'], GInh_spks['i']
            idxs_I = idxs_I[ts_I > tr.T1 + tr.T2 + tr.T3 + tr.T4]
            ts_I = ts_I[ts_I > tr.T1 + tr.T2 + tr.T3 + tr.T4]
            ts_I = ts_I - (tr.T1 + tr.T2 + tr.T3 + tr.T4)

            sts_E = [
                neo.SpikeTrain(ts_E[idxs_E == i] / second * pq.s,
                               t_stop=tr.T5 / second * pq.s)
                for i in range(tr.N_e)
            ]

            sts_I = [
                neo.SpikeTrain(ts_I[idxs_I == i] / second * pq.s,
                               t_stop=tr.T5 / second * pq.s)
                for i in range(tr.N_i)
            ]

            crs_crrs, syn_a = [], []

            for f, (i, j) in enumerate(zip(synei_a['i'], synei_a['j'])):
                if synei_a['syn_active'][-1][f] == 1:

                    crs_crr, cbin = cch(BinnedSpikeTrain(sts_I[i],
                                                         binsize=binsize),
                                        BinnedSpikeTrain(sts_E[j],
                                                         binsize=binsize),
                                        cross_corr_coef=True,
                                        border_correction=True,
                                        window=(-1 * wlen, wlen))

                    crs_crrs.append(list(np.array(crs_crr).T[0]))
                    syn_a.append(synei_a['a'][-1][f])

            fname = 'EI_crrs_wsize%dms_binsize%fms_full' % (wsize / pq.ms,
                                                            binsize / pq.ms)

            df = {
                'cbin': cbin,
                'crs_crrs': np.array(crs_crrs),
                'syn_a': np.array(syn_a),
                'binsize': binsize,
                'wsize': wsize,
                'wlen': wlen
            }

            with open('builds/%.4d/raw/' % (tr.v_idx) + fname + '.p',
                      'wb') as pfile:
                pickle.dump(df, pfile)

    # -----------------  clean up  ---------------------------
    shutil.rmtree('builds/%.4d/results/' % (tr.v_idx))
    shutil.rmtree('builds/%.4d/static_arrays/' % (tr.v_idx))
    shutil.rmtree('builds/%.4d/brianlib/' % (tr.v_idx))
    shutil.rmtree('builds/%.4d/code_objects/' % (tr.v_idx))

    # ---------------- plot results --------------------------

    #os.chdir('./analysis/file_based/')

    if tr.istdp_active:
        from src.analysis.overview_winh import overview_figure
        overview_figure('builds/%.4d' % (tr.v_idx), namespace)
    else:
        from src.analysis.overview import overview_figure
        overview_figure('builds/%.4d' % (tr.v_idx), namespace)

    from src.analysis.synw_fb import synw_figure
    synw_figure('builds/%.4d' % (tr.v_idx), namespace)
    if tr.istdp_active:
        synw_figure('builds/%.4d' % (tr.v_idx), namespace, connections='EI')

    from src.analysis.synw_log_fb import synw_log_figure
    synw_log_figure('builds/%.4d' % (tr.v_idx), namespace)
    if tr.istdp_active:
        synw_log_figure('builds/%.4d' % (tr.v_idx),
                        namespace,
                        connections='EI')
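# The cross-correlation step at the end of run_net, reduced to a
# self-contained sketch with two synthetic trains. `cch` in the original is
# presumably elephant's cross_correlation_histogram; cross_corr_coef=True
# rescales the histogram to correlation-coefficient units (available in the
# elephant versions this script targets).
import numpy as np
import quantities as pq
import neo
from elephant.conversion import BinnedSpikeTrain
from elephant.spike_train_correlation import cross_correlation_histogram as cch

binsize = 2 * pq.ms
wsize = 100 * pq.ms
wlen = int(wsize / binsize)
sts = [neo.SpikeTrain(np.sort(np.random.uniform(0, 5, 100)) * pq.s,
                      t_stop=5 * pq.s) for _ in range(2)]
crs_crr, cbin = cch(BinnedSpikeTrain(sts[0], binsize=binsize),
                    BinnedSpikeTrain(sts[1], binsize=binsize),
                    cross_corr_coef=True,
                    border_correction=True,
                    window=(-1 * wlen, wlen))
print(np.array(crs_crr).T[0].shape)  # (2 * wlen + 1,)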
Example #16
run(simtime)
(E_net + I_net).write_data("Results/brunel_np%d_%s.pkl" % (np, simulator_name))

efull_length = E_net.get_data().segments[0]
efull_trains = BinnedSpikeTrain(E_net.get_data().segments[0].spiketrains,
                                binsize=5 * ms)
ecov = elephant.spike_train_correlation.covariance(efull_trains)
eccf = elephant.spike_train_correlation.corrcoef(efull_trains)

ifull_length = I_net.get_data().segments[0]
ifull_trains = BinnedSpikeTrain(I_net.get_data().segments[0].spiketrains,
                                binsize=5 * ms)
icov = elephant.spike_train_correlation.covariance(ifull_trains)
iccf = elephant.spike_train_correlation.corrcoef(ifull_trains)


def write_data(data, fname):
    with open(fname + str('.p'), 'wb') as f:
        pickle.dump(data, f)
    return
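# A minimal usage sketch (not from the original script): persist the
# covariance and correlation matrices computed above; the file names are
# illustrative.
write_data(ecov, 'ecov_brunel')
write_data(eccf, 'eccf_brunel')
write_data(icov, 'icov_brunel')
write_data(iccf, 'iccf_brunel')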
Example #17
    cchArray1 = np.array(cch[0][:, 0].magnitude)
    return cchArray1.max()


def generateCorrelationMatrix(neoDataset, connectionDict):
    connectionDictionary = connectionDict
    for i in range(len(neoDataset)):
        for j in range(len(neoDataset)):
            #if i != j:
            connectionDictionary[i][j] = connectionDictionary[i][j] + calcCCH(
                i, j, neoDataset)
    return connectionDictionary


#This code will produce a single cross correlogram
binned_st1 = BinnedSpikeTrain(neoDataset[0], binsize=1 * ms)
binned_st2 = BinnedSpikeTrain(neoDataset[5], binsize=1 * ms)

cch = cross_correlation_histogram(binned_st1,
                                  binned_st2,
                                  window=[-10, 10],
                                  border_correction=True,
                                  binary=True,
                                  kernel=None)
print(cch)
cchArray = cch[0][:, 0].magnitude.round()
cchArrayTime = cch[0].times.magnitude
cchArrayNP = np.array(cchArray)
print("argmax is:", cchArrayNP.max())

#calculate the cross-correlograms of the entire dataset,
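# A hedged sketch of that all-pairs computation, assuming calcCCH(i, j,
# neoDataset) returns the peak CCH value for a pair of trains (as defined
# above) and that a dict-of-dicts of zeros is an acceptable accumulator;
# the names below are illustrative, not from the original script.
connectionDict = {i: {j: 0.0 for j in range(len(neoDataset))}
                  for i in range(len(neoDataset))}
correlationMatrix = generateCorrelationMatrix(neoDataset, connectionDict)
strongest = max((correlationMatrix[i][j], (i, j))
                for i in correlationMatrix
                for j in correlationMatrix[i] if i != j)
print("strongest connection:", strongest[1], "with peak", strongest[0])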
Example #18
spike_time_bin = 0.002

for i_run in range(nrun):
    print('Processing dataset ' + str(i_run + 1) + '/' + str(nrun))
    path = '../data' + str(i_run) + '/'
    spike_times_list = __load_spike_times(path, name, begin, end, npop)

    for ipop in range(npop):
        spike_times = spike_times_list[ipop]
        st_list = []
        for j in range(matrix_size):
            spike_train = SpikeTrain(np.array(spike_times[j]) * s,
                                     t_stop=(end / 1000.0) * s)
            st_list.append(spike_train)

        binned_st = BinnedSpikeTrain(st_list, spike_time_bin * s, None,
                                     (begin / 1000.0) * s, (end / 1000.0) * s)
        #print (binned_st)
        cc_matrix = corrcoef(binned_st)
        correl = []
        for j in range(matrix_size):
            for k in range(matrix_size):
                #print(j, k, cc_matrix[j][k])
                if (j != k and cc_matrix[j][k] < xmax
                        and cc_matrix[j][k] > xmin):
                    correl.append(cc_matrix[j][k])

        x, hist1 = __smooth_hist(correl, xmin, xmax, nx)
        arr = np.column_stack((x, hist1))
        np.savetxt(path + 'correl_' + str(ipop) + '.dat', arr)

        if i_run == 0:
Example #19
def get_firing_rate_metrics(neuronset, spikes_fn, num_neurons=8000.,
                            rows=50000000., start_time=100., dt=1.,
                            window=1000., snapshot_dt=200000.,
                            isi_enabled=True, std_enabled=True,
                            cc_enabled=True):
    """Get various metrics from raster spike files.

    :neuronset: name of neuron set being looked at
    :spikes_fn: file name of spikes file
    :num_neurons: number of neurons in neuron set
    :rows: rows to be read in each pandas chunk
    :start_time: time to start the processing at (ms)
    :dt: increment value (ms)
    :window: window to count spikes in (ms)
    :snapshot_dt: interval between snapshots for ISI and STD metrics (ms)
    :isi_enabled: if ISI CVs should be calculated
    :std_enabled: if STD of firing rate should be calculated
    :cc_enabled: if the average spike correlation coefficient should be calculated
    :returns: True if everything went OK, else False

    """
    # Initial indices
    left = 0.
    right = 0.

    num_neurons = int(num_neurons)
    current_time = start_time
    old_neuronIDs = numpy.array([])
    old_times = numpy.array([])
    lgr.info("Processing {}.".format(spikes_fn))
    if not os.path.exists(spikes_fn):
        lgr.error("File not found {}".format(spikes_fn))
        return False

    with open("mean-firing-rates-{}.gdf".format(neuronset), 'w') as fh1, \
            open("std-firing-rates-{}.gdf".format(neuronset), 'w') as fh2, \
            open("ISI-cv-{}.gdf".format(neuronset), 'w') as fh3, \
            open("cc-{}.gdf".format(neuronset), 'w') as fh4:

        for chunk in pandas.read_csv(spikes_fn, sep='\s+',  # noqa: W605
                                     names=["neuronID",
                                            "spike_time"],
                                     dtype={'neuronID': numpy.uint16,
                                            'spike_time': float},
                                     lineterminator="\n",
                                     skipinitialspace=True,
                                     header=None, index_col=None,
                                     skip_blank_lines=True,
                                     chunksize=rows):

            # Drop rows with nan
            chunk = chunk.dropna(how='any')
            if not validate_raster_df(chunk):
                lgr.error("Error in {}. Skipping.".format(spikes_fn))
                return False

            neuronIDs = numpy.array(chunk.values[:, 0])
            times = numpy.array(chunk.values[:, 1])

            # 200 neuronIDs per second = 2 neuronIDs per 0.01 second (dt) per
            # neuron; this implies 2 * 10000 neuronIDs must be kept for 10000
            # neurons to make sure there is a proper sliding window across
            # chunks
            if len(old_neuronIDs) > 0:
                neuronIDs = numpy.append(old_neuronIDs, neuronIDs)
                times = numpy.append(old_times, times)

            lgr.debug(
                "Times from {} to {} being analysed containing {} rows".format(
                    times[0], times[-1], len(times)))

            lgr.debug("Current time is {}".format(current_time))

            # Reset chunks
            left = 0
            right = 0

            while (current_time < math.floor(times[-1])):
                # Initialise these to 0
                mean_firing_rate = 0.
                spikesnum = 0.
                mystd = -1

                left += numpy.searchsorted(times[left:],
                                           (current_time - window),
                                           side='left')
                right = left + numpy.searchsorted(
                    times[left:], current_time,
                    side='right')

                # point is less than the first value in the chunk
                if right == 0 and left == 0:
                    lgr.warning("Point too small for chunk")
                    current_time = times[0]
                    lgr.warning("Time to reset to: {}".format(times[0]))
                    continue

                # interval not found, no spikes - not necessarily at max
                # the max check is in the while condition, and that
                # ascertains if a new chunk should be read
                if right == left:
                    lgr.warning("No spikes in interval at {}".format(
                        current_time))
                    # Increment it by snapshot_dt to ensure that the next check
                    # for ISI and STD metrics can be made.
                    # Ideally, I should be able to move it to times[left], but
                    # there is no guarantee that it would remain divisible by
                    # snapshot_dt. That would mean that the next bits are never
                    # run, even if there are spikes.
                    current_time += snapshot_dt
                    lgr.warning("Current time updated to {}".format(
                        current_time))

                    # Print NA values for STD and ISI which will not be
                    # calculated for this time
                    lgr.warning("Printing invalid values for STD and ISI CV")

                    # For gnuplot, lines starting with # are ignored.
                    # To skip these points and have a discontinuous graph in
                    # gnuplot, one must leave a blank line in the text.
                    print(
                        "#{}\tNA\n".format(current_time / 1000.),
                        file=fh2, flush=True)

                    print(
                        "#{}\tNA\n".format(current_time / 1000.),
                        file=fh3, flush=True)
                    continue

                # could even just do right - left if all I'm using is len
                thiswindow_neuronIDs = neuronIDs[left:right]
                thiswindow_times = times[left:right]

                # mean firing rate
                spikesnum = float(len(thiswindow_neuronIDs))
                mean_firing_rate = (spikesnum / num_neurons) / (window / 1000)
                # total neuronIDs by number of neurons
                print(
                    "{}\t{}".format(current_time / 1000.,
                                    mean_firing_rate),
                    file=fh1, flush=True)

                # We only get here if there are some spikes, so there's no need
                # to check for that again.

                # STD of firing rates and ISI cv - it just takes way too much
                # time to do for each dt - my post processing won't finish.
                # So, we calculate it at intervals
                if ((current_time - start_time) % snapshot_dt == 0):
                    if std_enabled:
                        lgr.debug("STD for {}".format(
                            current_time))
                        # STD of firing rates
                        # calculate firing rates of each neuron in the window
                        # then find STD
                        spike_counts = collections.Counter(thiswindow_neuronIDs)
                        firing_rates = []
                        for neuron, count in spike_counts.items():
                            firing_rates.append(count / (window / 1000))
                        neurons_spiking = len(firing_rates)
                        # Add 0s for neurons that did not spike
                        for i in range(0, (num_neurons - neurons_spiking)):
                            firing_rates.append(0)

                        lgr.debug("std being calculated from {} values".format(
                            len(firing_rates)))
                        mystd = numpy.std(firing_rates)
                        print(
                            "{}\t{}".format(current_time / 1000., mystd),
                            file=fh2, flush=True)

                    if cc_enabled:
                        lgr.debug("CC for {}".format(
                            current_time))
                        # CC
                        # I do not sort them.
                        # Get unique neuron list
                        neurons = list(set(thiswindow_neuronIDs))
                        # Shuffle them so that the spike trains are from a shuffled
                        # pack of neurons when the CC is calculated
                        random.shuffle(neurons)
                        # Use 10% of the neurons, but at least 800, when
                        # more than 800 spiked; otherwise use all of them
                        if len(neurons) > 800:
                            N = max(int(0.1 * len(neurons)), 800)
                        else:
                            N = len(neurons)
                        lgr.debug("CC is using {} neurons".format(N))
                        neurons = neurons[0:N]
                        spike_trains = []
                        # Get spike trains for each neuron
                        for nrn in list(neurons):
                            indices = [i for i, x in
                                       enumerate(thiswindow_neuronIDs) if x == nrn]
                            nrn_spike_times = thiswindow_times[indices]
                            nrn_spiketrain = SpikeTrain(nrn_spike_times * ms,
                                                        t_stop=thiswindow_times[-1])
                            spike_trains.append(nrn_spiketrain)

                        bin_size = (5 * ms)
                        binned_spike_trains = BinnedSpikeTrain(spike_trains,
                                                               bin_size)
                        cc_matrix = correlation_coefficient(binned_spike_trains)
                        # elements in triangle: (N * (N-1)/2)
                        # mean of cc values = (sum of triangle)/(N * (N-1)/2)
                        avg_cc = (
                            numpy.nansum(numpy.tril(cc_matrix, -1)) / (N * (N - 1) / 2)
                        )
                        print(
                            "{}\t{}\t{}".format(current_time / 1000., N, avg_cc), file=fh4,
                            flush=True)

                    if isi_enabled:
                        # ISI stats
                        neurons = set(thiswindow_neuronIDs)
                        lgr.debug("ISI: {} neurons being analysed.".format(
                            len(neurons)))
                        # for all neurons in this window
                        ISI_cvs = []
                        for neuron in list(neurons):
                            indices = [i for i, x
                                       in enumerate(thiswindow_neuronIDs)
                                       if x == neuron]
                            neuron_times = [thiswindow_times[i] for i in
                                            indices]

                            ISIs = []
                            if len(neuron_times) > 1:
                                # otherwise ISI is undefined in this window for
                                # this neuron
                                prev = neuron_times[0]
                                # get a list of ISIs
                                for neuron_time in neuron_times:
                                    ISIs.append(neuron_time - prev)
                                    prev = neuron_time

                                # for this neuron, get stats
                                ISI_mean = numpy.mean(ISIs)
                                ISI_std = numpy.std(ISIs)
                                ISI_cv = ISI_std / ISI_mean

                                if not numpy.isnan(ISI_cv):
                                    ISI_cvs.append(ISI_cv)

                        print(
                            "{}\t{}".format(current_time / 1000.,
                                            numpy.mean(ISI_cvs)),
                            file=fh3, flush=True)

                current_time += dt

            lgr.debug("Printed till {}".format(current_time))
            old_times = numpy.array(times[(left - len(times)):])
            old_neuronIDs = numpy.array(neuronIDs[(left - len(neuronIDs)):])

            del neuronIDs
            del times
            gc.collect()

    lgr.info("Finished processing {}".format(spikes_fn))
    return True
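# A minimal usage sketch (not from the original script), assuming a
# whitespace-separated raster file "spikes-E.gdf" with neuronID and
# spike_time columns exists; the file name and neuron count are
# illustrative.
if get_firing_rate_metrics("E", "spikes-E.gdf", num_neurons=8000):
    lgr.info("Firing rate metrics written for neuron set E")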
Example #20
#%%
#doing FI curve
#finding mean firing rate for entire sweep
from elephant.statistics import mean_firing_rate
firing_rate_list = []
for spiketrain in neospiketrain_list:
    temp_firing_rate = mean_firing_rate(spiketrain.rescale(pq.s),
                                        t_start=0.5 * pq.s,
                                        t_stop=1.5 * pq.s)
    firing_rate_list.append(np.asarray(temp_firing_rate))
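# A short sketch (not from the original script) plotting the mean rate per
# sweep as a stand-in for a true FI curve; assumes the sweep index orders
# the injected current amplitudes.
import matplotlib.pyplot as plt
plt.figure()
plt.plot(np.asarray(firing_rate_list, dtype=float), marker='o')
plt.xlabel('sweep index')
plt.ylabel('mean firing rate (Hz)')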

# %% conversion of discrete spike times to binary counts
from elephant.conversion import BinnedSpikeTrain
bst_list = BinnedSpikeTrain(del_half_neospiketrain_list,
                            binsize=1.0 * pq.ms,
                            t_start=700.0 * pq.ms,
                            t_stop=3200.0 * pq.ms)

bst_arr = bst_list.to_array()  # export binned spike times to an array
bst_df = pd.DataFrame(bst_arr).T  # turn into a df and transpose (.T)
bst_sum = bst_df.apply(np.sum, axis=1)  # sum by row across columns

# plt.figure()
#plt.plot(bst_sum)
"""
Making PSTH for the whole sweep, with the first 500 ms included
"""
bst_list_graph = BinnedSpikeTrain(del_neospiketrain_list,
                                  binsize=1.0 * pq.ms,
                                  t_start=200.0 * pq.ms,
                                  t_stop=3200.0 * pq.ms)
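# A sketch (not from the original script) of turning the binned trains into
# a PSTH, reusing the array -> DataFrame -> row-sum pattern from above.
psth_sum = pd.DataFrame(bst_list_graph.to_array()).T.apply(np.sum, axis=1)
plt.figure()
plt.plot(psth_sum)
plt.xlabel('bin (1 ms)')
plt.ylabel('spike count')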
Example #21
plt.axis('tight')
plt.xlim(0, runtime)
plt.xlabel('Time (ms)', fontsize=16)
plt.ylabel('Spike Train Index', fontsize=16)
plt.gca().tick_params(axis='both', which='major', labelsize=14)
plt.savefig('decorr_rasterplot_w{}_k{}.png'.format(w, numInhPerNeuron))
'''

# calculate ISIs and coefficient of variation (CV)

isi_list  = [np.nanmean(isi(spiketrain))       for spiketrain in snglnrn_spikes_neo]
rate_list = [(np.size(spiketrain) / runtime * 1e3) for spiketrain in snglnrn_spikes]
cv_list   = [cv(isi(spiketrain))               for spiketrain in snglnrn_spikes_neo]


train = BinnedSpikeTrain(snglnrn_spikes_neo, binsize=5 * q.ms)
cc_matrix = corrcoef(train, binary=False)

# Save the correlation matrix for later use
#np.savetxt('cc_matrix.txt', cc_matrix)
#print(np.shape(cc_matrix)) # (192, 192)
#print(cc_matrix)
#plt.plot(cc_matrix)

# Remove the main diagonal
for i in range(192):
    cc_matrix[i][i] = np.nan

# Remove NaN values
cc_matrix = cc_matrix[:,~np.isnan(cc_matrix).all(0)]
cc_matrix = cc_matrix[~np.isnan(cc_matrix).all(1)]
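# A short follow-up sketch (not from the original script): with the main
# diagonal set to NaN, np.nanmean summarizes the mean pairwise correlation.
mean_cc = np.nanmean(cc_matrix)
print('mean pairwise correlation:', mean_cc)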
Example #22
"""
#%%
"""
28Sep2017
NOTE: Work on conversion of discrete spike time lists to binary spike counts.
Use this conversion for PSTH and for spike-time correlations.
"""
##ADDED bstc_df to cut the first 500 ms of data out for correlations
###conversion of discrete spike times to binary counts

from elephant.conversion import BinnedSpikeTrain
bins = 1.0 #define the bin size here in ms
for item in g_list:
    for key in channels[item]:
        bst_list = BinnedSpikeTrain(channels[item][key]['neoSpkTrain500'],
                                    binsize = bins*pq.ms,
                                    t_start = 650.0*pq.ms,
                                    t_stop = 3160*pq.ms)


        bst_arr = bst_list.to_array()           #export binned spike times to an array
        bst_df = pd.DataFrame(bst_arr).T        #turn into a df and transpose (.T)
   #     bst_df.index = np.arange(550,3050,bins)
        bst_sum = bst_df.apply(np.sum,axis=1)   #sum by row across columns
      #  bst_sum.index = np.arange(550,3050,bins)
        channels[item][key]['binned_spike_list'] = bst_list
        channels[item][key]['binnedSpikes'] = bst_df
        channels[item][key]['binnedSpksSum'] = bst_sum
        bst_arr = None
        bst_df = None
        bst_sum = None
Example #23
def spike_field_coherence(signal, spiketrain, **kwargs):
    """
    Calculates the spike-field coherence between an analog signal(s) and a
    (binned) spike train.

    The current implementation makes use of scipy.signal.coherence(). Additional
    kwargs will be directly forwarded to scipy.signal.coherence(),
    except for the axis parameter and the sampling frequency, which will be
    extracted from the input signals.

    The spike_field_coherence function receives an analog signal array and
    either a binned spike train or a spike train containing the original spike
    times. In case of original spike times the spike train is binned according
    to the sampling rate of the analog signal array.

    The AnalogSignal object can contain one or multiple signal traces. In case
    of multiple signal traces, the spike field coherence is calculated
    individually for each signal trace and the spike train.

    Parameters
    ----------
    signal : neo AnalogSignalArray object
        'signal' contains n analog signals.
    spiketrain : SpikeTrain or BinnedSpikeTrain
        Single spike train to perform the analysis on. The binsize of the
        binned spike train must match the sampling_rate of signal.

    KWArgs
    ------
    All KWArgs are passed to scipy.signal.coherence().

    Returns
    -------
    coherence : complex Quantity array
        contains the coherence values calculated for each analog signal trace
        in combination with the spike train. The first dimension corresponds to
        the frequency, the second to the number of the signal trace.
    frequencies : Quantity array
        contains the frequency values corresponding to the first dimension of
        the 'coherence' array

    Example
    -------

    Plot the SFC between a regular spike train at 20 Hz, and two sinusoidal
    time series at 20 Hz and 23 Hz, respectively.

    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from quantities import ms, mV, Hz, kHz
    >>> import neo, elephant

    >>> t = pq.Quantity(range(10000),units='ms')
    >>> f1, f2 = 20. * Hz, 23. * Hz
    >>> signal = neo.AnalogSignal(np.array([
            np.sin(f1 * 2. * np.pi * t.rescale(s)),
            np.sin(f2 * 2. * np.pi * t.rescale(s))]).T,
            units=pq.mV, sampling_rate=1. * kHz)
    >>> spiketrain = neo.SpikeTrain(
        range(t[0], t[-1], 50), units='ms',
        t_start=t[0], t_stop=t[-1])
    >>> sfc, freqs = elephant.sta.spike_field_coherence(
        signal, spiketrain, window='boxcar')

    >>> plt.plot(freqs, sfc[:,0])
    >>> plt.plot(freqs, sfc[:,1])
    >>> plt.xlabel('Frequency [Hz]')
    >>> plt.ylabel('SFC')
    >>> plt.xlim((0, 60))
    >>> plt.show()
    """

    if not hasattr(scipy.signal, 'coherence'):
        raise AttributeError('scipy.signal.coherence is not available. The sfc '
                             'function uses scipy.signal.coherence for '
                             'the coherence calculation. This function is '
                             'available for scipy version 0.16 or newer. '
                             'Please update your scipy version.')

    # spiketrains type check
    if not isinstance(spiketrain, (SpikeTrain, BinnedSpikeTrain)):
        raise TypeError(
            "spiketrain must be of type SpikeTrain or BinnedSpikeTrain, "
            "not %s." % type(spiketrain))

    # checks on analogsignal
    if not isinstance(signal, AnalogSignalArray):
        raise TypeError(
            "Signal must be an AnalogSignalArray, not %s." % type(signal))
    if len(signal.shape) > 1:
        # num_signals: number of individual traces in the analog signal
        num_signals = signal.shape[1]
    elif len(signal.shape) == 1:
        num_signals = 1
    else:
        raise ValueError("Empty analog signal.")
    len_signals = signal.shape[0]

    # bin spiketrain if necessary
    if isinstance(spiketrain, SpikeTrain):
        spiketrain = BinnedSpikeTrain(
            spiketrain, binsize=signal.sampling_period)

    # check the start and stop times of signal and spike trains
    if spiketrain.t_start < signal.t_start:
        raise ValueError(
            "The spiketrain starts earlier than the analog signal.")
    if spiketrain.t_stop > signal.t_stop:
        raise ValueError(
            "The spiketrain stops later than the analog signal.")

    # check equal time resolution for both signals
    if spiketrain.binsize != signal.sampling_period:
        raise ValueError(
            "The spiketrain and signal must have a "
            "common sampling frequency / binsize")

    # calculate how many bins to add on the left of the binned spike train
    delta_t = spiketrain.t_start - signal.t_start
    if delta_t % spiketrain.binsize == 0:
        left_edge = int((delta_t / spiketrain.binsize).magnitude)
    else:
        raise ValueError("Incompatible binning of spike train and LFP")
    right_edge = int(left_edge + spiketrain.num_bins)

    # duplicate spike trains
    spiketrain_array = np.zeros((1, len_signals))
    spiketrain_array[0, left_edge:right_edge] = spiketrain.to_array()
    spiketrains_array = np.squeeze(
        np.repeat(spiketrain_array, repeats=num_signals, axis=0)).transpose()

    # calculate coherence
    frequencies, sfc = scipy.signal.coherence(
        spiketrains_array, signal.magnitude,
        fs=signal.sampling_rate.rescale('Hz').magnitude,
        axis=0, **kwargs)

    return (pq.Quantity(sfc, units=pq.dimensionless),
            pq.Quantity(frequencies, units=pq.Hz))
Example #24
def run_net(tr):

    # prefs.codegen.target = 'numpy'
    # prefs.codegen.target = 'cython'
    if tr.n_threads > 1:
        prefs.devices.cpp_standalone.openmp_threads = tr.n_threads

    set_device('cpp_standalone',
               directory='./builds/%.4d' % (tr.v_idx),
               build_on_run=False)

    print("Started process with id ", str(tr.v_idx))

    T = tr.T1 + tr.T2 + tr.T3 + tr.T4 + tr.T5

    namespace = tr.netw.f_to_dict(short_names=True, fast_access=True)
    namespace['idx'] = tr.v_idx

    defaultclock.dt = tr.netw.sim.dt

    # collect all network components dependent on configuration
    # (e.g. poisson vs. memnoise) and add them to the Brian 2
    # network object later
    netw_objects = []

    if tr.external_mode == 'memnoise':
        neuron_model = tr.condlif_memnoise
    elif tr.external_mode == 'poisson':
        neuron_model = tr.condlif_poisson

    GExc = NeuronGroup(
        N=tr.N_e,
        model=neuron_model,
        threshold=tr.nrnEE_thrshld,
        reset=tr.nrnEE_reset,  #method=tr.neuron_method,
        namespace=namespace)
    GInh = NeuronGroup(
        N=tr.N_i,
        model=neuron_model,
        threshold='V > Vt',
        reset='V=Vr_i',  #method=tr.neuron_method,
        namespace=namespace)

    if tr.external_mode == 'memnoise':
        GExc.mu, GInh.mu = tr.mu_e, tr.mu_i
        GExc.sigma, GInh.sigma = tr.sigma_e, tr.sigma_i

    GExc.Vt, GInh.Vt = tr.Vt_e, tr.Vt_i
    GExc.V , GInh.V  = np.random.uniform(tr.Vr_e/mV, tr.Vt_e/mV,
                                         size=tr.N_e)*mV, \
                       np.random.uniform(tr.Vr_i/mV, tr.Vt_i/mV,
                                         size=tr.N_i)*mV

    netw_objects.extend([GExc, GInh])

    synEE_pre_mod = mod.synEE_pre
    synEE_post_mod = mod.synEE_post

    if tr.external_mode == 'poisson':

        if tr.PInp_mode == 'pool':
            PInp = PoissonGroup(tr.NPInp,
                                rates=tr.PInp_rate,
                                namespace=namespace,
                                name='poissongroup_exc')
            sPN = Synapses(target=GExc,
                           source=PInp,
                           model=tr.poisson_mod,
                           on_pre='gfwd_post += a_EPoi',
                           namespace=namespace,
                           name='synPInpExc')

            sPN_src, sPN_tar = generate_N_connections(N_tar=tr.N_e,
                                                      N_src=tr.NPInp,
                                                      N=tr.NPInp_1n)

        elif tr.PInp_mode == 'indep':
            PInp = PoissonGroup(tr.N_e,
                                rates=tr.PInp_rate,
                                namespace=namespace)
            sPN = Synapses(target=GExc,
                           source=PInp,
                           model=tr.poisson_mod,
                           on_pre='gfwd_post += a_EPoi',
                           namespace=namespace,
                           name='synPInp_inhInh')
            sPN_src, sPN_tar = range(tr.N_e), range(tr.N_e)

        sPN.connect(i=sPN_src, j=sPN_tar)

        if tr.PInp_mode == 'pool':
            PInp_inh = PoissonGroup(tr.NPInp_inh,
                                    rates=tr.PInp_inh_rate,
                                    namespace=namespace,
                                    name='poissongroup_inh')
            sPNInh = Synapses(target=GInh,
                              source=PInp_inh,
                              model=tr.poisson_mod,
                              on_pre='gfwd_post += a_EPoi',
                              namespace=namespace)
            sPNInh_src, sPNInh_tar = generate_N_connections(N_tar=tr.N_i,
                                                            N_src=tr.NPInp_inh,
                                                            N=tr.NPInp_inh_1n)

        elif tr.PInp_mode == 'indep':

            PInp_inh = PoissonGroup(tr.N_i,
                                    rates=tr.PInp_inh_rate,
                                    namespace=namespace)
            sPNInh = Synapses(target=GInh,
                              source=PInp_inh,
                              model=tr.poisson_mod,
                              on_pre='gfwd_post += a_EPoi',
                              namespace=namespace)
            sPNInh_src, sPNInh_tar = range(tr.N_i), range(tr.N_i)

        sPNInh.connect(i=sPNInh_src, j=sPNInh_tar)

        netw_objects.extend([PInp, sPN, PInp_inh, sPNInh])

    if tr.syn_noise:
        synEE_mod = '''%s 
                       %s''' % (tr.synEE_noise, tr.synEE_mod)
    else:
        synEE_mod = '''%s 
                       %s''' % (tr.synEE_static, tr.synEE_mod)

    if tr.stdp_active:
        synEE_pre_mod = '''%s 
                            %s''' % (synEE_pre_mod, mod.synEE_pre_STDP)
        synEE_post_mod = '''%s 
                            %s''' % (synEE_post_mod, mod.synEE_post_STDP)

    if tr.synEE_rec:
        synEE_pre_mod = '''%s 
                            %s''' % (synEE_pre_mod, mod.synEE_pre_rec)
        synEE_post_mod = '''%s 
                            %s''' % (synEE_post_mod, mod.synEE_post_rec)

    # E<-E advanced synapse model, rest simple
    SynEE = Synapses(target=GExc,
                     source=GExc,
                     model=synEE_mod,
                     on_pre=synEE_pre_mod,
                     on_post=synEE_post_mod,
                     namespace=namespace,
                     dt=tr.synEE_mod_dt)
    SynIE = Synapses(target=GInh,
                     source=GExc,
                     on_pre='ge_post += a_ie',
                     namespace=namespace)
    SynEI = Synapses(target=GExc,
                     source=GInh,
                     on_pre='gi_post += a_ei',
                     namespace=namespace)
    SynII = Synapses(target=GInh,
                     source=GInh,
                     on_pre='gi_post += a_ii',
                     namespace=namespace)

    if tr.strct_active:
        sEE_src, sEE_tar = generate_full_connectivity(tr.N_e, same=True)
        SynEE.connect(i=sEE_src, j=sEE_tar)
        SynEE.syn_active = 0

    else:
        srcs_full, tars_full = generate_full_connectivity(tr.N_e, same=True)
        SynEE.connect(i=srcs_full, j=tars_full)
        SynEE.syn_active = 0

    sIE_src, sIE_tar = generate_connections(tr.N_i, tr.N_e, tr.p_ie)
    sEI_src, sEI_tar = generate_connections(tr.N_e, tr.N_i, tr.p_ei)
    sII_src, sII_tar = generate_connections(tr.N_i, tr.N_i, tr.p_ii, same=True)

    SynIE.connect(i=sIE_src, j=sIE_tar)
    SynEI.connect(i=sEI_src, j=sEI_tar)
    SynII.connect(i=sII_src, j=sII_tar)

    tr.f_add_result('sIE_src', sIE_src)
    tr.f_add_result('sIE_tar', sIE_tar)
    tr.f_add_result('sEI_src', sEI_src)
    tr.f_add_result('sEI_tar', sEI_tar)
    tr.f_add_result('sII_src', sII_src)
    tr.f_add_result('sII_tar', sII_tar)

    if tr.syn_noise:
        SynEE.syn_sigma = tr.syn_sigma

    SynEE.insert_P = tr.insert_P
    SynEE.p_inactivate = tr.p_inactivate
    SynEE.stdp_active = 1

    ATM_vals = np.random.normal(loc=tr.ATotalMax,
                                scale=tr.ATotalMax_sd,
                                size=tr.N_e * (tr.N_e - 1))
    assert np.min(ATM_vals) > 0.
    SynEE.ATotalMax = ATM_vals

    # make randomly chosen synapses active at beginning
    rs = np.random.uniform(size=tr.N_e * (tr.N_e - 1))
    initial_active = (rs < tr.p_ee).astype('int')
    initial_a = initial_active * tr.a_ee
    SynEE.syn_active = initial_active
    SynEE.a = initial_a

    # recording of stdp in T4
    SynEE.stdp_rec_start = tr.T1 + tr.T2 + tr.T3
    SynEE.stdp_rec_max = tr.T1 + tr.T2 + tr.T3 + tr.stdp_rec_T

    # synaptic scaling
    if tr.netw.config.scl_active:

        if tr.syn_scl_rec:
            SynEE.scl_rec_start = tr.T1 + tr.T2 + tr.T3
            SynEE.scl_rec_max = tr.T1 + tr.T2 + tr.T3 + tr.scl_rec_T
        else:
            SynEE.scl_rec_start = T + 10 * second
            SynEE.scl_rec_max = T

        SynEE.summed_updaters['Asum_post']._clock = Clock(
            dt=tr.dt_synEE_scaling)
        synscaling = SynEE.run_regularly(tr.synEE_scaling,
                                         dt=tr.dt_synEE_scaling,
                                         when='end',
                                         name='syn_scaling')

    # # intrinsic plasticity
    # if tr.netw.config.it_active:
    #     GExc.h_ip = tr.h_ip
    #     GExc.run_regularly(tr.intrinsic_mod, dt = tr.it_dt, when='end')

    # structural plasticity
    if tr.netw.config.strct_active:
        if tr.strct_mode == 'zero':
            if tr.turnover_rec:
                strct_mod = '''%s 
                                %s''' % (tr.strct_mod, tr.turnover_rec_mod)
            else:
                strct_mod = tr.strct_mod

            strctplst = SynEE.run_regularly(strct_mod,
                                            dt=tr.strct_dt,
                                            when='end',
                                            name='strct_plst_zero')

        elif tr.strct_mode == 'thrs':
            if tr.turnover_rec:
                strct_mod_thrs = '''%s 
                                %s''' % (tr.strct_mod_thrs,
                                         tr.turnover_rec_mod)
            else:
                strct_mod_thrs = tr.strct_mod_thrs

            strctplst = SynEE.run_regularly(strct_mod_thrs,
                                            dt=tr.strct_dt,
                                            when='end',
                                            name='strct_plst_thrs')

    netw_objects.extend([SynEE, SynEI, SynIE, SynII])

    # keep track of the number of active synapses
    sum_target = NeuronGroup(1, 'c : 1 (shared)', dt=tr.csample_dt)

    sum_model = '''NSyn : 1 (constant)
                   c_post = (1.0*syn_active_pre)/NSyn : 1 (summed)'''
    sum_connection = Synapses(target=sum_target,
                              source=SynEE,
                              model=sum_model,
                              dt=tr.csample_dt,
                              name='get_active_synapse_count')
    sum_connection.connect()
    sum_connection.NSyn = tr.N_e * (tr.N_e - 1)

    if tr.adjust_insertP:
        # homeostatically adjust growth rate
        growth_updater = Synapses(sum_target, SynEE)
        growth_updater.run_regularly('insert_P_post *= 0.1/c_pre',
                                     when='after_groups',
                                     dt=tr.csample_dt,
                                     name='update_insP')
        growth_updater.connect(j='0')

        netw_objects.extend([sum_target, sum_connection, growth_updater])

    # -------------- recording ------------------

    GExc_recvars = []
    if tr.memtraces_rec:
        GExc_recvars.append('V')
    if tr.vttraces_rec:
        GExc_recvars.append('Vt')
    if tr.getraces_rec:
        GExc_recvars.append('ge')
    if tr.gitraces_rec:
        GExc_recvars.append('gi')
    if tr.gfwdtraces_rec and tr.external_mode == 'poisson':
        GExc_recvars.append('gfwd')

    GInh_recvars = GExc_recvars

    GExc_stat = StateMonitor(GExc,
                             GExc_recvars,
                             record=[0, 1, 2],
                             dt=tr.GExc_stat_dt)
    GInh_stat = StateMonitor(GInh,
                             GInh_recvars,
                             record=[0, 1, 2],
                             dt=tr.GInh_stat_dt)

    SynEE_recvars = []
    if tr.synee_atraces_rec:
        SynEE_recvars.append('a')
    if tr.synee_activetraces_rec:
        SynEE_recvars.append('syn_active')
    if tr.synee_Apretraces_rec:
        SynEE_recvars.append('Apre')
    if tr.synee_Aposttraces_rec:
        SynEE_recvars.append('Apost')

    SynEE_stat = StateMonitor(SynEE,
                              SynEE_recvars,
                              record=range(tr.n_synee_traces_rec),
                              when='end',
                              dt=tr.synEE_stat_dt)

    if tr.adjust_insertP:

        C_stat = StateMonitor(sum_target,
                              'c',
                              dt=tr.csample_dt,
                              record=[0],
                              when='end')
        insP_stat = StateMonitor(SynEE,
                                 'insert_P',
                                 dt=tr.csample_dt,
                                 record=[0],
                                 when='end')
        netw_objects.extend([C_stat, insP_stat])

    GExc_spks = SpikeMonitor(GExc)
    GInh_spks = SpikeMonitor(GInh)

    GExc_rate = PopulationRateMonitor(GExc)
    GInh_rate = PopulationRateMonitor(GInh)

    if tr.external_mode == 'poisson':
        PInp_spks = SpikeMonitor(PInp)
        PInp_rate = PopulationRateMonitor(PInp)
        netw_objects.extend([PInp_spks, PInp_rate])

    if tr.synee_a_nrecpoints == 0:
        SynEE_a_dt = 10 * tr.sim.T2
    else:
        SynEE_a_dt = tr.sim.T2 / tr.synee_a_nrecpoints
    SynEE_a = StateMonitor(SynEE, ['a', 'syn_active'],
                           record=range(tr.N_e * (tr.N_e - 1)),
                           dt=SynEE_a_dt,
                           when='end',
                           order=100)

    netw_objects.extend([
        GExc_stat, GInh_stat, SynEE_stat, SynEE_a, GExc_spks, GInh_spks,
        GExc_rate, GInh_rate
    ])

    net = Network(*netw_objects)

    def set_active(*argv):
        for net_object in argv:
            net_object.active = True

    def set_inactive(*argv):
        for net_object in argv:
            net_object.active = False

    ### Simulation periods

    # --------- T1 ---------
    # initial recording period,
    # all recorders active

    set_active(GExc_spks, GInh_spks, SynEE_stat, GExc_stat, GInh_stat,
               GExc_rate, GInh_rate)

    if tr.external_mode == 'poisson':
        set_active(PInp_spks, PInp_rate)

    net.run(tr.sim.T1, report='text', report_period=300 * second, profile=True)

    # --------- T2 ---------
    # main simulation period
    # only active recordings are:
    #   1) turnover 2) C_stat 3) SynEE_a

    set_inactive(GExc_spks, GInh_spks, SynEE_stat, GExc_stat, GInh_stat,
                 GExc_rate, GInh_rate)

    if tr.external_mode == 'poisson':
        set_inactive(PInp_spks, PInp_rate)

    net.run(tr.sim.T2, report='text', report_period=300 * second, profile=True)

    # --------- T3 ---------
    # second recording period,
    # all recorders active

    set_active(GExc_spks, GInh_spks, SynEE_stat, GExc_stat, GInh_stat,
               GExc_rate, GInh_rate)

    if tr.external_mode == 'poisson':
        set_active(PInp_spks, PInp_rate)

    net.run(tr.sim.T3, report='text', report_period=300 * second, profile=True)

    # --------- T4 ---------
    # record STDP and scaling weight changes to file
    # through the cpp models

    set_inactive(GExc_spks, GInh_spks, SynEE_stat, GExc_stat, GInh_stat,
                 GExc_rate, GInh_rate)

    if tr.external_mode == 'poisson':
        set_inactive(PInp_spks, PInp_rate)

    net.run(tr.sim.T4, report='text', report_period=300 * second, profile=True)

    # --------- T5 ---------
    # freeze network and record Exc spikes
    # for cross correlations

    synscaling.active = False
    strctplst.active = False
    SynEE.stdp_active = 0

    set_active(GExc_spks)

    net.run(tr.sim.T5, report='text', report_period=300 * second, profile=True)

    SynEE_a.record_single_timestep()

    device.build(directory='builds/%.4d' % (tr.v_idx),
                 clean=True,
                 compile=True,
                 run=True,
                 debug=False)

    # -----------------------------------------

    # save monitors as raws in build directory
    raw_dir = 'builds/%.4d/raw/' % (tr.v_idx)

    if not os.path.exists(raw_dir):
        os.makedirs(raw_dir)

    with open(raw_dir + 'namespace.p', 'wb') as pfile:
        pickle.dump(namespace, pfile)

    with open(raw_dir + 'gexc_stat.p', 'wb') as pfile:
        pickle.dump(GExc_stat.get_states(), pfile)
    with open(raw_dir + 'ginh_stat.p', 'wb') as pfile:
        pickle.dump(GInh_stat.get_states(), pfile)

    with open(raw_dir + 'synee_stat.p', 'wb') as pfile:
        pickle.dump(SynEE_stat.get_states(), pfile)
    with open(raw_dir + 'synee_a.p', 'wb') as pfile:
        SynEE_a_states = SynEE_a.get_states()
        if tr.crs_crrs_rec:
            SynEE_a_states['i'] = list(SynEE.i)
            SynEE_a_states['j'] = list(SynEE.j)
        pickle.dump(SynEE_a_states, pfile)

    if tr.adjust_insertP:
        with open(raw_dir + 'c_stat.p', 'wb') as pfile:
            pickle.dump(C_stat.get_states(), pfile)

        with open(raw_dir + 'insP_stat.p', 'wb') as pfile:
            pickle.dump(insP_stat.get_states(), pfile)

    with open(raw_dir + 'gexc_spks.p', 'wb') as pfile:
        pickle.dump(GExc_spks.get_states(), pfile)
    with open(raw_dir + 'ginh_spks.p', 'wb') as pfile:
        pickle.dump(GInh_spks.get_states(), pfile)

    if tr.external_mode == 'poisson':
        with open(raw_dir + 'pinp_spks.p', 'wb') as pfile:
            pickle.dump(PInp_spks.get_states(), pfile)

    with open(raw_dir + 'gexc_rate.p', 'wb') as pfile:
        pickle.dump(GExc_rate.get_states(), pfile)
        if tr.rates_rec:
            pickle.dump(GExc_rate.smooth_rate(width=25 * ms), pfile)
    with open(raw_dir + 'ginh_rate.p', 'wb') as pfile:
        pickle.dump(GInh_rate.get_states(), pfile)
        if tr.rates_rec:
            pickle.dump(GInh_rate.smooth_rate(width=25 * ms), pfile)

    if tr.external_mode == 'poisson':
        with open(raw_dir + 'pinp_rate.p', 'wb') as pfile:
            pickle.dump(PInp_rate.get_states(), pfile)
            if tr.rates_rec:
                pickle.dump(PInp_rate.smooth_rate(width=25 * ms), pfile)

    # ----------------- add raw data ------------------------
    fpath = 'builds/%.4d/' % (tr.v_idx)

    from pathlib import Path

    Path(fpath + 'turnover').touch()
    turnover_data = np.genfromtxt(fpath + 'turnover', delimiter=',')
    os.remove(fpath + 'turnover')

    with open(raw_dir + 'turnover.p', 'wb') as pfile:
        pickle.dump(turnover_data, pfile)

    Path(fpath + 'spk_register').touch()
    spk_register_data = np.genfromtxt(fpath + 'spk_register', delimiter=',')
    os.remove(fpath + 'spk_register')

    with open(raw_dir + 'spk_register.p', 'wb') as pfile:
        pickle.dump(spk_register_data, pfile)

    Path(fpath + 'scaling_deltas').touch()
    scaling_deltas_data = np.genfromtxt(fpath + 'scaling_deltas',
                                        delimiter=',')
    os.remove(fpath + 'scaling_deltas')

    with open(raw_dir + 'scaling_deltas.p', 'wb') as pfile:
        pickle.dump(scaling_deltas_data, pfile)

    with open(raw_dir + 'profiling_summary.txt', 'w+') as tfile:
        tfile.write(str(profiling_summary(net)))

    # --------------- cross-correlations ---------------------

    if tr.crs_crrs_rec:

        GExc_spks = GExc_spks.get_states()
        synee_a = SynEE_a_states
        wsize = 100 * pq.ms

        for binsize in [1 * pq.ms, 2 * pq.ms, 5 * pq.ms]:

            wlen = int(wsize / binsize)

            ts, idxs = GExc_spks['t'], GExc_spks['i']
            idxs = idxs[ts > tr.T1 + tr.T2 + tr.T3 + tr.T4]
            ts = ts[ts > tr.T1 + tr.T2 + tr.T3 + tr.T4]
            ts = ts - (tr.T1 + tr.T2 + tr.T3 + tr.T4)

            sts = [
                neo.SpikeTrain(ts[idxs == i] / second * pq.s,
                               t_stop=tr.T5 / second * pq.s)
                for i in range(tr.N_e)
            ]

            crs_crrs, syn_a = [], []

            for f, (i, j) in enumerate(zip(synee_a['i'], synee_a['j'])):
                if synee_a['syn_active'][-1][f] == 1:

                    crs_crr, cbin = cch(BinnedSpikeTrain(sts[i],
                                                         binsize=binsize),
                                        BinnedSpikeTrain(sts[j],
                                                         binsize=binsize),
                                        cross_corr_coef=True,
                                        border_correction=True,
                                        window=(-1 * wlen, wlen))

                    crs_crrs.append(list(np.array(crs_crr).T[0]))
                    syn_a.append(synee_a['a'][-1][f])

            fname = 'crs_crrs_wsize%dms_binsize%fms_full' % (wsize / pq.ms,
                                                             binsize / pq.ms)

            df = {
                'cbin': cbin,
                'crs_crrs': np.array(crs_crrs),
                'syn_a': np.array(syn_a),
                'binsize': binsize,
                'wsize': wsize,
                'wlen': wlen
            }

            with open('builds/%.4d/raw/' % (tr.v_idx) + fname + '.p',
                      'wb') as pfile:
                pickle.dump(df, pfile)

    # -----------------  clean up  ---------------------------
    shutil.rmtree('builds/%.4d/results/' % (tr.v_idx))

    # ---------------- plot results --------------------------

    #os.chdir('./analysis/file_based/')

    from analysis.overview_fb import overview_figure
    overview_figure('builds/%.4d' % (tr.v_idx), namespace)

    from analysis.synw_fb import synw_figure
    synw_figure('builds/%.4d' % (tr.v_idx), namespace)

    from analysis.synw_log_fb import synw_log_figure
    synw_log_figure('builds/%.4d' % (tr.v_idx), namespace)
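# A hedged sketch (not from the original script) of reading back one of the
# pickled cross-correlation files written above; the build index 0000 and
# the 5 ms binsize are illustrative.
import pickle
fname = 'builds/0000/raw/crs_crrs_wsize100ms_binsize5.000000ms_full.p'
with open(fname, 'rb') as pfile:
    df = pickle.load(pfile)
print(df['binsize'], df['crs_crrs'].shape)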
Example #25
#         plt.plot(t, i * np.ones_like(t), 'k.', markersize=2)
# plt.axis('tight')
# plt.xlim(0, 1000)
# plt.xlabel('Time (ms)', fontsize=16)
# plt.ylabel('Spike Train Index', fontsize=16)
# plt.gca().tick_params(axis='both', which='major', labelsize=14)
# #plt.show()
# cc_matrix = corrcoef(BinnedSpikeTrain(spiketrain_list, 1 * ms))
# print(cc_matrix[0][1])

rate_correlation = []
for x in permutations(np.divide(np.linspace(0, 100, 11),100), 3):
    if np.isclose(sum(x), 1):  # float-safe check that the rates sum to 1
        spiketrain_list = cpp(500 * Hz, x, 1000 * ms)
        rate = len(LIF_R_ASC_AT(w_e, w_i, spiketrain_list[0], spiketrain_list[1]))
        cc_matrix = corrcoef(BinnedSpikeTrain(spiketrain_list, 5 * ms))
        rate_correlation.append([cc_matrix[0][1], rate])

print(rate_correlation)
x_val = [x[0] for x in rate_correlation]
y_val = [x[1] for x in rate_correlation]
#plt.scatter(x_val, y_val, marker="x")
sns.regplot(x_val, y_val, ci=None)
plt.ylim((0, 30))
plt.xlim((0, 1))
plt.xlabel("Pearson’s correlation coefficient")
plt.ylabel("Output firing rate (Hz)")
#sns.lmplot("Correlation", "Output firing rate (Hz)", pd.DataFrame((x_val, y_val), columns =['Correlation', 'Output firing rate (Hz)']))
plt.show()

slope, intercept = np.polyfit(x_val, y_val, 1)
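# A short follow-up (not from the original script) summarizing the fit.
print('fit: rate = %.2f * cc + %.2f (Hz)' % (slope, intercept))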
Example #27
#print(dataset)
dataSpikeTimes = np.genfromtxt(
    '../../Downsampled Spikes/pop10/01downsample.csv', delimiter=',')
dataSpikeTimes = dataSpikeTimes.transpose()
#print(dataset)
neoDataset = [
    neo.SpikeTrain(i, units='ms', t_start=0, t_stop=1000.0)
    for i in dataset
]
print(neoDataset[0])
print(neoDataset[1])

elephantDataset = [
    BinnedSpikeTrain(j, binsize=10 * ms)
    for j in neoDataset
]
x = BinnedSpikeTrain(neoDataset, binsize=10 * ms)
'''
cov_matrix = corrcoef(x,binary=True)

'''
#pyplot xcorr returns an array of the timelag used, and a corresponding array
#of the calculated correlation coefficient between the two spike trains,
#according to that timelag
#print("pyplot Correlate:\n",plt.xcorr(dataSpikeTimes[0],dataSpikeTimes[1]))
'''
Loop through every combination, and generate the cross_correlograms. Save the highest value
bins to identify the strongest connections in the network.
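'''
# A hedged sketch (not from the original script) of that pairwise loop,
# assuming each pair of the binned single-train objects built above should
# be cross-correlated and the peak bin kept; the import and names are
# illustrative.
from elephant.spike_train_correlation import cross_correlation_histogram
peak_bins = {}
for a in range(len(elephantDataset)):
    for b in range(a + 1, len(elephantDataset)):
        cch_ab, lags = cross_correlation_histogram(elephantDataset[a],
                                                   elephantDataset[b],
                                                   window=[-10, 10],
                                                   border_correction=True)
        peak_bins[(a, b)] = cch_ab[:, 0].magnitude.max()
strongest = max(peak_bins, key=peak_bins.get)
print('strongest connection:', strongest, 'with peak', peak_bins[strongest])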
Example #28
def plot_cross_corr(pre_spikes, spiketime_dict, presyn, binsize, maxtime=0):
    mean_cc = {}
    mean_cc_shuffle = {}
    cc_shuffle_corrected = {}
    # key indexes the simulation condition, e.g. stpYN
    for key in pre_spikes.keys():
        numtrials = len(pre_spikes[key])
        if maxtime == 0:
            last_spike = [train[-1] for train in pre_spikes[key][0][presyn]]
            t_end = max(np.round(np.max(last_spike)), maxtime)
            maxtime = t_end
        else:
            t_end = maxtime
        cc_hist = [[] for t in range(numtrials)]
        fig, axes = plt.subplots(numtrials, numtrials, sharex=True)
        fig.suptitle('cross correlograms ' + key)
        for trial_in in range(numtrials):
            for trial_out in range(numtrials):
                if isinstance(pre_spikes[key][trial_in][presyn], list):
                    spikes = np.sort(
                        np.concatenate(pre_spikes[key][trial_in][presyn]))
                else:
                    spikes = pre_spikes[key][trial_in][presyn]
                train = SpikeTrain(spikes * q.s,
                                   t_start=0 * q.s,
                                   t_stop=t_end * q.s)
                in_train = BinnedSpikeTrain(train,
                                            t_start=0 * q.s,
                                            t_stop=t_end * q.s,
                                            binsize=binsize * q.s)
                train = SpikeTrain(spiketime_dict[key][trial_out] * q.s,
                                   t_stop=t_end * q.s)
                out_train = BinnedSpikeTrain(train,
                                             t_start=0 * q.s,
                                             t_stop=t_end * q.s,
                                             binsize=binsize * q.s)
                #print('trial_in,trial_out', trial_in, trial_out)
                cc_hist[trial_in].append(
                    elephant.spike_train_correlation.
                    cross_correlation_histogram(in_train, out_train))
                axes[trial_in, trial_out].plot(
                    cc_hist[trial_in][trial_out][0].magnitude[:, 0])
            axes[trial_in, 0].set_ylabel('input ' + str(trial_in))
        for trial_out in range(trial_in, numtrials):
            axes[-1, trial_out].set_xlabel('output ' + str(trial_out))
        #shuffle corrected mean cross-correlogram
        #initialize these to accumulate across conditions, e.g. pre and post-HFS, and possibly across keys (str freq)
        cc_same = [cc_hist[a][a][0].magnitude[:, 0] for a in range(numtrials)]
        mean_cc[key] = np.mean(cc_same, axis=0)
        cc_diff = [
            cc_hist[a][b][0].magnitude[:, 0] for a in range(numtrials)
            for b in range(numtrials) if b != a
        ]
        mean_cc_shuffle[key] = np.mean(cc_diff, axis=0)
        cc_shuffle_corrected[key] = mean_cc[key] - mean_cc_shuffle[key]
    #PLOT mean cc and shuffle corrected for each key on one figure
    xbins = np.linspace(-t_end, t_end, len(mean_cc[key]))
    fig, axes = plt.subplots(3, 1, sharex=True)
    fig.suptitle('cross correlograms ' + presyn)
    for key in mean_cc.keys():
        axes[0].plot(xbins, mean_cc[key], label=key)
        axes[1].plot(xbins, mean_cc_shuffle[key], label=key)
        axes[2].plot(xbins, cc_shuffle_corrected[key], label=key)
    axes[0].set_ylabel('mean cc')
    axes[1].set_ylabel('mean cc shuffled')
    axes[2].set_ylabel('mean cc shuffled-corrected')
    axes[2].legend()
    return
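# A hedged usage sketch (not from the original script) with synthetic data;
# the nested structure (condition -> list of trials -> dict of presynaptic
# trains) is inferred from the function body and all names are illustrative.
import numpy as np
rng = np.random.default_rng(0)
ntrials = 3
pre_spikes = {'stpYN': [{'cortical': [np.sort(rng.uniform(0, 10, 50))]}
                        for _ in range(ntrials)]}
spiketime_dict = {'stpYN': [np.sort(rng.uniform(0, 10, 40))
                            for _ in range(ntrials)]}
plot_cross_corr(pre_spikes, spiketime_dict, 'cortical', binsize=0.01,
                maxtime=10)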
Example #29
28Sep2017
NOTE: Work on conversion of discrete spike time lists to binary spike counts.
Use this conversion for PSTH and for spike-time correlations.
"""
## ADDED bstc_df to cut the first 550 ms of data out for correlations
### conversion of discrete spike times to binary counts

import numpy as np
import pandas as pd
import quantities as pq
from elephant.conversion import BinnedSpikeTrain

binnedst_list = []
bst_df_list = []
bstc_df_list = []
bst_sum_list = []
for item in spktrain_list:
    bst_list = BinnedSpikeTrain(item,
                                binsize=1.0 * pq.ms,
                                t_start=0.0 * pq.ms,
                                t_stop=3100 * pq.ms)

    bst_arr = bst_list.to_array()  # export binned spike counts to an array
    bst_df = pd.DataFrame(bst_arr).T  # turn into a df and transpose (.T): rows = time bins
    bst_sum = bst_df.apply(np.sum, axis=1)  # population count per time bin (sum across columns)
    bst_df_list.append(bst_df)
    bstc_df = bst_df.iloc[550:3050]  # drop the first 550 ms (1 ms bins) for correlations
    bstc_df_list.append(bstc_df)
    #plt.figure()
    #plt.plot(bst_sum)
    binnedst_list.append(bst_list)
    bst_sum_list.append(bst_sum)

#%%
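
As a follow-up to the note at the top of this example, here is a hedged sketch of the two stated uses: a PSTH from the per-bin population sums, and spike-count correlations on the trimmed window. bst_sum_list and bstc_df_list come from the loop above; everything else is illustrative.

import matplotlib.pyplot as plt

# PSTH: population spike count per 1 ms bin for the first item
plt.figure()
plt.plot(bst_sum_list[0])
plt.xlabel('time (ms)')
plt.ylabel('spike count per 1 ms bin')

# spike-count correlations: neuron-by-neuron Pearson r computed on the
# trimmed (onset-free) window held in each bstc_df
corr_mats = [bstc.corr() for bstc in bstc_df_list]
print(corr_mats[0])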
Example #30
    print(latencies)
    print(p_values)
    assert latencies[min(idxs)] == stim_latency
    return baseline_trials, test_trials, spike_train, epoch


if __name__ == '__main__':
    import matplotlib
    matplotlib.use('Qt5Agg')
    import quantities as pq

    # baseline_trials, test_trials, spike_train, epoch = _test_salt_inh()
    baseline_trials, test_trials, spike_train, epoch = test_salt_exc()

    binsize = 1 * pq.ms
    import matplotlib.pyplot as plt
    from exana.stimulus import plot_psth
    plot_psth(trials=test_trials, title='test', binsize=10*pq.ms)
    plot_psth(trials=baseline_trials, title='baseline', binsize=10*pq.ms)
    plot_psth(sptr=spike_train, epoch=epoch, t_start=-1 * pq.s,
              t_stop=0.5 * pq.s, title='full', binsize=10*pq.ms)
    plt.show()
    # NOTE for saving matlab var and test vs original matlab script
    from elephant.conversion import BinnedSpikeTrain
    test_binary = BinnedSpikeTrain(test_trials, binsize=binsize).to_array()
    baseline_binary = BinnedSpikeTrain(baseline_trials, binsize=binsize).to_array()

    import scipy.io
    scipy.io.savemat('/home/mikkel/apps/salt_data.mat',
                     {'spt_baseline': baseline_binary,
                      'spt_test': test_binary})
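
A small sanity check, not part of the original: the saved file can be read back with scipy.io.loadmat to confirm the arrays the MATLAB SALT script will receive.

import scipy.io

mat = scipy.io.loadmat('/home/mikkel/apps/salt_data.mat')
# trials x bins arrays of 1 ms spike counts
print(mat['spt_test'].shape, mat['spt_baseline'].shape)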
Example #31

import neo
import numpy as np
import quantities as pq
from elephant.conversion import BinnedSpikeTrain

def time_histogram(spiketrains,
                   bin_size,
                   t_start=None,
                   t_stop=None,
                   output='counts',
                   binary=False):
    """
    Time Histogram of a list of `neo.SpikeTrain` objects.

    Visualization of this function is covered in Viziphant:
    :func:`viziphant.statistics.plot_time_histogram`.

    Parameters
    ----------
    spiketrains : list of neo.SpikeTrain
        `neo.SpikeTrain`s with a common time axis (same `t_start` and `t_stop`)
    bin_size : pq.Quantity
        Width of the histogram's time bins.
    t_start : pq.Quantity, optional
        Start time of the histogram. Only events in `spiketrains` falling
        between `t_start` and `t_stop` (both included) are considered in the
        histogram.
        If None, the maximum `t_start` of all `neo.SpikeTrain`s is used as
        `t_start`.
        Default: None
    t_stop : pq.Quantity, optional
        Stop time of the histogram. Only events in `spiketrains` falling
        between `t_start` and `t_stop` (both included) are considered in the
        histogram.
        If None, the minimum `t_stop` of all `neo.SpikeTrain`s is used as
        `t_stop`.
        Default: None
    output : {'counts', 'mean', 'rate'}, optional
        Normalization of the histogram. Can be one of:
        * 'counts': spike counts at each bin (as integer numbers)
        * 'mean': mean spike counts per spike train
        * 'rate': mean spike rate per spike train. Like 'mean', but the
          counts are additionally normalized by the bin width.
        Default: 'counts'
    binary : bool, optional
        If True, indicates whether all `neo.SpikeTrain` objects should first
        be binned to a binary representation (using the
        `conversion.BinnedSpikeTrain` class) and the calculation of the
        histogram is based on this representation.
        Note that the output is not binary, but a histogram of the converted,
        binary representation.
        Default: False

    Returns
    -------
    neo.AnalogSignal
        A `neo.AnalogSignal` object containing the histogram values.
        `neo.AnalogSignal[j]` is the histogram computed between
        `t_start + j * bin_size` and `t_start + (j + 1) * bin_size`.

    Raises
    ------
    ValueError
        If `output` is not 'counts', 'mean' or 'rate'.

    Warns
    -----
    UserWarning
        If `t_start` is None and the objects in `spiketrains` have different
        `t_start` values.
        If `t_stop` is None and the objects in `spiketrains` have different
        `t_stop` values.

    See also
    --------
    elephant.conversion.BinnedSpikeTrain

    """
    # Bin the spike trains and sum across columns
    bs = BinnedSpikeTrain(spiketrains,
                          t_start=t_start,
                          t_stop=t_stop,
                          bin_size=bin_size)

    if binary:
        bs = bs.binarize()
    bin_hist = bs.get_num_of_spikes(axis=0)
    # Flatten array
    bin_hist = np.ravel(bin_hist)
    # Renormalise the histogram
    if output == 'counts':
        # Raw
        bin_hist = pq.Quantity(bin_hist, units=pq.dimensionless, copy=False)
    elif output == 'mean':
        # Divide by number of input spike trains
        bin_hist = pq.Quantity(bin_hist / len(spiketrains),
                               units=pq.dimensionless,
                               copy=False)
    elif output == 'rate':
        # Divide by number of input spike trains and bin width
        bin_hist = bin_hist / (len(spiketrains) * bin_size)
    else:
        raise ValueError('Parameter output is not valid.')

    return neo.AnalogSignal(signal=np.expand_dims(bin_hist, axis=1),
                            sampling_period=bin_size,
                            units=bin_hist.units,
                            t_start=bs.t_start,
                            normalization=output,
                            copy=False)
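
A brief usage sketch for time_histogram; the two random spike trains and the 5 ms bin size are illustrative assumptions, not part of the original.

np.random.seed(0)
sts = [neo.SpikeTrain(np.sort(np.random.uniform(0, 1, 100)) * pq.s,
                      t_start=0 * pq.s, t_stop=1 * pq.s)
       for _ in range(2)]

counts = time_histogram(sts, bin_size=5 * pq.ms)                 # raw counts
rate = time_histogram(sts, bin_size=5 * pq.ms, output='rate')    # counts / (n trains * bin width)
print(counts.shape, rate.units)  # (200, 1) and 1/ms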