def spk_poisson_tvrate(rate_fun, T=10.0, N=1, mode='spike', **kwargs):
    """
    Generate Poisson spike train for arbitrary time-varying rate.

    Inputs:
    rate_fun   : Non-negative rate function in the form of f(x)
    - T        : Period [0,T] for the spike train
    - N        : Number of neurons
    mode       : {'spike'} | 'interval' If 'spike', recompute every spike; if 'interval' rate is compute on the whole
                 [0,T] interval
    kwargs     :

    Output:
    - spk_train: spike train instants (sorted) with corresponding neuron

    Maurizio De Pitta', Basque Center of Applied Mathematics, June 11th, 2018.
    """

    pars = {
        'npts': 100,  # number of points at which to evaluate the rate
        'dt': 1e-3,
        'eps': 5e-3
    }
    pars = gu.varargin(pars, **kwargs)
    if 'dt' in kwargs:
        pars['npts'] = int(T / kwargs['dt'])
    if mode == 'interval':
        time = linspace(0., T, int(pars['npts']))
        rate = rate_fun(time)
        dt = time[1] - time[0]
        sp_index = less(random.rand(N, pars['npts']), tile((rate * dt),
                                                           (N, 1)))
        spk_train = tile(time, (N, 1))[sp_index]
        indexes = (tile(arange(0, N), (pars['npts'], 1))).T[sp_index]
        si = spk_train.argsort()
        # Stack and sort
        spk_train = vstack((spk_train[si], indexes[si]))
    else:
        # mode == 'spike'
        spk_train, indexes = empty(0), empty(0)
        for i in range(N):
            t = 0
            isi = []
            xscale = float(T - t)
            yscale = amax(rate_fun(linspace(t, T, pars['npts'])))
            while (T - t) > pars['eps']:
                # Rejection sampling of the next ISI: draw a candidate interval
                # tval and accept it against the rate evaluated at the absolute
                # time of the candidate spike, t + tval
                tval = xscale * random.rand(1)
                yval = yscale * random.rand(1)
                if yval < rate_fun(t + tval):
                    isi.append(tval)
                    t = t + tval
                    xscale = float(T - t)
                    yscale = amax(rate_fun(linspace(t, T, pars['npts'])))
            # Generate the final spike train, prepending the onset instant t=0
            tspk = cut_isi(isi, T)
            spk_train = concatenate((spk_train, r_[0, tspk]))
            indexes = concatenate((indexes, i * ones(len(tspk) + 1)))
        spk_train = vstack((spk_train, indexes))

    return spk_train
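
# Usage sketch added for illustration (the '_demo_tvrate' helper is not part of
# the original module). It assumes numpy names (sin, pi) are available through
# this module's imports, as linspace and random already are.
def _demo_tvrate():
    # Sinusoidally modulated, non-negative rate [Hz]
    rate_fun = lambda t: 10.0 * (1.0 + sin(2. * pi * t))
    # Three neurons over [0,5] s, with the rate discretized on the whole interval
    spk = spk_poisson_tvrate(rate_fun, T=5.0, N=3, mode='interval', dt=1e-3)
    return spk  # row 0: sorted spike times; row 1: neuron indexes
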
def spk_periodic(T=10, N=1, **kwargs):
    """
    Generate periodic spike train


    Use:
    tspk = spk_periodic(T=10,N=1,**kwargs)
    
    Input arguments:
    - T        : period [0,T] for the spike train
    - N        : number of neurons
    - **kwargs : 
        - rate : average rate of Poisson spike [Hz]
        - trp  : refractory period [s]
    
    Output:
    - tspk     : spike train instants (sorted) with corresponding neuron
 
    Maurizio De Pitta', The University of Chicago, September 23rd, 2014.        
    """
    pars = {'rate': 100, 'trp': 2e-3}
    pars = gu.varargin(pars, **kwargs)
    N_spikes = int(T * pars['rate'])  # Average number of spikes per trial
    spk_train, indexes = empty(0), empty(0)
    for i in range(N):
        # Periodic ISIs at the effective rate, accounting for the refractory period
        tspk = cut_isi(ones(N_spikes) * (1. / pars['rate'] + pars['trp']), T)
        spk_train = concatenate((spk_train, tspk))
        indexes = concatenate((indexes, i * ones(tspk.size)))
    # Sort spikes
    si = spk_train.argsort()
    # Stack and sort
    spk_train = vstack((spk_train[si], indexes[si]))
    return spk_train
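
# Usage sketch added for illustration (the '_demo_periodic' helper is not part
# of the original module): a 20-Hz periodic train for two neurons, where the
# effective inter-spike interval is 1/rate + trp.
def _demo_periodic():
    spk = spk_periodic(T=2.0, N=2, rate=20, trp=2e-3)
    return spk  # row 0: sorted spike times; row 1: neuron indexes
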
def chi_parameters(**kwargs):
    """
    Parameters for the plain ChI model (De Pitta' et al., JOBP 2009).
    
    Maurizio De Pitta', The University of Chicago, February 28th, 2015.
    """
    pars_lra = lra_parameters()
    pars_lra.pop('ip3', None)
    pars = {
        'vbias': 0,
        'vbeta': 3,
        'vdelta': 0.5,
        'kappad': 1,
        'Kdelta': 0.5,
        'v3k': 2,
        'Kd': 0.5,
        'K3': 1,
        'r5p': 0,
        'ICs': [0.05, 0.05, 0.99]
    }

    # Merge the two parameter dictionaries
    pars = gu.merge_dicts(pars_lra, pars)
    ## User-defined parameters
    pars = gu.varargin(pars, **kwargs)
    # Parameters must be floats
    for k, item in pars.items():
        if isscalar(item):
            pars[k] = float(item)
        else:
            pars[k] = array(item, dtype=float)
    return pars
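
# Usage sketch added for illustration (the '_demo_chi_pars' helper is not part
# of the original module): override defaults and check that scalar parameters
# come back as floats and 'ICs' as a float array.
def _demo_chi_pars():
    pars = chi_parameters(vbeta=2.0, r5p=0.05)
    assert isinstance(pars['vbeta'], float)
    assert pars['ICs'].dtype == float  # lists are coerced to float arrays
    return pars
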
def lra_parameters(**kwargs):
    """
    Parameters for the Li-Rinzel astrocyte (Li and Rinzel, JTB 1994).

    Maurizio De Pitta', INRIA Rhone-Alpes, November 3rd, 2017.
    """
    pars = {
        'd1': 0.1,
        'd2': 2.1,
        'd3': 0.9967,
        'd5': 0.2,
        'a2': 0.4,
        'c1': 0.4,
        'c0': 4,
        'rc': 7,
        'rl': 0.05,
        'ver': 0.9,
        'Ker': 0.1,
        'ip3': 0.1,
        'ICs': [0.05, 0.99]
    }

    ## User-defined parameters
    pars = gu.varargin(pars, **kwargs)
    # Parameters must be floats
    for k, item in pars.items():
        if isscalar(item):
            pars[k] = float(item)
        else:
            pars[k] = array(item, dtype=float)
    return pars
def egchi_parameters(**kwargs):
    """
    Parameters for the extended G-ChI model that includes PKC, DAG and AA.

    Maurizio De Pitta', INRIA Rhone-Alpes, November 27th, 2017.
    """

    pars_gchi = gchi_parameters(Kkc=0.6)
    del pars_gchi['zeta']
    del pars_gchi['T']
    del pars_gchi['pw']
    del pars_gchi['yb']
    pars = {
        'vkd': 0.5,
        'vk': 1.0,
        'OmegaKD': 2.5,
        'vd': 1.5,
        'Kdc': 0.3,
        'Kdd': 0.05,
        'OmegaD': 0.1,
        'ICs': [0.01, 0.05, 0.05, 0.99, 0.05, 0.0]  # [ago, I, C, h, D, P]
    }

    # Merge the two parameter dictionaries
    pars = gu.merge_dicts(pars_gchi, pars)
    ## User-defined parameters
    pars = gu.varargin(pars, **kwargs)
    # Parameters must be floats
    for k, item in pars.items():
        if isscalar(item):
            pars[k] = float(item)
        else:
            pars[k] = array(item, dtype=float)
    return pars
def gchi_parameters(**kwargs):
    """
    Parameters for the G-ChI model (De Pitta' et al., JOBP 2009).
    
    Maurizio De Pitta', The University of Chicago, February 28th, 2015.
    """
    # TODO: vbeta should be defined here, leaving only vbias in chi_parameters, but this
    # requires implementing a bias in the astrocyte model -- right now vbeta is used for
    # constant IP3 production in the ChI model
    pars_chi = chi_parameters()
    pars = {
        'yrel': 0.02,
        'Op': 0.3,
        'OmegaP': 1.8,
        'zeta': 0,
        'Kkc': 0.5,
        'ICs': [0.01, 0.05, 0.05, 0.99],
        # Bias / Exogenous stimulation (default: no bias)
        'T': 0.,  # Period
        'pw': 0.,  # pulse-width
        'yb': 0.  # amplitude
    }
    # Merge the two parameter dictionaries
    pars = gu.merge_dicts(pars_chi, pars)
    ## User-defined parameters
    pars = gu.varargin(pars, **kwargs)
    # Parameters must be floats
    for k, item in pars.items():
        if isscalar(item):
            pars[k] = float(item)
        else:
            pars[k] = array(item, dtype=float)
    return pars
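
# Usage sketch added for illustration (the '_demo_gchi_bias' helper is not part
# of the original module): configure the exogenous stimulation bias as a single
# pulse spanning the whole simulation, as compute_thr below does by assignment.
def _demo_gchi_bias():
    pars = gchi_parameters(T=20.0, pw=20.0, yb=1.5)  # period, pulse width, amplitude
    return pars
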
def model_bounds(model='lra', **kwargs):
    """
    Parameter bounds for model fitting.

    Input:
    - model : {'lra'} | 'lra2' | 'lra_fit' | 'chi'

    Output:
    - bounds_dict : OrderedDict of [lower, upper, scale] entries, with scale in
                    {'lin','log'}; 'log' bounds are returned as log10 values.
    """
    # Ordered dictionary for the bounds: the order of keys MUST mirror the order
    # of the variables 'x' in the cost function
    bounds_dict = collections.OrderedDict()
    if model == 'lra':
        bounds_dict['d1'] = [0.1, 10.0, 'log']
        bounds_dict['d2'] = [0.1, 10.0, 'log']
        bounds_dict['d3'] = [0.1, 10.0, 'log']
        bounds_dict['d5'] = [0.1, 10.0, 'log']
        bounds_dict['a2'] = [0.1, 5.0, 'log']
    elif model == 'lra2':
        bounds_dict['d1'] = [0.1, 0.5, 'lin']
        bounds_dict['d2'] = [1.0, 4.5, 'lin']
        bounds_dict['d5'] = [0.05, 0.5, 'log']
        bounds_dict['a2'] = [0.1, 0.5, 'lin']
    elif model == 'lra_fit':
        bounds_dict['rc'] = [2.0, 20.0, 'lin']
        bounds_dict['ver'] = [2.0, 20.0, 'lin']
        bounds_dict['ip3'] = [0.05, 0.5, 'lin']
        bounds_dict['C0'] = [0.05, 0.5, 'lin']
        bounds_dict['h0'] = [0.0, 1.0, 'lin']
        bounds_dict['c0'] = [4.0, 12.0, 'lin']
    elif model == 'chi':
        bounds_dict['vbeta'] = [0.001, 5.0, 'log']
        bounds_dict['vdelta'] = [0.001, 0.5, 'log']
        bounds_dict['v3k'] = [0.1, 5.0, 'log']
        bounds_dict['r5p'] = [0.1, 1.0, 'lin']
        bounds_dict['C0'] = [0.05, 5.0, 'log']
        bounds_dict['h0'] = [0.0, 1.0, 'lin']
        bounds_dict['I0'] = [0.05, 5.0, 'log']

    # Custom-defined bounds
    bounds_dict = gu.varargin(bounds_dict, **kwargs)

    # Treatment of lin/log scaling parameters
    for b in bounds_dict.values():
        if b[-1] == 'log': b[:2] = np.log10(b[:2])

    return bounds_dict
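
# Usage sketch added for illustration (the '_demo_bounds' helper is not part of
# the original module): bounds flagged 'log' are returned as log10 values,
# ready for optimizers that sample in log space.
def _demo_bounds():
    bd = model_bounds(model='lra')
    lo, hi, scale = bd['d1']
    # 'd1' is log-scaled, so [0.1, 10.0] is returned as [-1.0, 1.0]
    assert scale == 'log' and np.allclose([lo, hi], [-1.0, 1.0])
    return bd
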
def gchidp_pars(**kwargs):
    """
    Parameters for the extended G-ChI model with DAG and PKC dynamics.
    """
    pars = {
        'yrel': 0,
        'Op': 0.3,
        'OmegaP': 1.8,
        'vbias': 0,
        'vbeta': 3,
        'vdelta': 0.5,
        'kappad': 1,
        'Kdelta': 0.5,
        'v3k': 2,
        'Kd': 0.5,
        'K3': 1,
        'r5p': 0.1,
        'vkd': 0.28,
        'Kkc': 0.5,
        'OmegaKD': 0.33,
        'vk': 1.0,
        'vd': 1.0,
        'Kdc': 0.3,
        'Kdd': 0.005,
        'OmegaD': 0.1,
        'd1': 0.1,
        'd2': 2.1,
        'd3': 0.9967,
        'd5': 0.2,
        'a2': 0.4,
        'c1': 0.4,
        'c0': 4,
        'rc': 7,
        'rl': 0.05,
        'ver': 0.9,
        'KER': 0.1
    }
    # Custom parameters
    pars = gu.varargin(pars, **kwargs)
    return pars
def solver_opts(method='euler', **kwargs):
    """
    Define a standard dictionary of solver settings to use in C/C++ simulators.

    Use:
    options = solver_opts(...)

    Input:
    - method    : {'euler'} | 'rk4' | 'gsl' | 'gsl_rk8pd' | 'gsl_msadams' | None | 'none' | 'steady_state'

    **kwargs:
        - t0        : initial instant of integration [s]
        - tfin      : final instant of integration [s]
        - transient : transient to drop from the output result ('euler'/'rk4' only) [s]
        - tbin      : time bin of the solution ('euler'/'rk4' only) [s]
        - dt        : step of integration [s]
        - atol      : absolute tolerance on the error (GSL and steady-state solvers)
        - rtol      : relative tolerance on the error (GSL and steady-state solvers)
        - nmax      : maximum number of iterations (steady-state solver only)

    Output:
    - options   : dictionary of solver settings (keys are the inputs, plus 'solver').

    v1.3
    Added options for GSL solvers.
    Append 'solver' key at the end for all methods.
    Maurizio De Pitta', INRIA Rhone-Alpes, November 1st, 2017.
    """


    ## User-defined parameters
    if method in ['euler','rk4']:
        opts = {'t0'        : 0.0,
                'tfin'      : 20.0,
                'transient' : 0.0,
                'tbin'      : 1e-2,
                'dt'        : 1e-3
                }
    elif method in ['gsl', 'gsl_rk8pd', 'gsl_msadams']:
        opts = {'t0'     : 0.0,
                'tfin'   : 1.0,
                'dt'     : 1e-3,
                'atol'   : 1e-8,
                'rtol'   : 1e-6
                }
    else:
        # This is the case of solver 'None','none','steady_state'
        opts = {'nmax' : 1000, # Max number of iterations for the solver
                'atol' : 1e-10,# Absolute tolerance on error
                'rtol' : 0.0   # Relative tolerance on error (default: 0.0: not considered)
                }

    opts = gu.varargin(opts, **kwargs)
    # Cast all scalar, non-string, non-boolean settings to float
    for k, item in opts.items():
        if (not isinstance(item, (str, bool))) and (not hasattr(item, '__len__')) and (item is not None):
            opts[k] = float(item)

    if method in ['gsl', 'gsl_rk8pd']:
        opts['nstep'] = int((opts['tfin'] - opts['t0']) // opts['dt']) + 1  # +1 since counting starts from zero

    if (not method) or (method in ['none','steady_state']):
        for p in ['nmax']:
            opts[p] = np.intc(opts[p])

    # Include solver specifier in the final dictionary
    opts['solver'] = method
    return opts
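
# Usage sketch added for illustration (the '_demo_solver_opts' helper is not
# part of the original module): build options for a fixed-step GSL run; the
# 'nstep' and 'solver' keys are derived from the other settings.
def _demo_solver_opts():
    opts = solver_opts(method='gsl', t0=0.0, tfin=1.0, dt=1e-3)
    assert opts['solver'] == 'gsl' and 'nstep' in opts
    return opts
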
def compute_thr(**kwargs):
    """
    Simulation routine to compute CICR threshold
    """

    # Load relevant data from fits
    po_data = svu.loaddata('../data/fit_po.pkl')[0]
    lr_data = svu.loaddata('../data/fit_lra.pkl')[0]
    chi_data = svu.loaddata('../data/chi_fit_0_bee.pkl')[0]

    # Control parameter set
    pars = models.gchi_parameters(d1=po_data[0],
                                  d2=po_data[1],
                                  d3=po_data[0],
                                  d5=po_data[2],
                                  a2=po_data[3],
                                  c0=10.0,
                                  c1=0.5,
                                  rl=0.1,
                                  Ker=0.1,
                                  rc=lr_data[0],
                                  ver=10.0,
                                  vbeta=0.8,
                                  vdelta=2 * chi_data[1],
                                  v3k=chi_data[3],
                                  r5p=chi_data[3])
    # Custom parameters
    pars = gu.varargin(pars, **kwargs)

    # Simulation duration
    t0 = 0.0
    tfin = 20.0

    # Generate model
    astro = models.Astrocyte(model='gchi',
                             d1=pars['d1'],
                             d2=pars['d2'],
                             d3=pars['d3'],
                             d5=pars['d5'],
                             a2=pars['a2'],
                             c0=pars['c0'],
                             c1=pars['c1'],
                             rl=pars['rl'],
                             Ker=pars['Ker'],
                             rc=pars['rc'],
                             ver=pars['ver'],
                             vbeta=pars['vbeta'],
                             vdelta=pars['vdelta'],
                             v3k=pars['v3k'],
                             r5p=pars['r5p'],
                             ICs=np.asarray([0.0, 0.05, 0.05, 0.9]))
    options = su.solver_opts(t0=t0,
                             tfin=30.,
                             dt=1e-4,
                             atol=1e-8,
                             rtol=1e-6,
                             method="gsl_msadams")
    # First run to seek resting state
    astro.integrate(algparams=options, normalized=False)
    # Update model with new ICs and add pulse train
    options = su.solver_opts(t0=t0,
                             tfin=tfin,
                             dt=1e-4,
                             atol=1e-8,
                             rtol=1e-6,
                             method="gsl_msadams")
    astro.ICs = np.r_[astro.sol['rec'][-1], astro.sol['ip3'][-1],
                      astro.sol['ca'][-1], astro.sol['h'][-1]]
    astro.pars['T'] = tfin
    astro.pars['pw'] = tfin
    yb = np.arange(0.5, 5.0, 0.05)
    # yb = np.arange(1.0, 3.0, 0.3)
    Nt = np.size(yb)
    # Allocate results
    ca_traces = [None] * Nt
    ip3_traces = [None] * Nt
    bias_traces = [None] * Nt
    for i in range(Nt):
        astro.pars['yb'] = yb[i]
        # Effective simulation
        astro.integrate(algparams=options, normalized=False)
        # Save results
        ca_traces[i] = astro.sol['ca']
        ip3_traces[i] = astro.sol['ip3']
        bias_traces[i] = astro.bias(twin=[t0, tfin], dt=0.01)

    traces = {
        'yb': yb,
        'ts': astro.sol['ts'],
        'ca': ca_traces,
        'ip3': ip3_traces,
        'bias': bias_traces
    }
    del astro
    gc.collect()
    return traces, pars
def spk_poisson(T=10, N=1, **kwargs):
    """
    Generate Poisson-distributed spike train.

    Use:
    tspk = spk_poisson(T=10,N=1,**kwargs)

    Input arguments:
    - T        : period [0,T] for the spike train
    - N        : number of neurons
    - **kwargs :
        - rate : average rate of Poisson spike [Hz]
        - trp  : refractory period [s]

    Output:
    - tspk     : spike train instants (sorted) with corresponding neuron

    v1.1
    Corrected minor bug in handling tau_r and rate array.
    Maurizio De Pitta', Basque Center of Applied Mathematics, August 19, 2018.

    v1.0
    Maurizio De Pitta', The University of Chicago, September 9th, 2014.
    """
    pars = {'rate': 100, 'trp': 2e-3}
    pars = gu.varargin(pars, **kwargs)
    # Check that T and rate are of the same size
    assert size(pars['rate']) == size(T), \
        "Size of stimulus ending times (T) (%d) does not match size of stimulus rates (%d)" % (size(T), size(pars['rate']))
    # The following ensures that rate and T are Numpy arrays
    if hasattr(pars['rate'], '__len__'):
        pars['rate'] = asarray(pars['rate'])
        T = asarray(T)
    else:
        pars['rate'] = asarray([pars['rate']])
        T = asarray([T])

    # Compute N_spikes : allow for some fluctuation in the number of candidate
    # spikes to avoid border effects; at least 2 spikes are required by thinning
    N_spikes = int(max(sum(T) * (1. + random.random()) * amax(pars['rate']), 2))
    spk_train, indexes = empty(0), empty(0)
    for i in range(N):
        # For a homogeneous Poisson process the ISIs below are final; for a
        # non-homogeneous process they are candidate ISIs to be thinned
        isi_homog = -log(random.rand(N_spikes)) / amax(pars['rate'])  # Random ISIs at the maximum rate
        if (pars['trp'] > 0) or (size(pars['rate']) > 1):
            tspk = cut_isi(
                thin_isi(isi_homog, pars['trp'], N_spikes, pars['rate'],
                         cumsum(T)), sum(T))
        else:
            tspk = cut_isi(isi_homog, sum(T))
        spk_train = concatenate((spk_train, tspk))
        indexes = concatenate((indexes, i * ones(tspk.size)))
    # Sort spikes
    si = spk_train.argsort()
    # Stack and sort
    spk_train = vstack((spk_train[si], indexes[si]))
    return spk_train
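
# Usage sketch added for illustration (the '_demo_poisson' helper is not part
# of the original module): a homogeneous 50-Hz train with refractoriness, and a
# piecewise-constant train where T and rate are matching-size arrays (assumed
# here to hold epoch durations and epoch rates, given the internal use of
# cumsum(T) and sum(T)); the latter case is handled internally by thinning.
def _demo_poisson():
    spk1 = spk_poisson(T=10.0, N=5, rate=50, trp=2e-3)
    spk2 = spk_poisson(T=[5.0, 5.0], N=1, rate=[20, 80], trp=0.0)
    return spk1, spk2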