Esempio n. 1
0
 def __init__(self, N, spiketimes, clock=None, period=None, gather=False, sort=True):
     """Set up a spike generator group: resolve the clock, optionally
     gather spike times into per-timestep bins, build the threshold and
     initialise the underlying NeuronGroup."""
     clock = guess_clock(clock)
     if gather:
         # gathering assumes the spike times are already sorted, and its
         # output no longer requires sorting in the threshold
         spiketimes = self.gather(spiketimes, clock.dt)
         sort = False
     self.period = period
     thresh = SpikeGeneratorThreshold(N, spiketimes, period=period, sort=sort)
     NeuronGroup.__init__(self, N, model=LazyStateUpdater(), threshold=thresh, clock=clock)
Esempio n. 2
0
 def __init__(self, threshold=1 * mvolt, refractory=1 * msecond, state=0, clock=None):
     """Threshold on a state variable with an absolute refractory period,
     stored internally as a number of clock timesteps."""
     self.threshold = threshold
     self.state = state
     clock = guess_clock(clock)
     steps = int(refractory / clock.dt)
     # When refractory is an exact multiple of dt, drop one step so that a
     # state held above threshold spikes at 0ms, 5ms, 10ms, ... for
     # refractory=5ms instead of every (steps+1)-th timestep.
     if steps > 0 and is_approx_equal(steps * clock.dt, refractory):
         steps -= 1
     self.refractory = steps
Esempio n. 3
0
 def __init__(self, spiketimes, clock=None, period=None):
     """Pass spiketimes

     spiketimes is a list of lists, one list for each neuron
     in the group. Each sublist consists of the spike times.
     """
     clock = guess_clock(clock)
     n_neurons = len(spiketimes)
     thresh = MultipleSpikeGeneratorThreshold(spiketimes, period=period)
     NeuronGroup.__init__(self, n_neurons, model=LazyStateUpdater(), threshold=thresh, clock=clock)
Esempio n. 4
0
 def reinit(self, states=True):
     '''
     Resets the objects and clocks. If ``states=False`` it will not reinit
     the state variables.
     '''
     targets = self.groups + self.connections + self.operations
     if self.clock is None:
         # no network-level clock: reset the default/guessed clock directly
         guess_clock(None).reinit()
     else:
         targets.append(self.clock)
     if hasattr(self, 'clocks'):
         targets.extend(self.clocks)
     for obj in targets:
         if not hasattr(obj, 'reinit'):
             continue
         if isinstance(obj, NeuronGroup):
             # newer NeuronGroup.reinit takes states=...; fall back for
             # objects with the old zero-argument signature
             try:
                 obj.reinit(states=states)
             except TypeError:
                 obj.reinit()
         else:
             obj.reinit()
Esempio n. 5
0
 def set_clock(self):
     '''
     Sets the clock and checks that clocks of all groups are synchronized.

     Takes the clock of the first group/operation if any exist, otherwise
     guesses one.  Raises TypeError (kept for backward compatibility with
     existing callers) if the clocks are not synchronized.
     '''
     if not self.same_clocks():
         # raise-as-call form is valid on both Python 2 and 3, unlike the
         # old "raise TypeError, msg" statement
         raise TypeError('Clocks are not synchronized!')  # other error type?
     groups_and_operations = self.groups + self.operations
     if groups_and_operations:
         self.clock = groups_and_operations[0].clock
     else:
         self.clock = guess_clock()
Esempio n. 6
0
 def __init__(self, baseupdater, nstate, mu, sigma, clock=None):
     '''
     baseupdater = source neuron StateUpdater
     nstate = index of synaptic state variable
     mu = mean synaptic input rate (per ms)
     sigma = s.d. of synaptic input per ms^{1/2}

     Raises TypeError if no clock is given and none can be guessed.
     '''
     self.baseupdater = baseupdater
     self.nstate = nstate
     if clock is None:  # 'is None' rather than '== None' (PEP 8)
         clock = guess_clock()
     if clock:
         # TODO: check units
         # scale per-ms mean and per-sqrt(ms) s.d. to one timestep
         self.mu = mu * clock.dt
         self.sigma = sigma * clock.dt ** .5
     else:
         # raise-as-call form works on both Python 2 and 3
         raise TypeError("A time reference must be passed.")
Esempio n. 7
0
 def __init__(self, baseupdater, nstate, mu, sigma, clock=None):
     '''
     baseupdater = source neuron StateUpdater
     nstate = index of synaptic state variable
     mu = mean synaptic input rate (per ms)
     sigma = s.d. of synaptic input per ms^{1/2}

     Raises TypeError if no clock is given and none can be guessed.
     '''
     self.baseupdater = baseupdater
     self.nstate = nstate
     if clock is None:  # 'is None' rather than '== None' (PEP 8)
         clock = guess_clock()
     if clock:
         # TODO: check units
         # scale per-ms mean and per-sqrt(ms) s.d. to one timestep
         self.mu = mu * clock.dt
         self.sigma = sigma * clock.dt ** .5
     else:
         # raise-as-call form works on both Python 2 and 3
         raise TypeError("A time reference must be passed.")
Esempio n. 8
0
 def __init__(self, M, B=None, clock=None):
     '''
     Initialize a linear model with dynamics dX/dt = M(X-B) or dX/dt = MX,
     where B is a column vector.

     M may be a coefficient matrix (ndarray) or an Equations object; in
     the latter case the matrix and resting point are extracted from the
     equations, falling back on a numerical solution if that extraction
     fails with a LinAlgError.
     TODO: more checks
     TODO: rest
     '''
     self._useaccel = get_global_preference('useweave_linear_diffeq')
     self._cpp_compiler = get_global_preference('weavecompiler')
     self._extra_compile_args = ['-O3']
     if self._cpp_compiler == 'gcc':
         self._extra_compile_args += get_global_preference(
             'gcc_options')  # ['-march=native', '-ffast-math']
     self._useB = False
     if clock is None:  # 'is None' rather than '== None' (PEP 8)
         clock = guess_clock()
     if isinstance(M, ndarray):
         # exact one-timestep update matrix via the matrix exponential
         self.A = linalg.expm(M * clock.dt)
         self.B = B
     elif isinstance(M, Equations):
         try:
             M, self.B = get_linear_equations(M)
             self.A = linalg.expm(M * clock.dt)
             if self.B is not None:
                 # X(t+dt) = A.(X-B)+B = A.X + C with C = -A.B + B
                 self._C = -dot(self.A, self.B) + self.B
                 self._useB = True
             else:
                 self._useB = False
         except LinAlgError:
             log_info('brian.stateupdater',
                      'Solving linear equations numerically')
             self.A, self._C = get_linear_equations_solution_numerically(
                 M, clock.dt)
             self.B = NotImplemented  # raises error on trying to use this
             self._useB = True
     # note the numpy dot command works faster if self.A has C ordering compared
     # to fortran ordering (although maybe this depends on which implementation
     # of BLAS you're using). The difference is only significant in small
     # calculations because making a copy of self.A is usually not serious, its
     # size is only the number of variables, not the number of neurons.
     self.A = array(self.A, order='C')
     if self._useB:
         self._C = array(self._C, order='C')
Esempio n. 9
0
 def __init__(self, M, B=None, clock=None):
     '''
     Initialize a linear model with dynamics dX/dt = M(X-B) or dX/dt = MX,
     where B is a column vector.

     M may be a coefficient matrix (ndarray) or an Equations object; in
     the latter case the matrix and resting point are extracted from the
     equations, falling back on a numerical solution if that extraction
     fails with a LinAlgError.
     TODO: more checks
     TODO: rest
     '''
     self._useaccel = get_global_preference('useweave_linear_diffeq')
     self._cpp_compiler = get_global_preference('weavecompiler')
     self._extra_compile_args = ['-O3']
     if self._cpp_compiler == 'gcc':
         self._extra_compile_args += get_global_preference('gcc_options') # ['-march=native', '-ffast-math']
     self._useB = False
     if clock is None:  # 'is None' rather than '== None' (PEP 8)
         clock = guess_clock()
     if isinstance(M, ndarray):
         # exact one-timestep update matrix via the matrix exponential
         self.A = linalg.expm(M * clock.dt)
         self.B = B
     elif isinstance(M, Equations):
         try:
             M, self.B = get_linear_equations(M)
             self.A = linalg.expm(M * clock.dt)
             if self.B is not None:
                 # X(t+dt) = A.(X-B)+B = A.X + C with C = -A.B + B
                 self._C = -dot(self.A, self.B) + self.B
                 self._useB = True
             else:
                 self._useB = False
         except LinAlgError:
             log_info('brian.stateupdater', 'Solving linear equations numerically')
             self.A, self._C = get_linear_equations_solution_numerically(M, clock.dt)
             self.B = NotImplemented # raises error on trying to use this
             self._useB = True
     # note the numpy dot command works faster if self.A has C ordering compared
     # to fortran ordering (although maybe this depends on which implementation
     # of BLAS you're using). The difference is only significant in small
     # calculations because making a copy of self.A is usually not serious, its
     # size is only the number of variables, not the number of neurons.
     self.A = array(self.A, order='C')
     if self._useB:
         self._C = array(self._C, order='C')
Esempio n. 10
0
 def __init__(self, t, n, sigma, clock=None):
     """Resolve the clock, then build the data via self.generate."""
     resolved_clock = guess_clock(clock)
     self.clock = resolved_clock
     self.generate(t, n, sigma)
Esempio n. 11
0
 def __init__(self, func, numstates, clock=None):
     """Record the update callable, the number of state variables, and a
     resolved (possibly guessed) clock."""
     self.clock = guess_clock(clock)
     self.func = func
     self.numstates = numstates
Esempio n. 12
0
    def __init__(self, N, spiketimes, clock=None, period=None, 
                 sort=True, gather=None):
        """Build a group of N neurons firing at prescribed times.

        spiketimes may be a list of (index, time) pairs (indices may
        themselves be lists of neuron indices), a (indices, times) tuple
        of arrays, or an ndarray with indices in the first column and
        times in the second; any other object (e.g. a generator) falls
        back on the slower SpikeGeneratorThreshold.  ``gather`` is
        deprecated and only triggers a warning.
        """
        clock = guess_clock(clock)
        self.N = N
        self.period = period
        if gather:
            log_warn('brian.SpikeGeneratorGroup', 'SpikeGeneratorGroup\'s gather keyword use is deprecated')
        fallback = False # fall back on old SpikeGeneratorThreshold or not
        if isinstance(spiketimes, list):
            # spiketimes is a list of (i,t)
            if len(spiketimes):
                idx, times = zip(*spiketimes)
            else:
                idx, times = [], []
            # the following try ... handles the case where spiketimes has index arrays
            # e.g spiketimes = [([0, 1], 0 * msecond), ([0, 1, 2], 2 * msecond)]
            # Notes:
            # - if there is always the same number of indices by array, its simple, it's just a matter of flattening
            # - if not, then it requires a for loop, and it's done in the except
            try:
                # raises ValueError if the index entries are lists of
                # unequal length (ragged) -- handled in the except below
                idx = array(idx, dtype = float)
                times = array(times, dtype = float)
                if idx.ndim > 1:
                    # simple case: equal-length index lists; repeat each time
                    # once per index, then flatten both arrays in parallel
                    times = tile(times.reshape((len(times), 1)), (idx.shape[1], 1)).flatten()
                    idx = idx.flatten()
            except ValueError:
                # ragged case: expand each (indices, time) pair one by one
                new_idx = []
                new_times = []
                for k, item in enumerate(idx):
                    if isinstance(item, list):
                        new_idx += item # append indices
                        new_times += [times[k]]*len(item)
                    else:
                        new_times += [times[k]]
                        new_idx += [item]
                idx = array(new_idx, dtype  = float)
                times = new_times
                times = array(times, dtype = float)
        elif isinstance(spiketimes, tuple):
            # spike times is a tuple with idx, times in arrays
            idx = spiketimes[0]
            times = spiketimes[1]
        elif isinstance(spiketimes, ndarray):
            # spiketimes is a ndarray, with first col is index and second time
            idx = spiketimes[:,0]
            times = spiketimes[:,1]
        else:
            log_warn('brian.SpikeGeneratorGroup', 'Using (slow) threshold because spiketimes is assumed to be a generator/iterator')
            # spiketimes is a callable object, so falling back on old SpikeGeneratorThreshold
            fallback = True

        if not fallback:
            thresh = FastSpikeGeneratorThreshold(N, idx, times, dt=clock.dt, period=period)
        else:
            thresh = SpikeGeneratorThreshold(N, spiketimes, period=period, sort=sort)
        
        # NOTE(review): the guard suggests __init__ may be re-entered
        # (presumably on reinit) -- the NeuronGroup base is constructed
        # only once, later calls just swap in the new threshold
        if not hasattr(self, '_initialized'):
            NeuronGroup.__init__(self, N, model=LazyStateUpdater(), threshold=thresh, clock=clock)
            self._initialized = True
        else:
            self._threshold = thresh
Esempio n. 13
0
 def __init__(self, function, clock=None, when='end'):
     """Store the callable to run, its clock, and its scheduling slot;
     if possible, record whether it expects exactly one argument."""
     self.clock = guess_clock(clock)
     self.when = when
     self.function = function
     code = getattr(function, 'func_code', None)
     if code is not None:
         # plain Python functions expose func_code; builtins etc. do not
         self._has_arg = (code.co_argcount == 1)
Esempio n. 14
0
 def __init__(self, func, numstates, clock=None):
     """Record the update callable, the number of state variables, and a
     resolved (possibly guessed) clock."""
     self.clock = guess_clock(clock)
     self.func = func
     self.numstates = numstates
Esempio n. 15
0
 def __init__(self, t, n, sigma, clock=None):
     """Resolve the clock, then build the data via self.generate."""
     resolved_clock = guess_clock(clock)
     self.clock = resolved_clock
     self.generate(t, n, sigma)
Esempio n. 16
0
    def __init__(self,
                 N,
                 spiketimes,
                 clock=None,
                 period=None,
                 sort=True,
                 gather=None):
        """Build a group of N neurons firing at prescribed times.

        spiketimes may be a list of (index, time) pairs (indices may
        themselves be lists of neuron indices), a (indices, times) tuple
        of arrays, or an ndarray with indices in the first column and
        times in the second; any other object (e.g. a generator) falls
        back on the slower SpikeGeneratorThreshold.  ``gather`` is
        deprecated and only triggers a warning.
        """
        clock = guess_clock(clock)
        self.N = N
        self.period = period
        if gather:
            log_warn(
                'brian.SpikeGeneratorGroup',
                'SpikeGeneratorGroup\'s gather keyword use is deprecated')
        fallback = False  # fall back on old SpikeGeneratorThreshold or not
        if isinstance(spiketimes, list):
            # spiketimes is a list of (i,t)
            if len(spiketimes):
                idx, times = zip(*spiketimes)
            else:
                idx, times = [], []
            # the following try ... handles the case where spiketimes has index arrays
            # e.g spiketimes = [([0, 1], 0 * msecond), ([0, 1, 2], 2 * msecond)]
            # Notes:
            # - if there is always the same number of indices by array, its simple, it's just a matter of flattening
            # - if not, then it requires a for loop, and it's done in the except
            try:
                # raises ValueError if the index entries are lists of
                # unequal length (ragged) -- handled in the except below
                idx = array(idx, dtype=float)
                times = array(times, dtype=float)
                if idx.ndim > 1:
                    # simple case: equal-length index lists; repeat each time
                    # once per index, then flatten both arrays in parallel
                    times = tile(times.reshape((len(times), 1)),
                                 (idx.shape[1], 1)).flatten()
                    idx = idx.flatten()
            except ValueError:
                # ragged case: expand each (indices, time) pair one by one
                new_idx = []
                new_times = []
                for k, item in enumerate(idx):
                    if isinstance(item, list):
                        new_idx += item  # append indices
                        new_times += [times[k]] * len(item)
                    else:
                        new_times += [times[k]]
                        new_idx += [item]
                idx = array(new_idx, dtype=float)
                times = new_times
                times = array(times, dtype=float)
        elif isinstance(spiketimes, tuple):
            # spike times is a tuple with idx, times in arrays
            idx = spiketimes[0]
            times = spiketimes[1]
        elif isinstance(spiketimes, ndarray):
            # spiketimes is a ndarray, with first col is index and second time
            idx = spiketimes[:, 0]
            times = spiketimes[:, 1]
        else:
            log_warn(
                'brian.SpikeGeneratorGroup',
                'Using (slow) threshold because spiketimes is assumed to be a generator/iterator'
            )
            # spiketimes is a callable object, so falling back on old SpikeGeneratorThreshold
            fallback = True

        if not fallback:
            thresh = FastSpikeGeneratorThreshold(N,
                                                 idx,
                                                 times,
                                                 dt=clock.dt,
                                                 period=period)
        else:
            thresh = SpikeGeneratorThreshold(N,
                                             spiketimes,
                                             period=period,
                                             sort=sort)

        # NOTE(review): the guard suggests __init__ may be re-entered
        # (presumably on reinit) -- the NeuronGroup base is constructed
        # only once, later calls just swap in the new threshold
        if not hasattr(self, '_initialized'):
            NeuronGroup.__init__(self,
                                 N,
                                 model=LazyStateUpdater(),
                                 threshold=thresh,
                                 clock=clock)
            self._initialized = True
        else:
            self._threshold = thresh