Example #1
 def __init__(self, group, inputcode, language, level=0):
     self.namespace = namespace(inputcode, level=level+1)
     self.inputcode = inputcode
     self.language = language
     P = group
     ns = self.namespace
     self.inputcode = freeze_with_equations(self.inputcode, P._eqs, ns)
     statements = statements_from_codestring(self.inputcode, P._eqs,
                                             infer_definitions=True)
     block = Block(*statements)
     symbols = get_neuron_group_symbols(P, self.language,
                                        index='_neuron_index')
     if language.name=='python' or language.name=='c':
         symbols['_neuron_index'] = ArrayIndex('_neuron_index',
                                               '_spikes',
                                                self.language,
                                                array_len='_numspikes')
     if language.name=='gpu':
         symbols['_neuron_index'] = SliceIndex('_neuron_index',
                                               '0',
                                               '_num_neurons',
                                               self.language,
                                               all=True)
         _spiked_symbol = group._threshold._spiked_symbol
         symbols['_spiked'] = _spiked_symbol
         block = CIfBlock('_spiked', [block])
         self.namespace['_num_gpu_indices'] = len(P)
         self.namespace['t'] = 1.0 # dummy value
         self.namespace['_num_neurons'] = len(P)
     self.code = block.generate('reset', self.language, symbols,
                                namespace=ns)
     ns = self.code.namespace
     ns['dt'] = P.clock._dt
     log_info('brian.codegen2.CodeGenReset', 'CODE:\n'+self.code.code_str)
     log_info('brian.codegen2.CodeGenReset', 'KEYS:\n'+str(ns.keys()))
Example #2
 def __init__(self, inputcode, language, level=0):
     self._ns, unknowns = namespace(inputcode,
                                    level=level + 1,
                                    return_unknowns=True)
     self._inputcode = inputcode
     self._language = language
     self._prepared = False
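
All of these examples resolve the identifiers of a code string against the caller's scope through namespace(code, level=..., return_unknowns=...). Brian's actual helper is not reproduced on this page; the following is a minimal sketch, assuming it walks `level` extra frames up the stack and splits the identifiers into resolved and unknown names.

import re
import sys

def namespace(code, level=0, return_unknowns=False):
    # Minimal sketch (an assumption, not Brian's implementation): look up every
    # identifier appearing in the code string in the calling frame's locals and
    # globals, `level` extra frames up the stack.
    frame = sys._getframe(level + 1)
    identifiers = set(re.findall(r'[A-Za-z_][A-Za-z0-9_]*', code))
    ns, unknowns = {}, []
    for name in identifiers:
        if name in frame.f_locals:
            ns[name] = frame.f_locals[name]
        elif name in frame.f_globals:
            ns[name] = frame.f_globals[name]
        else:
            unknowns.append(name)
    if return_unknowns:
        return ns, unknowns
    return ns
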
Example #3
 def __init__(self, expr, level=0):
     self._namespace, unknowns = namespace(expr, level=level + 1, return_unknowns=True)
     self._vars = unknowns
     self._expr = expr
     self._code = compile(expr, "StringThreshold", "eval")
     class Replacer(object):
         def __init__(self, func, n):
             self.n = n
             self.func = func
         def __call__(self):
             return self.func(self.n)
     self._Replacer = Replacer
Example #4
 def __init__(self, expr, level=0):
     self._namespace, unknowns = namespace(expr, level=level + 1, return_unknowns=True)
     self._vars = unknowns
     self._expr = expr
     self._code = compile(expr, "StringThreshold", "eval")
     class Replacer(object):
         def __init__(self, func, n):
             self.n = n
             self.func = func
         def __call__(self):
             return self.func(self.n)
     self._Replacer = Replacer
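
The Replacer class above exists so that a zero-argument call such as rand() inside an eval'd string produces one value per item rather than a single scalar. A small self-contained illustration (the values and names other than Replacer are hypothetical):

import numpy as np

class Replacer(object):
    def __init__(self, func, n):
        self.n = n
        self.func = func
    def __call__(self):
        return self.func(self.n)

# In the string namespace, rand() now returns 5 samples instead of one scalar.
ns = {'rand': Replacer(np.random.rand, 5)}
values = eval('rand()', ns)
print(values.shape)   # (5,)
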
Example #5
 def _interpret(self, value, synapses, level):
     '''
     Interprets the value string in the context of the synaptic indexes ``synapses''
     '''
     _namespace = namespace(value, level=level)
     code = compile(value, "StringAssignment", "eval")
     synapses = slice_to_array(synapses, N=len(self.synapses))
     _namespace['n'] = len(synapses)
     _namespace['i'] = self.synapses.presynaptic[synapses]
     _namespace['j'] = self.synapses.postsynaptic[synapses]
     for var in self.synapses.var_index:  # maybe synaptic variables should have higher priority
         if isinstance(var, str):
             _namespace[var] = self.synapses.state(var)[synapses]
     _namespace['rand'] = self._Replacer(np.random.rand, len(synapses))
     _namespace['randn'] = self._Replacer(np.random.randn, len(synapses))
     return eval(code, _namespace)
Example #6
 def _interpret(self, value, synapses, level):
     '''
     Interprets the value string in the context of the synaptic indexes ``synapses''
     '''
     _namespace = namespace(value, level=level)
     code = compile(value, "StringAssignment", "eval")
     synapses=slice_to_array(synapses,N=len(self.synapses))
     _namespace['n']=len(synapses)
     _namespace['i']=self.synapses.presynaptic[synapses]
     _namespace['j']=self.synapses.postsynaptic[synapses]
     for var in self.synapses.var_index: # maybe synaptic variables should have higher priority
         if isinstance(var,str):
             _namespace[var] = self.synapses.state(var)[synapses]
     _namespace['rand'] = self._Replacer(np.random.rand, len(synapses))
     _namespace['randn'] = self._Replacer(np.random.randn, len(synapses))
     return eval(code, _namespace)
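
A stripped-down, pure-numpy sketch of what the _interpret call above evaluates: the value string sees the presynaptic index i, the postsynaptic index j and the synapse count n, all vectorised over the selected synapses (the data below are hypothetical).

import numpy as np

i = np.array([0, 0, 1, 2])   # presynaptic neuron of each selected synapse
j = np.array([1, 2, 2, 0])   # postsynaptic neuron of each selected synapse
_namespace = {'i': i, 'j': j, 'n': len(i),
              'rand': lambda: np.random.rand(len(i)),
              'randn': lambda: np.random.randn(len(i))}
code = compile('(j - i) * 0.001', 'StringAssignment', 'eval')
print(eval(code, _namespace))   # one value per synapse: [ 0.001  0.002  0.001 -0.002]
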
Example #7
 def __init__(self, group, inputcode, language, level=0):
     self.namespace = namespace(inputcode, level=level + 1)
     self.inputcode = inputcode
     self.language = language
     P = group
     ns = self.namespace
     self.inputcode = freeze_with_equations(self.inputcode, P._eqs, ns)
     statements = statements_from_codestring(self.inputcode,
                                             P._eqs,
                                             infer_definitions=True)
     block = Block(*statements)
     symbols = get_neuron_group_symbols(P,
                                        self.language,
                                        index='_neuron_index')
     if language.name == 'python' or language.name == 'c':
         symbols['_neuron_index'] = ArrayIndex('_neuron_index',
                                               '_spikes',
                                               self.language,
                                               array_len='_numspikes')
     if language.name == 'gpu':
         symbols['_neuron_index'] = SliceIndex('_neuron_index',
                                               '0',
                                               '_num_neurons',
                                               self.language,
                                               all=True)
         _spiked_symbol = group._threshold._spiked_symbol
         symbols['_spiked'] = _spiked_symbol
         block = CIfBlock('_spiked', [block])
         self.namespace['_num_gpu_indices'] = len(P)
         self.namespace['t'] = 1.0  # dummy value
         self.namespace['_num_neurons'] = len(P)
     self.code = block.generate('reset',
                                self.language,
                                symbols,
                                namespace=ns)
     ns = self.code.namespace
     ns['dt'] = P.clock._dt
     log_info('brian.codegen2.CodeGenReset', 'CODE:\n' + self.code.code_str)
     log_info('brian.codegen2.CodeGenReset', 'KEYS:\n' + str(ns.keys()))
Example #8
def select_threshold(expr, eqs, level=0):
    '''
    Automatically selects the appropriate Threshold object from a string.
    
    Matches the following patterns:
    
    var_name > or >= const : Threshold
    var_name > or >= var_name : VariableThreshold
    others : StringThreshold
    '''
    global CThreshold, PythonThreshold
    use_codegen = (get_global_preference('usecodegen') and
                   get_global_preference('usecodegenthreshold'))
    use_weave = (get_global_preference('useweave') and
                 get_global_preference('usecodegenweave'))
    if use_codegen:
        if CThreshold is None:
            from brian.experimental.codegen.threshold import (CThreshold,
                                                              PythonThreshold)
        if use_weave:
            log_warn('brian.threshold', 'Using codegen CThreshold')
            return CThreshold(expr, level=level + 1)
        else:
            log_warn('brian.threshold', 'Using codegen PythonThreshold')
            return PythonThreshold(expr, level=level + 1)
    # plan:
    # - see if it matches A > B or A >= B, if not select StringThreshold
    # - check if A, B both match diffeq variable names, and if so
    #   select VariableThreshold
    # - check that A is a variable name, if not select StringThreshold
    # - extract all the identifiers from B, and if none of them are
    #   callable, assume it is a constant, try to eval it and then use
    #   Threshold. If not, or if eval fails, use StringThreshold.
    # This misses the case of e.g. V>10*mV*exp(1) because exp will be
    # callable, but in general a callable means that it could be
    # non-constant.
    expr = expr.strip()
    eqs.prepare()
    ns = namespace(expr, level=level + 1)
    s = re.search(r'^\s*(\w+)\s*>=?(.+)', expr)
    if not s:
        return StringThreshold(expr, level=level + 1)
    A = s.group(1)
    B = s.group(2).strip()
    if A not in eqs._diffeq_names:
        return StringThreshold(expr, level=level + 1)
    if B in eqs._diffeq_names:
        return VariableThreshold(B, A)
    try:
        vars = get_identifiers(B)
    except SyntaxError:
        return StringThreshold(expr, level=level + 1)
    all_vars = eqs._eq_names + eqs._diffeq_names + eqs._alias.keys() + ['t']
    for v in vars:
        if v not in ns or v in all_vars or callable(ns[v]):
            return StringThreshold(expr, level=level + 1)
    try:
        val = eval(B, ns)
    except:
        return StringThreshold(expr, level=level + 1)
    return Threshold(val, A)
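
The selection above hinges on splitting the expression into A and B with the regex r'^\s*(\w+)\s*>=?(.+)'. A quick, self-contained check of how that split behaves on a few hypothetical threshold strings:

import re

for expr in ['V > Vt', 'V>=-50*mV', 'V**2 > 1']:
    s = re.search(r'^\s*(\w+)\s*>=?(.+)', expr)
    if s:
        print(repr(expr) + ' -> A=' + repr(s.group(1)) + ', B=' + repr(s.group(2).strip()))
    else:
        print(repr(expr) + ' -> no match, falls back to StringThreshold')
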
Example #9
 def __call__(self, code):
     ns = namespace(code, level=1)
     for k, v in ns.iteritems():
         if isinstance(v, Symbol):
             code = re.sub("\\b" + k + "\\b", str(v), code)
     self.outputcode.add(''.join(expr + ';\n' for expr in code.split('\n') if expr.strip()))
Example #10
    def __init__(self,
                 C,
                 eqs,
                 pre,
                 post,
                 wmin=0,
                 wmax=Inf,
                 level=0,
                 clock=None,
                 delay_pre=None,
                 delay_post=None):
        NetworkOperation.__init__(self, lambda: None, clock=clock)
        C.compress()
        # Convert to equations object
        if isinstance(eqs, Equations):
            eqs_obj = eqs
        else:
            eqs_obj = Equations(eqs, level=level + 1)
        # handle multi-line pre, post equations and multi-statement equations separated by ;
        if '\n' in pre:
            pre = flattened_docstring(pre)
        elif ';' in pre:
            pre = '\n'.join([line.strip() for line in pre.split(';')])
        if '\n' in post:
            post = flattened_docstring(post)
        elif ';' in post:
            post = '\n'.join([line.strip() for line in post.split(';')])

        # Check units
        eqs_obj.compile_functions()
        eqs_obj.check_units()

        # Get variable names
        vars = eqs_obj._diffeq_names
        # Find which ones are directly modified (e.g. regular expression matching; careful with comments)
        vars_pre = [var for var in vars if var in modified_variables(pre)]
        vars_post = [var for var in vars if var in modified_variables(post)]

        # additional dependencies are used to ensure that if there are multiple
        # pre/post separated equations they are grouped together as one
        additional_deps = [
            '__pre_deps=' + '+'.join(vars_pre),
            '__post_deps=' + '+'.join(vars_post)
        ]
        separated_equations = separate_equations(eqs_obj, additional_deps)
        if not len(separated_equations) == 2:
            print separated_equations
            raise ValueError(
                'Equations should separate into pre and postsynaptic variables.'
            )
        sep_pre, sep_post = separated_equations
        for v in vars_pre:
            if v in sep_post._diffeq_names:
                sep_pre, sep_post = sep_post, sep_pre
                break

        index_pre = [
            i for i in range(len(vars))
            if vars[i] in vars_pre or vars[i] in sep_pre._diffeq_names
        ]
        index_post = [
            i for i in range(len(vars))
            if vars[i] in vars_post or vars[i] in sep_post._diffeq_names
        ]

        vars_pre = array(vars)[index_pre].tolist()
        vars_post = array(vars)[index_post].tolist()

        # Check pre/post consistency
        shared_vars = set(vars_pre).intersection(vars_post)
        if shared_vars != set([]):
            raise Exception, str(
                list(shared_vars)) + " are both presynaptic and postsynaptic!"

        # Substitute equations/aliases into pre/post code
        def substitute_eqs(code):
            for name in sep_pre._eq_names[-1::-1] + sep_post._eq_names[
                    -1::-1]:  # reverse order, as in equations.py
                if name in sep_pre._eq_names:
                    expr = sep_pre._string[name]
                else:
                    expr = sep_post._string[name]
                code = re.sub("\\b" + name + "\\b", '(' + expr + ')', code)
            return code

        pre = substitute_eqs(pre)
        post = substitute_eqs(post)

        # Create namespaces for pre and post codes
        pre_namespace = namespace(pre, level=level + 1)
        post_namespace = namespace(post, level=level + 1)

        def splitcode(incode):
            num_perneuron = num_persynapse = 0
            reordering_warning = False
            incode_lines = [
                line.strip() for line in incode.split('\n') if line.strip()
            ]
            per_neuron_lines = []
            per_synapse_lines = []
            for line in incode_lines:
                if not line.strip(): continue
                m = re.search(
                    r'\bw\b\s*[^><=]?=',
                    line)  # lines of the form w = ..., w *= ..., etc.
                if m:
                    num_persynapse += 1
                    per_synapse_lines.append(line)
                else:
                    num_perneuron += 1
                    if num_persynapse != 0 and not reordering_warning:
                        log_warn(
                            'brian.experimental.cstdp',
                            'STDP operations are being re-ordered, results may be wrong.'
                        )
                        reordering_warning = True
                    per_neuron_lines.append(line)
            return per_neuron_lines, per_synapse_lines

        per_neuron_pre, per_synapse_pre = splitcode(pre)
        per_neuron_post, per_synapse_post = splitcode(post)

        all_vars = vars_pre + vars_post + ['w']

        per_neuron_pre = [
            c_single_statement(freeze(line, all_vars, pre_namespace))
            for line in per_neuron_pre
        ]
        per_neuron_post = [
            c_single_statement(freeze(line, all_vars, post_namespace))
            for line in per_neuron_post
        ]
        per_synapse_pre = [
            c_single_statement(freeze(line, all_vars, pre_namespace))
            for line in per_synapse_pre
        ]
        per_synapse_post = [
            c_single_statement(freeze(line, all_vars, post_namespace))
            for line in per_synapse_post
        ]

        per_neuron_pre = '\n'.join(per_neuron_pre)
        per_neuron_post = '\n'.join(per_neuron_post)
        per_synapse_pre = '\n'.join(per_synapse_pre)
        per_synapse_post = '\n'.join(per_synapse_post)

        # Neuron groups
        G_pre = NeuronGroup(len(C.source), model=sep_pre, clock=self.clock)
        G_post = NeuronGroup(len(C.target), model=sep_post, clock=self.clock)
        G_pre._S[:] = 0
        G_post._S[:] = 0
        self.pre_group = G_pre
        self.post_group = G_post
        var_group = {}
        for i, v in enumerate(vars_pre):
            var_group[v] = G_pre
        for i, v in enumerate(vars_post):
            var_group[v] = G_post
        self.var_group = var_group

        self.contained_objects += [G_pre, G_post]

        vars_pre_ind = {}
        for i, var in enumerate(vars_pre):
            vars_pre_ind[var] = i
        vars_post_ind = {}
        for i, var in enumerate(vars_post):
            vars_post_ind[var] = i

        prevars_dict = dict((k, G_pre.state(k)) for k in vars_pre)
        postvars_dict = dict((k, G_post.state(k)) for k in vars_post)

        clipcode = ''
        if isfinite(wmin):
            clipcode += 'if(w<%wmin%) w = %wmin%;\n'.replace(
                '%wmin%', repr(float(wmin)))
        if isfinite(wmax):
            clipcode += 'if(w>%wmax%) w = %wmax%;\n'.replace(
                '%wmax%', repr(float(wmax)))

        if not isinstance(C, DelayConnection):
            precode = iterate_over_spikes(
                '_j', '_spikes',
                (load_required_variables(
                    '_j', prevars_dict), transform_code(per_neuron_pre),
                 iterate_over_row(
                     '_k', 'w', C.W, '_j', (load_required_variables(
                         '_k', postvars_dict), transform_code(per_synapse_pre),
                                            ConnectionCode(clipcode)))))
            postcode = iterate_over_spikes(
                '_j', '_spikes',
                (load_required_variables(
                    '_j', postvars_dict), transform_code(per_neuron_post),
                 iterate_over_col('_i', 'w', C.W, '_j',
                                  (load_required_variables('_i', prevars_dict),
                                   transform_code(per_synapse_post),
                                   ConnectionCode(clipcode)))))
            log_debug('brian.experimental.c_stdp',
                      'CSTDP Pre code:\n' + str(precode))
            log_debug('brian.experimental.c_stdp',
                      'CSTDP Post code:\n' + str(postcode))
            connection_delay = C.delay * C.source.clock.dt
            if (delay_pre is None) and (
                    delay_post is None):  # same delays as the Connection C
                delay_pre = connection_delay
                delay_post = 0 * ms
            elif delay_pre is None:
                delay_pre = connection_delay - delay_post
                if delay_pre < 0 * ms:
                    raise AttributeError, "Postsynaptic delay is too large"
            elif delay_post is None:
                delay_post = connection_delay - delay_pre
                if delay_post < 0 * ms:
                    raise AttributeError, "Postsynaptic delay is too large"
            # create forward and backward Connection objects or SpikeMonitor objects
            pre_updater = SpikeMonitor(C.source,
                                       function=precode,
                                       delay=delay_pre)
            post_updater = SpikeMonitor(C.target,
                                        function=postcode,
                                        delay=delay_post)
            updaters = [pre_updater, post_updater]
            self.contained_objects += [pre_updater, post_updater]
        else:
            if delay_pre is not None or delay_post is not None:
                raise ValueError(
                    "Must use delay_pre=delay_post=None for the moment.")
            max_delay = C._max_delay * C.target.clock.dt
            # Ensure that the source and target neuron spikes are kept for at least the
            # DelayConnection's maximum delay
            C.source.set_max_delay(max_delay)
            C.target.set_max_delay(max_delay)

            self.G_pre_monitors = {}
            self.G_post_monitors = {}
            self.G_pre_monitors.update(
                ((var,
                  RecentStateMonitor(G_pre,
                                     vars_pre_ind[var],
                                     duration=(C._max_delay + 1) *
                                     C.target.clock.dt,
                                     clock=G_pre.clock)) for var in vars_pre))
            self.G_post_monitors.update(
                ((var,
                  RecentStateMonitor(
                      G_post,
                      vars_post_ind[var],
                      duration=(C._max_delay + 1) * C.target.clock.dt,
                      clock=G_post.clock)) for var in vars_post))
            self.contained_objects += self.G_pre_monitors.values()
            self.contained_objects += self.G_post_monitors.values()

            prevars_dict_delayed = dict(
                (k, self.G_pre_monitors[k]) for k in prevars_dict.keys())
            postvars_dict_delayed = dict(
                (k, self.G_post_monitors[k]) for k in postvars_dict.keys())

            precode_immediate = iterate_over_spikes(
                '_j', '_spikes', (load_required_variables(
                    '_j', prevars_dict), transform_code(per_neuron_pre)))
            precode_delayed = iterate_over_spikes(
                '_j', '_spikes',
                iterate_over_row('_k',
                                 'w',
                                 C.W,
                                 '_j',
                                 extravars={'_delay': C.delayvec},
                                 code=(ConnectionCode(
                                     'double _t_past = _max_delay-_delay;',
                                     vars={'_max_delay': float(max_delay)}),
                                       load_required_variables_pastvalue(
                                           '_k', '_t_past',
                                           postvars_dict_delayed),
                                       transform_code(per_synapse_pre),
                                       ConnectionCode(clipcode))))
            postcode = iterate_over_spikes(
                '_j', '_spikes',
                (load_required_variables(
                    '_j', postvars_dict), transform_code(per_neuron_post),
                 iterate_over_col('_i',
                                  'w',
                                  C.W,
                                  '_j',
                                  extravars={'_delay': C.delayvec},
                                  code=(load_required_variables_pastvalue(
                                      '_i', '_delay', prevars_dict_delayed),
                                        transform_code(per_synapse_post),
                                        ConnectionCode(clipcode)))))
            log_debug('brian.experimental.c_stdp',
                      'CSTDP Pre code (immediate):\n' + str(precode_immediate))
            log_debug('brian.experimental.c_stdp',
                      'CSTDP Pre code (delayed):\n' + str(precode_delayed))
            log_debug('brian.experimental.c_stdp',
                      'CSTDP Post code:\n' + str(postcode))
            pre_updater_immediate = SpikeMonitor(C.source,
                                                 function=precode_immediate)
            pre_updater_delayed = SpikeMonitor(C.source,
                                               function=precode_delayed,
                                               delay=max_delay)
            post_updater = SpikeMonitor(C.target, function=postcode)
            updaters = [
                pre_updater_immediate, pre_updater_delayed, post_updater
            ]
            self.contained_objects += updaters
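
splitcode above classifies each update line with the regex r'\bw\b\s*[^><=]?=': anything that assigns to the synaptic weight w (w = ..., w += ..., w *= ...) counts as per-synapse, everything else (trace updates) is per-neuron, and comparisons such as w >= wmax are deliberately not treated as assignments. A quick self-contained check on hypothetical STDP lines:

import re

for line in ['Apre += dApre', 'w += Apost', 'w *= 0.9', 'w >= wmax']:
    is_per_synapse = re.search(r'\bw\b\s*[^><=]?=', line) is not None
    print(line + ' -> ' + ('per-synapse' if is_per_synapse else 'per-neuron'))
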
Example #11
    def __setitem__(self, key, value):
        '''
        Creates new synapses.
        Synapse indexes are created such that synapses with the same presynaptic neuron
        and delay have contiguous indexes.
        
        Caution:
        1) there is no deletion
        2) synapses are added, not replaced (e.g. S[1,2]=True;S[1,2]=True creates 2 synapses)
        
        TODO:
        * S[:,:]=array (boolean or int)
        '''
        if self._iscompressed:
            raise AttributeError,"Synapses cannot be added after they have been run"
        
        if not isinstance(key, tuple): # we should check that number of elements is 2 as well
            raise AttributeError,'Synapses behave as 2-D objects'
        pre,post=key # pre and post indexes (can be slices)
        
        '''
        Each of these sets of statements creates:
        * synapses_pre: a mapping from presynaptic neuron to synapse indexes
        * synapses_post: same
        * presynaptic: an array of presynaptic neuron indexes (synapse->pre)
        * postsynaptic: same
        '''
        pre_slice = self.presynaptic_indexes(pre)
        post_slice = self.postsynaptic_indexes(post)
        # Bound checks
        if pre_slice[-1]>=len(self.source):
            raise ValueError('Presynaptic index %d greater than number of '\
                             'presynaptic neurons (%d)'
                             % (pre_slice[-1], len(self.source)))
        if post_slice[-1]>=len(self.target):
            raise ValueError('Postsynaptic index %d greater than number of '\
                             'postsynaptic neurons (%d)'
                             % (post_slice[-1], len(self.target)))

        if isinstance(value,float):
            self.connect_random(pre,post,value)
            return
        elif isinstance(value, (int, bool)): # ex. S[1,7]=True
            # Simple case, either one or multiple synapses between different neurons
            if value is False:
                raise ValueError('Synapses cannot be deleted')
            elif value is True:
                nsynapses = 1
            else:
                nsynapses = value

            postsynaptic,presynaptic=np.meshgrid(post_slice,pre_slice) # synapse -> pre, synapse -> post
            # Flatten
            presynaptic.shape=(presynaptic.size,)
            postsynaptic.shape=(postsynaptic.size,)
            # pre,post -> synapse index, relative to last synapse
            # (that's a complex vectorised one!)
            synapses_pre=np.arange(len(presynaptic)).reshape((len(pre_slice),len(post_slice)))
            synapses_post=np.ones((len(post_slice),1),dtype=int)*np.arange(0,len(presynaptic),len(post_slice))+\
                          np.arange(len(post_slice)).reshape((len(post_slice),1))
            # Repeat
            if nsynapses>1:
                synapses_pre=np.hstack([synapses_pre+k*len(presynaptic) for k in range(nsynapses)]) # could be vectorised
                synapses_post=np.hstack([synapses_post+k*len(presynaptic) for k in range(nsynapses)]) # could be vectorised
                presynaptic=np.tile(presynaptic,nsynapses)
                postsynaptic=np.tile(postsynaptic,nsynapses)
            # Make sure the type is correct
            synapses_pre=np.array(synapses_pre,dtype=self.synapses_pre[0].dtype)
            synapses_post=np.array(synapses_post,dtype=self.synapses_post[0].dtype)
            # Turn into dictionaries
            synapses_pre=dict(zip(pre_slice,synapses_pre))
            synapses_post=dict(zip(post_slice,synapses_post))
        elif isinstance(value,str): # string code assignment
            # For subgroups, origin of i and j are shifted to subgroup origin
            if isinstance(pre,NeuronGroup):
                pre_shift=pre_slice[0]
            else:
                pre_shift=0
            if isinstance(post,NeuronGroup):
                post_shift=post_slice[0]
            else:
                post_shift=0
            code = re.sub(r'\b' + 'rand\(\)', 'rand(n)', value) # replacing rand()
            code = re.sub(r'\b' + 'randn\(\)', 'randn(n)', code) # replacing randn()
            _namespace = namespace(value, level=1)
            _namespace.update({'j' : post_slice-post_shift,
                               'n' : len(post_slice),
                               'rand': np.random.rand,
                               'randn': np.random.randn})
#            try: # Vectorise over all indexes: not faster! 
#                post,pre=np.meshgrid(post_slice-post_shift,pre_slice-pre_shift)
#                pre=pre.flatten()
#                post=post.flatten()
#                _namespace['i']=array(pre,dtype=self.presynaptic.dtype)
#                _namespace['j']=array(post,dtype=self.postsynaptic.dtype)
#                _namespace['n']=len(post)
#                result = eval(code, _namespace) # mask on synapses
#                if result.dtype==float: # random number generation
#                    result=rand(len(post))<result
#                indexes=result.nonzero()[0]
#                presynaptic=pre[indexes]
#                postsynaptic=post[indexes]
#                dtype=self.synapses_pre[0].dtype
#                synapses_pre={}
#                nsynapses=0
#                for i in pre_slice:
#                    n=sum(result[i*len(post_slice):(i+1)*len(post_slice)])
#                    synapses_pre[i]=array(nsynapses+np.arange(n),dtype=dtype)
#                    nsynapses+=n
#            except MemoryError: # If not possible, vectorise over postsynaptic indexes
#                log_info("synapses","Construction of synapses cannot be fully vectorised (too big)")
            #del pre
            #del post
            #_namespace['i']=None
            #_namespace['j']=post_slice-post_shift
            #_namespace['n']=len(post_slice)
            synapses_pre={}
            nsynapses=0
            presynaptic,postsynaptic=[],[]
            for i in pre_slice:
                _namespace['i']=i-pre_shift # maybe an array rather than a scalar?
                result = eval(code, _namespace) # mask on synapses
                if result.dtype==float: # random number generation
                    result=rand(len(post_slice))<result
                indexes=result.nonzero()[0]
                n=len(indexes)
                synapses_pre[i]=np.array(nsynapses+np.arange(n),dtype=self.synapses_pre[0].dtype)
                presynaptic.append(i*np.ones(n,dtype=int))
                postsynaptic.append(post_slice[indexes])
                nsynapses+=n
                
            # Make sure the type is correct
            presynaptic=np.array(np.hstack(presynaptic),dtype=self.presynaptic.dtype)
            postsynaptic=np.array(np.hstack(postsynaptic),dtype=self.postsynaptic.dtype)
            synapses_post=None
        elif isinstance(value, np.ndarray):
            raise NotImplementedError
            nsynapses = np.array(value, dtype = int) 
            
        # Now create the synapses
        self.create_synapses(presynaptic,postsynaptic,synapses_pre,synapses_post)
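
The string-assignment branch above loops over presynaptic neurons and evaluates the (rand()-rewritten) expression once per row to build the presynaptic/postsynaptic index arrays. A pure-numpy sketch of that loop with hypothetical sizes, outside of the Synapses class:

import numpy as np

pre_slice = np.arange(3)     # hypothetical presynaptic indexes
post_slice = np.arange(4)    # hypothetical postsynaptic indexes
code = compile('(i + j) % 2 == 0', 'StringAssignment', 'eval')
_namespace = {'j': post_slice, 'n': len(post_slice),
              'rand': np.random.rand, 'randn': np.random.randn}
presynaptic, postsynaptic = [], []
for i in pre_slice:
    _namespace['i'] = i
    result = eval(code, _namespace)              # boolean mask over post_slice
    if result.dtype == float:                    # a float mask is a connection probability
        result = np.random.rand(len(post_slice)) < result
    indexes = result.nonzero()[0]
    presynaptic.append(i * np.ones(len(indexes), dtype=int))
    postsynaptic.append(post_slice[indexes])
presynaptic = np.hstack(presynaptic)
postsynaptic = np.hstack(postsynaptic)
print(presynaptic)    # [0 0 1 1 2 2]
print(postsynaptic)   # [0 2 1 3 0 2]
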
Example #12
    def generate_code(self,code,level,direct=False,code_namespace=None):
        '''
        Generates pre and post code.
        
        ``code''
            The code as a string.
            
        ``level''
            The namespace level in which the code is executed.
        
        ``direct=False''
            If True, the code is generated assuming that
            postsynaptic variables are not modified. This makes the
            code faster.
            
        ``code_namespace''
            Additional namespace (highest priority)
        
        TODO:
        * include static variables (substitution)
        * have a list of variable names
        '''
        # Handle multi-line pre, post equations and multi-statement equations separated by ;
        # (this should probably be factored)
        if '\n' in code:
            code = flattened_docstring(code)
        elif ';' in code:
            code = '\n'.join([line.strip() for line in code.split(';')])
        
        # Create namespaces
        _namespace = namespace(code, level = level + 1)
        if code_namespace is not None:
            _namespace.update(code_namespace)
        _namespace['target'] = self.target # maybe we could save one indirection here
        _namespace['unique'] = np.unique
        _namespace['nonzero'] = np.nonzero
        _namespace['empty'] = np.empty
        _namespace['logical_not'] = np.logical_not
        _namespace['not_equal'] = np.not_equal
        _namespace['take'] = np.take
        _namespace['extract'] = np.extract
        _namespace['add'] = np.add
        _namespace['hstack'] = np.hstack

        code = re.sub(r'\b' + 'rand\(\)', 'rand(n)', code)
        code = re.sub(r'\b' + 'randn\(\)', 'randn(n)', code)

        # Generate the code
        def update_code(code, indices, postinds):
            res = code
            # given the synapse indices, write the update code,
            # this is here because in the code we generate we need to write this twice (because of the multiple presyn spikes for the same postsyn neuron problem)
                       
            # Replace synaptic variables by their value
            for var in self.var_index: # static variables are not included here
                if isinstance(var, str):
                    res = re.sub(r'\b' + var + r'\b', var + '['+indices+']', res) # synaptic variable, indexed by the synapse number
 
            # Replace postsynaptic variables by their value
            for postsyn_var in self.target.var_index: # static variables are not included here
                if isinstance(postsyn_var, str):
                    #res = re.sub(r'\b' + postsyn_var + r'_post\b', 'target.' + postsyn_var + '['+postinds+']', res)# postsyn variable, indexed by post syn neuron numbers
                    #res = re.sub(r'\b' + postsyn_var + r'\b', 'target.' + postsyn_var + '['+postinds+']', res)# postsyn variable, indexed by post syn neuron numbers
                    res = re.sub(r'\b' + postsyn_var + r'_post\b', '_target_' + postsyn_var + '['+postinds+']', res)# postsyn variable, indexed by post syn neuron numbers
                    res = re.sub(r'\b' + postsyn_var + r'\b', '_target_' + postsyn_var + '['+postinds+']', res)# postsyn variable, indexed by post syn neuron numbers
                    _namespace['_target_' + postsyn_var] = self.target.state_(postsyn_var)
            
            # Replace presynaptic variables by their value
            for presyn_var in self.source.var_index: # static variables are not included here
                if isinstance(presyn_var, str):
                    #res = re.sub(r'\b' + presyn_var + r'_pre\b', 'source.' + presyn_var + '[_pre['+indices+']]', res)# postsyn variable, indexed by post syn neuron numbers
                    res = re.sub(r'\b' + presyn_var + r'_pre\b', '_source_' + presyn_var + '[_pre['+indices+']]', res)# postsyn variable, indexed by post syn neuron numbers
                    _namespace['_source_' + presyn_var] = self.source.state_(presyn_var)
 
            # Replace n by number of synapses being updated
            res = re.sub(r'\bn\b','len('+indices+')', res)
 
            return res
 
        if direct: # direct update code, not caring about multiple accesses to postsynaptic variables
            code_str = '_post_neurons = _post[_synapses]\n'+update_code(code, '_synapses', '_post_neurons') + "\n"            
        else:
            algo = 3
            if algo==0:
                ## Old version using numpy's unique()
                code_str = "_post_neurons = _post[_synapses]\n" # not necessary to do a copy because _synapses is not a slice
                code_str += "_u, _i = unique(_post_neurons, return_index = True)\n"
                #code_str += update_code(code, '_synapses[_i]', '_u') + "\n"
                code_str += update_code(code, '_synapses[_i]', '_post[_synapses[_i]]') + "\n"
                code_str += "if len(_u) < len(_post_neurons):\n"
                code_str += "    _post_neurons[_i] = -1\n"
                code_str += "    while (len(_u) < len(_post_neurons)) & (_post_neurons>-1).any():\n" # !! the any() is time consuming (len(u)>=1??)
                #code_str += "    while (len(_u) < len(_post_neurons)) & (len(_u)>1):\n" # !! the any() is time consuming (len(u)>=1??)
                code_str += "        _u, _i = unique(_post_neurons, return_index = True)\n"
                code_str += indent(update_code(code, '_synapses[_i[1:]]', '_post[_synapses[_i[1:]]]'),2) + "\n"
                code_str += "        _post_neurons[_i[1:]] = -1 \n"
            elif algo==1:
                code_str = "_post_neurons = _post[_synapses]\n" # not necessary to do a copy because _synapses is not a slice
                code_str += "_perm = _post_neurons.argsort()\n"
                code_str += "_aux = _post_neurons[_perm]\n"
                code_str += "_flag = empty(len(_aux) + 1, dtype = bool)\n"
                code_str += "_flag[0] = _flag[-1] = True\n"
                code_str += "not_equal(_aux[1:], _aux[:-1], _flag[1:-1])\n"
                code_str += "_F = _flag.nonzero()[0][:-1]\n"
                code_str += "logical_not(_flag, _flag)\n"
                code_str += "while len(_F):\n"
                code_str += "    _u = _aux[_F]\n"
                code_str += "    _i = _perm[_F]\n"
                code_str += indent(update_code(code, '_synapses[_i]', '_u'), 1) + "\n"
                code_str += "    _F += 1\n"
                code_str += "    _F = _F[_flag[_F]]\n"
            elif algo==2:
                code_str = '''
                _post_neurons = _post.data.take(_synapses)
                _perm = _post_neurons.argsort()
                _aux = _post_neurons.take(_perm)
                _flag = empty(len(_aux)+1, dtype=bool)
                _flag[0] = _flag[-1] = 1
                not_equal(_aux[1:], _aux[:-1], _flag[1:-1])
                if 0:#_flag.sum()==len(_aux)+1:
                %(code1)s
                else:
                    _F = _flag.nonzero()[0][:-1]
                    logical_not(_flag, _flag)
                    while len(_F):
                        _u = _aux.take(_F)
                        _i = _perm.take(_F)
                %(code2)s
                        _F += 1
                        _F = extract(_flag.take(_F), _F)
                '''
                code_str = flattened_docstring(code_str) % {'code1': indent(update_code(code, '_synapses','_post_neurons'), 1),
                                                            'code2': indent(update_code(code, '_synapses[_i]', '_u'), 2)}
            elif algo==3:
                code_str = '''
                _post_neurons = _post.data.take(_synapses)
                _perm = _post_neurons.argsort()
                _aux = _post_neurons.take(_perm)
                _flag = empty(len(_aux)+1, dtype=bool)
                _flag[0] = _flag[-1] = 1
                not_equal(_aux[1:], _aux[:-1], _flag[1:-1])
                _F = _flag.nonzero()[0][:-1]
                logical_not(_flag, _flag)
                while len(_F):
                    _u = _aux.take(_F)
                    _i = _perm.take(_F)
                %(code)s
                    _F += 1
                    _F = extract(_flag.take(_F), _F)
                '''
                code_str = flattened_docstring(code_str) % {'code': indent(update_code(code, '_synapses[_i]', '_u'), 1)}
            elif algo==4:
                code_str = '''
                _post_neurons = _post[_synapses]
                _perm = _post_neurons.argsort()
                _aux = _post_neurons[_perm]
                _flag = empty(len(_aux)+1, dtype=bool)
                _flag[0] = _flag[-1] = 1
                not_equal(_aux[1:], _aux[:-1], _flag[1:-1])
                _F = _flag.nonzero()[0][:-1]
                logical_not(_flag, _flag)
                while len(_F):
                    _u = _aux[_F]
                    _i = _perm[_F]
                %(code)s
                    _F += 1
                    _F = _F[_flag[_F]]
                '''
                code_str = flattened_docstring(code_str) % {'code': indent(update_code(code, '_synapses[_i]', '_u'), 1)}
#        print code_str
            
        log_debug('brian.synapses', '\nCODE:\n'+code_str)
        
        # Compile
        compiled_code = compile(code_str, "Synaptic code", "exec")
        
        _namespace['_original_code_string'] = code_str
        
        return compiled_code,_namespace
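
The algo==3/algo==4 branches solve the classic fancy-indexing pitfall: when several activated synapses share a postsynaptic target, a vectorised x[post] += ... applies only one update per unique target. Sorting the targets and processing one synapse per unique target per pass makes every synapse count exactly once. A standalone numpy sketch of that loop with hypothetical data:

import numpy as np

post = np.array([2, 0, 2, 1, 2, 0])        # synapse -> postsynaptic neuron
w = np.array([1., 2., 3., 4., 5., 6.])     # synaptic weights
v = np.zeros(3)                            # postsynaptic state variable
_synapses = np.arange(len(post))           # all synapses are activated

_post_neurons = post[_synapses]
_perm = _post_neurons.argsort()
_aux = _post_neurons[_perm]
_flag = np.empty(len(_aux) + 1, dtype=bool)
_flag[0] = _flag[-1] = True
np.not_equal(_aux[1:], _aux[:-1], _flag[1:-1])
_F = _flag.nonzero()[0][:-1]               # first synapse of each run of equal targets
np.logical_not(_flag, _flag)
while len(_F):
    _u = _aux[_F]                          # unique targets in this pass
    _i = _perm[_F]                         # one synapse per target
    v[_u] += w[_synapses[_i]]              # safe: no duplicate indexes in _u
    _F += 1
    _F = _F[_flag[_F]]

print(v)   # [8. 4. 9.] == np.bincount(post, weights=w)
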
Example #13
 def __init__(self, group, inputcode, language, level=0):
     inputcode = inputcode.strip()
     self.namespace = namespace(inputcode, level=level+1)
     self.inputcode = inputcode
     self.language = language
     self.prepared = False
     P = group
     ns = self.namespace
     self.inputcode = freeze_with_equations(self.inputcode, P._eqs, ns)
     block = make_threshold_block(P, self.inputcode, self.language)
     symbols = get_neuron_group_symbols(P, self.language)
     symbols['_neuron_index'] = SliceIndex('_neuron_index',
                                           '0',
                                           '_num_neurons',
                                           self.language,
                                           all=True)
     if self.language.name=='c':
         symbols['_numspikes'] = NumSpikesSymbol('_numspikes',
                                                 self.language)
     if self.language.name=='gpu':
         _arr_spiked_bool = zeros(len(P)+1, dtype=int32)
         symbols['_spiked'] = ArraySymbol(_arr_spiked_bool,
                                          '_spiked',
                                          self.language,
                                          index='_neuron_index',
                                          array_name='_arr_spiked_bool',
                                          )
         self._spiked_symbol = symbols['_spiked']
         self._arr_spiked_bool = _arr_spiked_bool
     code = self.code = block.generate('threshold', self.language,
                                       symbols)
     log_info('brian.codegen2.CodeGenThreshold', 'CODE:\n'+self.code.code_str)
     log_info('brian.codegen2.CodeGenThreshold', 'KEYS:\n'+str(ns.keys()))
     ns = self.code.namespace
     ns['t'] = 1.0 # dummy value
     ns['dt'] = P.clock._dt
     ns['_num_neurons'] = len(P)
     if self.language.name=='python':
         def threshold_func(P):
             code()
             return ns['_spikes_bool'].nonzero()[0]
     elif self.language.name=='c':
         ns['_spikes'] = zeros(len(P), dtype=int)
         def threshold_func(P):
             code()
             return ns['_spikes'][:ns['_arr__numspikes'][0]]
     elif self.language.name=='gpu':
         ns['_arr_spiked_bool'] = _arr_spiked_bool
         ns['_num_gpu_indices'] = len(P)
         # TODO: this threshold func should do nothing on GPU unless
         # we want to force sync, or alternatively we can do a
         # compaction on the GPU and then return that
         compactor = GPUCompactor(len(P), index_dtype=int32)
         device_syms = code.gpu_man.mem_man.device
         def threshold_func(P):
             code()
             return compactor(device_syms['_arr_spiked_bool'])
             #if not language.force_sync:
             #    code.gpu_man.copy_to_host('_arr_spiked_bool')
             #return ns['_arr_spiked_bool'].nonzero()[0]
     self.threshold_func = threshold_func
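
For the Python language path above, the generated code is expected to fill a boolean array, and threshold_func simply turns it into spike indexes with nonzero(). A tiny sketch of that final step (the values are hypothetical):

import numpy as np

V = np.array([-0.060, -0.045, -0.055, -0.030])   # membrane potentials
Vt = -0.050                                      # threshold
_spikes_bool = V > Vt
print(_spikes_bool.nonzero()[0])                 # [1 3]
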
Example #14
def select_threshold(expr, eqs, level=0):
    '''
    Automatically selects the appropriate Threshold object from a string.
    
    Matches the following patterns:
    
    var_name > or >= const : Threshold
    var_name > or >= var_name : VariableThreshold
    others : StringThreshold
    '''
    global CThreshold, PythonThreshold
    use_codegen = (get_global_preference('usecodegen') and
                   get_global_preference('usecodegenthreshold'))
    use_weave = (get_global_preference('useweave') and
                 get_global_preference('usecodegenweave'))
    if use_codegen:
        if CThreshold is None:
            from brian.experimental.codegen.threshold import (CThreshold,
                                                              PythonThreshold)
        if use_weave:
            log_warn('brian.threshold', 'Using codegen CThreshold')
            return CThreshold(expr, level=level + 1)
        else:
            log_warn('brian.threshold', 'Using codegen PythonThreshold')
            return PythonThreshold(expr, level=level + 1)
    # plan:
    # - see if it matches A > B or A >= B, if not select StringThreshold
    # - check if A, B both match diffeq variable names, and if so
    #   select VariableThreshold
    # - check that A is a variable name, if not select StringThreshold
    # - extract all the identifiers from B, and if none of them are
    #   callable, assume it is a constant, try to eval it and then use
    #   Threshold. If not, or if eval fails, use StringThreshold.
    # This misses the case of e.g. V>10*mV*exp(1) because exp will be
    # callable, but in general a callable means that it could be
    # non-constant.
    expr = expr.strip()
    eqs.prepare()
    ns = namespace(expr, level=level + 1)
    s = re.search(r'^\s*(\w+)\s*>=?(.+)', expr)
    if not s:
        return StringThreshold(expr, level=level + 1)
    A = s.group(1)
    B = s.group(2).strip()
    if A not in eqs._diffeq_names:
        return StringThreshold(expr, level=level + 1)
    if B in eqs._diffeq_names:
        return VariableThreshold(B, A)
    try:
        vars = get_identifiers(B)
    except SyntaxError:
        return StringThreshold(expr, level=level + 1)
    all_vars = eqs._eq_names + eqs._diffeq_names + eqs._alias.keys() + ['t']
    for v in vars:
        if v not in ns or v in all_vars or callable(ns[v]):
            return StringThreshold(expr, level=level + 1)
    try:
        val = eval(B, ns)
    except:
        return StringThreshold(expr, level=level + 1)
    return Threshold(val, A)
Example #15
 def __init__(self, inputcode, language, level=0):
     self._ns, unknowns = namespace(inputcode, level=level+1, return_unknowns=True)
     self._inputcode = inputcode
     self._language = language
     self._prepared = False
Example #16
    def __init__(self, C, eqs, pre, post, wmin=0, wmax=Inf, level=0,
                 clock=None, delay_pre=None, delay_post=None):
        NetworkOperation.__init__(self, lambda:None, clock=clock)
        C.compress()
        # Convert to equations object
        if isinstance(eqs, Equations):
            eqs_obj = eqs
        else:
            eqs_obj = Equations(eqs, level=level + 1)
        # handle multi-line pre, post equations and multi-statement equations separated by ;
        if '\n' in pre:
            pre = flattened_docstring(pre)
        elif ';' in pre:
            pre = '\n'.join([line.strip() for line in pre.split(';')])
        if '\n' in post:
            post = flattened_docstring(post)
        elif ';' in post:
            post = '\n'.join([line.strip() for line in post.split(';')])

        # Check units
        eqs_obj.compile_functions()
        eqs_obj.check_units()

        # Get variable names
        vars = eqs_obj._diffeq_names
        # Find which ones are directly modified (e.g. regular expression matching; careful with comments)
        vars_pre = [var for var in vars if var in modified_variables(pre)]
        vars_post = [var for var in vars if var in modified_variables(post)]

        # additional dependencies are used to ensure that if there are multiple
        # pre/post separated equations they are grouped together as one
        additional_deps = ['__pre_deps='+'+'.join(vars_pre),
                           '__post_deps='+'+'.join(vars_post)]
        separated_equations = separate_equations(eqs_obj, additional_deps)
        if not len(separated_equations) == 2:
            print separated_equations
            raise ValueError('Equations should separate into pre and postsynaptic variables.')
        sep_pre, sep_post = separated_equations
        for v in vars_pre:
            if v in sep_post._diffeq_names:
                sep_pre, sep_post = sep_post, sep_pre
                break

        index_pre = [i for i in range(len(vars)) if vars[i] in vars_pre or vars[i] in sep_pre._diffeq_names]
        index_post = [i for i in range(len(vars)) if vars[i] in vars_post or vars[i] in sep_post._diffeq_names]

        vars_pre = array(vars)[index_pre].tolist()
        vars_post = array(vars)[index_post].tolist()

        # Check pre/post consistency
        shared_vars = set(vars_pre).intersection(vars_post)
        if shared_vars != set([]):
            raise Exception, str(list(shared_vars)) + " are both presynaptic and postsynaptic!"

        # Substitute equations/aliases into pre/post code
        def substitute_eqs(code):
            for name in sep_pre._eq_names[-1::-1]+sep_post._eq_names[-1::-1]: # reverse order, as in equations.py
                if name in sep_pre._eq_names:
                    expr = sep_pre._string[name]
                else:
                    expr = sep_post._string[name]
                code = re.sub("\\b" + name + "\\b", '(' + expr + ')', code)
            return code
        pre = substitute_eqs(pre)
        post = substitute_eqs(post)

        # Create namespaces for pre and post codes
        pre_namespace = namespace(pre, level=level + 1)
        post_namespace = namespace(post, level=level + 1)

        def splitcode(incode):
            num_perneuron = num_persynapse = 0
            reordering_warning = False
            incode_lines = [line.strip() for line in incode.split('\n') if line.strip()]
            per_neuron_lines = []
            per_synapse_lines = []
            for line in incode_lines:
                if not line.strip(): continue
                m = re.search(r'\bw\b\s*[^><=]?=', line) # lines of the form w = ..., w *= ..., etc.
                if m:
                    num_persynapse += 1
                    per_synapse_lines.append(line)
                else:
                    num_perneuron += 1
                    if num_persynapse!=0 and not reordering_warning:
                        log_warn('brian.experimental.cstdp', 'STDP operations are being re-ordered, results may be wrong.')
                        reordering_warning = True
                    per_neuron_lines.append(line)
            return per_neuron_lines, per_synapse_lines

        per_neuron_pre, per_synapse_pre = splitcode(pre)
        per_neuron_post, per_synapse_post = splitcode(post)

        all_vars = vars_pre + vars_post + ['w']        

        per_neuron_pre = [c_single_statement(freeze(line, all_vars, pre_namespace)) for line in per_neuron_pre]
        per_neuron_post = [c_single_statement(freeze(line, all_vars, post_namespace)) for line in per_neuron_post]
        per_synapse_pre = [c_single_statement(freeze(line, all_vars, pre_namespace)) for line in per_synapse_pre]
        per_synapse_post = [c_single_statement(freeze(line, all_vars, post_namespace)) for line in per_synapse_post]

        per_neuron_pre = '\n'.join(per_neuron_pre)
        per_neuron_post = '\n'.join(per_neuron_post)
        per_synapse_pre = '\n'.join(per_synapse_pre)
        per_synapse_post = '\n'.join(per_synapse_post)

        # Neuron groups
        G_pre = NeuronGroup(len(C.source), model=sep_pre, clock=self.clock)
        G_post = NeuronGroup(len(C.target), model=sep_post, clock=self.clock)
        G_pre._S[:] = 0
        G_post._S[:] = 0
        self.pre_group = G_pre
        self.post_group = G_post
        var_group = {}
        for i, v in enumerate(vars_pre):
            var_group[v] = G_pre
        for i, v in enumerate(vars_post):
            var_group[v] = G_post
        self.var_group = var_group

        self.contained_objects += [G_pre, G_post]

        vars_pre_ind = {}
        for i, var in enumerate(vars_pre):
            vars_pre_ind[var] = i
        vars_post_ind = {}
        for i, var in enumerate(vars_post):
            vars_post_ind[var] = i

        prevars_dict = dict((k, G_pre.state(k)) for k in vars_pre)
        postvars_dict = dict((k, G_post.state(k)) for k in vars_post)

        clipcode = ''
        if isfinite(wmin):
            clipcode += 'if(w<%wmin%) w = %wmin%;\n'.replace('%wmin%', repr(float(wmin)))
        if isfinite(wmax):
            clipcode += 'if(w>%wmax%) w = %wmax%;\n'.replace('%wmax%', repr(float(wmax)))

        if not isinstance(C, DelayConnection):
            precode = iterate_over_spikes('_j', '_spikes',
                        (load_required_variables('_j', prevars_dict),
                         transform_code(per_neuron_pre),
                         iterate_over_row('_k', 'w', C.W, '_j',
                            (load_required_variables('_k', postvars_dict),
                             transform_code(per_synapse_pre),
                             ConnectionCode(clipcode)))))
            postcode = iterate_over_spikes('_j', '_spikes',
                        (load_required_variables('_j', postvars_dict),
                         transform_code(per_neuron_post),
                         iterate_over_col('_i', 'w', C.W, '_j',
                            (load_required_variables('_i', prevars_dict),
                             transform_code(per_synapse_post),
                             ConnectionCode(clipcode)))))
            log_debug('brian.experimental.c_stdp', 'CSTDP Pre code:\n' + str(precode))
            log_debug('brian.experimental.c_stdp', 'CSTDP Post code:\n' + str(postcode))
            connection_delay = C.delay * C.source.clock.dt
            if (delay_pre is None) and (delay_post is None): # same delays as the Connection C
                delay_pre = connection_delay
                delay_post = 0 * ms
            elif delay_pre is None:
                delay_pre = connection_delay - delay_post
                if delay_pre < 0 * ms: raise AttributeError, "Postsynaptic delay is too large"
            elif delay_post is None:
                delay_post = connection_delay - delay_pre
                if delay_post < 0 * ms: raise AttributeError, "Postsynaptic delay is too large"
            # create forward and backward Connection objects or SpikeMonitor objects
            pre_updater = SpikeMonitor(C.source, function=precode, delay=delay_pre)
            post_updater = SpikeMonitor(C.target, function=postcode, delay=delay_post)
            updaters = [pre_updater, post_updater]
            self.contained_objects += [pre_updater, post_updater]
        else:
            if delay_pre is not None or delay_post is not None:
                raise ValueError("Must use delay_pre=delay_post=None for the moment.")
            max_delay = C._max_delay * C.target.clock.dt
            # Ensure that the source and target neuron spikes are kept for at least the
            # DelayConnection's maximum delay
            C.source.set_max_delay(max_delay)
            C.target.set_max_delay(max_delay)

            self.G_pre_monitors = {}
            self.G_post_monitors = {}
            self.G_pre_monitors.update(((var, RecentStateMonitor(G_pre, vars_pre_ind[var], duration=(C._max_delay + 1) * C.target.clock.dt, clock=G_pre.clock)) for var in vars_pre))
            self.G_post_monitors.update(((var, RecentStateMonitor(G_post, vars_post_ind[var], duration=(C._max_delay + 1) * C.target.clock.dt, clock=G_post.clock)) for var in vars_post))
            self.contained_objects += self.G_pre_monitors.values()
            self.contained_objects += self.G_post_monitors.values()

            prevars_dict_delayed = dict((k, self.G_pre_monitors[k]) for k in prevars_dict.keys())
            postvars_dict_delayed = dict((k, self.G_post_monitors[k]) for k in postvars_dict.keys())

            precode_immediate = iterate_over_spikes('_j', '_spikes',
                                    (load_required_variables('_j', prevars_dict),
                                     transform_code(per_neuron_pre)))
            precode_delayed = iterate_over_spikes('_j', '_spikes',
                                     iterate_over_row('_k', 'w', C.W, '_j', extravars={'_delay':C.delayvec},
                                        code=(
                                         ConnectionCode('double _t_past = _max_delay-_delay;', vars={'_max_delay':float(max_delay)}),
                                         load_required_variables_pastvalue('_k', '_t_past', postvars_dict_delayed),
                                         transform_code(per_synapse_pre),
                                         ConnectionCode(clipcode))))
            postcode = iterate_over_spikes('_j', '_spikes',
                            (load_required_variables('_j', postvars_dict),
                             transform_code(per_neuron_post),
                             iterate_over_col('_i', 'w', C.W, '_j', extravars={'_delay':C.delayvec},
                                code=(
                                 load_required_variables_pastvalue('_i', '_delay', prevars_dict_delayed),
                                 transform_code(per_synapse_post),
                                 ConnectionCode(clipcode)))))
            log_debug('brian.experimental.c_stdp', 'CSTDP Pre code (immediate):\n' + str(precode_immediate))
            log_debug('brian.experimental.c_stdp', 'CSTDP Pre code (delayed):\n' + str(precode_delayed))
            log_debug('brian.experimental.c_stdp', 'CSTDP Post code:\n' + str(postcode))
            pre_updater_immediate = SpikeMonitor(C.source, function=precode_immediate)
            pre_updater_delayed = SpikeMonitor(C.source, function=precode_delayed, delay=max_delay)
            post_updater = SpikeMonitor(C.target, function=postcode)
            updaters = [pre_updater_immediate, pre_updater_delayed, post_updater]
            self.contained_objects += updaters
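
The clipping code above is generated as a plain C string, with finite bounds substituted in literally and infinite bounds producing no code at all. A self-contained sketch of that template step (make_clipcode is a hypothetical wrapper around the two lines in the example):

import numpy as np

def make_clipcode(wmin=0.0, wmax=np.inf):
    clipcode = ''
    if np.isfinite(wmin):
        clipcode += 'if(w<%wmin%) w = %wmin%;\n'.replace('%wmin%', repr(float(wmin)))
    if np.isfinite(wmax):
        clipcode += 'if(w>%wmax%) w = %wmax%;\n'.replace('%wmax%', repr(float(wmax)))
    return clipcode

print(make_clipcode(0.0, 0.01))
# if(w<0.0) w = 0.0;
# if(w>0.01) w = 0.01;
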