def __init__(self, population, name=None, scaling=1.0, refractory=None):
    """
    *Parameters*:

    * **population**: the Population to convert. Its neuron type must be rate-coded.
    * **name**: the (optional) name of the hybrid population.
    * **scaling**: the scaling of the firing rate. Defines what a rate ``r`` of 1.0 means in Hz (default: 1.0).
    * **refractory**: a refractory period in ms to ensure the ISI is not too small (default: None)
    """
    self.population = population

    if not self.population.neuron_type.description['type'] == 'rate':
        Global._error('the population ' + self.population.name + ' must contain rate-coded neurons.')

    # Create the description, but it will not be used for generation
    Population.__init__(
        self,
        geometry=self.population.geometry,
        name=name,
        neuron=Neuron(
            parameters="""
                scaling = %(scaling)s : population
            """ % {'scaling': scaling},
            equations="""
                p = Uniform(0.0, 1.0)
                rates = p
            """,
            spike="rates > p",
            refractory=refractory
        )
    )
    self._specific = True
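# Usage sketch (illustrative, not part of the snippet above): converting a
# rate-coded population into a spiking one. The import path of the converter
# is an assumption; in ANNarchy it is typically exposed through an extensions
# module as Rate2SpikePopulation.
from ANNarchy import Population, Neuron
from ANNarchy.extensions.hybrid import Rate2SpikePopulation  # assumed export

rates = Population(geometry=100, neuron=Neuron(equations="r = 0.5"))
# With scaling=100.0, a rate r of 1.0 corresponds to 100 Hz; the refractory
# period (in ms) prevents the ISI from becoming arbitrarily small.
spiking = Rate2SpikePopulation(rates, scaling=100.0, refractory=2.0)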
def _replace_random(self, loc_eqs, glob_eqs, random_distributions):
    """
    Replaces the rand_%(id)s placeholders by the corresponding curand... term.
    """
    # double-precision methods have a postfix
    prec_extension = "" if Global.config['precision'] == "float" else "_double"

    for rd in random_distributions:
        if rd['dist'] == "Uniform":
            term = """( curand_uniform%(postfix)s( &%(rd)s[i] ) * (%(max)s - %(min)s) + %(min)s )""" % {'postfix': prec_extension, 'rd': rd['name'], 'min': rd['args'].split(',')[0], 'max': rd['args'].split(',')[1]}
            loc_eqs = loc_eqs.replace(rd['name'] + "[i]", term)

            term = """( curand_uniform%(postfix)s( &%(rd)s[0] ) * (%(max)s - %(min)s) + %(min)s )""" % {'postfix': prec_extension, 'rd': rd['name'], 'min': rd['args'].split(',')[0], 'max': rd['args'].split(',')[1]}
            glob_eqs = glob_eqs.replace(rd['name'] + "[0]", term)

        elif rd['dist'] == "Normal":
            term = """( curand_normal%(postfix)s( &%(rd)s[i] ) * %(sigma)s + %(mean)s )""" % {'postfix': prec_extension, 'rd': rd['name'], 'mean': rd['args'].split(",")[0], 'sigma': rd['args'].split(",")[1]}
            loc_eqs = loc_eqs.replace(rd['name'] + "[i]", term)

            term = """( curand_normal%(postfix)s( &%(rd)s[0] ) * %(sigma)s + %(mean)s )""" % {'postfix': prec_extension, 'rd': rd['name'], 'mean': rd['args'].split(",")[0], 'sigma': rd['args'].split(",")[1]}
            glob_eqs = glob_eqs.replace(rd['name'] + "[0]", term)

        elif rd['dist'] == "LogNormal":
            term = """( curand_log_normal%(postfix)s( &%(rd)s[i], %(mean)s, %(std_dev)s) )""" % {'postfix': prec_extension, 'rd': rd['name'], 'mean': rd['args'].split(',')[0], 'std_dev': rd['args'].split(',')[1]}
            loc_eqs = loc_eqs.replace(rd['name'] + "[i]", term)

            term = """( curand_log_normal%(postfix)s( &%(rd)s[0], %(mean)s, %(std_dev)s) )""" % {'postfix': prec_extension, 'rd': rd['name'], 'mean': rd['args'].split(',')[0], 'std_dev': rd['args'].split(',')[1]}
            glob_eqs = glob_eqs.replace(rd['name'] + "[0]", term)

        else:
            Global._error("Unsupported random distribution on GPUs: " + rd['dist'])

    # set indices
    loc_eqs = loc_eqs % {'global_index': '[0]'}
    glob_eqs = glob_eqs % {'global_index': '[0]'}

    return loc_eqs, glob_eqs
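# Standalone toy re-creation of the Uniform branch above, to show what the
# substitution does on a sample equation string (all names are invented):
prec_extension = ""  # float precision
rd = {'name': 'rand_0', 'args': '0.0, 1.0'}
loc_eqs = "r[i] += rand_0[i];"

term = "( curand_uniform%(postfix)s( &%(rd)s[i] ) * (%(max)s - %(min)s) + %(min)s )" % {
    'postfix': prec_extension, 'rd': rd['name'],
    'min': rd['args'].split(',')[0], 'max': rd['args'].split(',')[1]}
print(loc_eqs.replace(rd['name'] + "[i]", term))
# -> r[i] += ( curand_uniform( &rand_0[i] ) * ( 1.0 - 0.0) + 0.0 );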
def __init__(self, population, name=None, mode='window', window=100.0, scaling=1.0, smooth=1.0, cut=3.0):
    """
    *Parameters*:

    * **population**: the Population to convert. Its neuron type must be spiking.
    * **name**: the (optional) name of the hybrid population.
    * **mode**: mode of computation of the firing rate: ``'window'`` (default), ``'adaptive'`` or ``'isi'``.
    * **window**: the extent of the sliding window (in ms) used to compute the firing rate in the ``'window'`` mode (default: 100.0 ms).
    * **cut**: cutting frequency of the ``'isi'`` kernel (default: 3.0).
    * **scaling**: the scaling of the firing rate. Defines what a firing rate of 1 Hz outputs (default: 1.0).
    * **smooth**: time constant (in ms) of the low-pass filter used to smooth the firing rate (default: 1 ms, i.e. no smoothing).
    """
    self.population = population
    self.name = name
    self.mode = mode
    self.scaling = scaling
    self.window = window
    self.smooth = smooth
    self.cut = cut

    if not self.population.neuron_type.description['type'] == 'spike':
        Global._error('the population ' + self.population.name + ' must contain spiking neurons.')

    if self.mode == 'window':
        self._code = self._create_window()
    elif self.mode == 'adaptive':
        self._code = self._create_adaptive()
    elif self.mode == 'isi':
        self._code = self._create_isi()
    else:
        Global._error('Spike2RatePopulation: Unknown method ' + self.mode)

    self._specific = True
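# Usage sketch (illustrative): reading out a smoothed firing rate from a
# spiking population. The import path of the converter is an assumption.
from ANNarchy import Population, Izhikevich
from ANNarchy.extensions.hybrid import Spike2RatePopulation  # assumed export

spiking = Population(geometry=100, neuron=Izhikevich)
# Sliding window of 100 ms, smoothed with a 200 ms low-pass filter
rates = Spike2RatePopulation(spiking, mode='window', window=100.0, smooth=200.0)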
def start_record(self, variable, period=None, ranks="all"):
    """
    **Deprecated!!** Start recording neural variables.

    Parameters:

    * **variable**: single variable name or list of variable names.
    * **period**: delay in ms between two recordings (default: dt). Not valid for the ``spike`` variable.
    * **ranks**: list of ranks of the neurons to record (default: 'all').

    Example::

        pop1.start_record('r')
        pop2.start_record(['mp', 'r'], period=10.0)
        pop3.start_record(['spike'])
        pop4.start_record(['r'], ranks=range(10, 100))
    """
    Global._warning("recording from a Population is deprecated, use a Monitor instead.")
    from .Record import Monitor
    if ranks == "all":
        self._monitor = Monitor(self, variable, period=period)
    else:
        self._monitor = Monitor(PopulationView(self, ranks), variable, period=period)
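# Non-deprecated equivalent with a Monitor, as recommended by the warning
# above (pop1 is assumed to be an existing Population):
from ANNarchy import Monitor

m = Monitor(pop1, ['r'], period=10.0)   # record r every 10 ms
# ... simulate(...) ...
data = m.get('r')                       # retrieve the recorded values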
def extract_spike_variable(description):
    cond = prepare_string(description['raw_spike'])
    if len(cond) > 1:
        Global._print(description['raw_spike'])
        Global._error('The spike condition must be a single expression')

    translator = Equation('raw_spike_cond', cond[0].strip(), description)
    raw_spike_code = translator.parse()
    # Also store the variables used in the condition, as they may be needed for CUDA generation
    spike_code_dependencies = translator.dependencies()

    reset_desc = []
    if 'raw_reset' in description.keys() and description['raw_reset']:
        reset_desc = process_equations(description['raw_reset'])
        for var in reset_desc:
            translator = Equation(var['name'], var['eq'], description)
            var['cpp'] = translator.parse()
            var['dependencies'] = translator.dependencies()

    return {
        'spike_cond': raw_spike_code,
        'spike_cond_dependencies': spike_code_dependencies,
        'spike_reset': reset_desc
    }
def rank_from_coordinates(self, coord):
    """
    Returns the rank of a neuron based on coordinates.

    *Parameter*:

    * **coord**: coordinate tuple, can be multidimensional.
    """
    try:
        rank = self._rank_from_coord(coord, self.geometry)
    except:
        Global._error(
            "rank_from_coordinates(): There is no neuron of coordinates", coord,
            "in the population", self.name, self.geometry)

    if rank >= self.size:
        Global._error(
            "rank_from_coordinates(), neuron", str(coord),
            ": the population", self.name,
            "has only", self.size,
            "neurons (geometry " + str(self.geometry) + ").")
    else:
        return rank
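# Quick illustration, assuming row-major ordering of the ranks (as used by
# Numpy and, presumably, by self._rank_from_coord); my_neuron is hypothetical:
pop = Population(geometry=(4, 5), neuron=my_neuron)
rank = pop.rank_from_coordinates((2, 3))  # 2*5 + 3 = 13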
def neuron(self, *coord):
    """
    Returns an ``IndividualNeuron`` object wrapping the neuron with the provided rank or coordinates.
    """
    # Transform arguments
    if len(coord) == 1:
        if isinstance(coord[0], int):
            rank = coord[0]
            if not rank < self.size:
                Global._error(
                    " when accessing neuron", str(rank),
                    ": the population", self.name,
                    "has only", self.size,
                    "neurons (geometry " + str(self.geometry) + ").")
        else:
            rank = self.rank_from_coordinates(coord[0])
            if rank is None:
                return None
    else:  # a tuple
        rank = self.rank_from_coordinates(coord)
        if rank is None:
            return None

    # Return the corresponding neuron
    return IndividualNeuron(self, rank)
def __init__(self, rates, schedule=0., period=-1., name=None, copied=False):
    neuron = Neuron(
        parameters="",
        equations=" r = 0.0",
        name="Timed Array",
        description="Timed array source."
    )

    # Geometry of the population
    geometry = rates.shape[1:]

    # Check the schedule
    if isinstance(schedule, (int, float)):
        if float(schedule) <= 0.0:
            schedule = Global.config['dt']
        schedule = [float(schedule * i) for i in range(rates.shape[0])]

    if len(schedule) > rates.shape[0]:
        Global._error('TimedArray: the length of the schedule parameter cannot exceed the first dimension of the rates parameter.')

    if len(schedule) < rates.shape[0]:
        Global._warning('TimedArray: the length of the schedule parameter is smaller than the first dimension of the rates parameter (more data than time points). Make sure it is what you expect.')

    SpecificPopulation.__init__(self, geometry=geometry, neuron=neuron, name=name, copied=copied)

    self.init['schedule'] = schedule
    self.init['rates'] = rates
    self.init['period'] = period
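# Usage sketch: a TimedArray feeding 10 successive patterns to 100 neurons,
# one pattern every 100 ms, repeating the whole sequence every 1000 ms.
import numpy as np
from ANNarchy import TimedArray

inputs = np.random.uniform(0.0, 1.0, (10, 100))
inp = TimedArray(rates=inputs, schedule=100.0, period=1000.0)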
def get_populations(self):
    """
    Returns a list of all declared populations in this network.
    """
    if self.populations == []:
        Global._warning("Network.get_populations(): no populations attached to this network.")

    return self.populations
def connect_from_matrix(self, weights, delays=0.0, pre_post=False):
    """
    Builds a connection pattern according to a dense connectivity matrix.

    The matrix must be N*M, where N is the number of neurons in the post-synaptic population and M in the pre-synaptic one. Lists of lists must have the same size.

    If a synapse should not be created, the weight value should be None.

    *Parameters*:

    * **weights**: a matrix or list of lists representing the weights. If a value is None, the synapse will not be created.
    * **delays**: a matrix or list of lists representing the delays. Must represent the same synapses as weights. If the argument is omitted, delays are 0.
    * **pre_post**: states which index is first. By default, the first dimension is related to the post-synaptic population. If ``pre_post`` is True, the first dimension is the pre-synaptic population.
    """
    # Store the synapses
    self.connector_name = "Connectivity matrix"
    self.connector_description = "Connectivity matrix"

    if isinstance(weights, list):
        try:
            weights = np.array(weights)
        except:
            Global._error('connect_from_matrix(): You must provide a dense 2D matrix.')

    self._store_connectivity(self._load_from_matrix, (weights, delays, pre_post), delays)
    return self
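# Usage sketch (pre and post are assumed, existing Populations with 3 and 2
# neurons respectively); None entries create no synapse:
from ANNarchy import Projection

w = [[None, 0.5, 0.2],
     [0.1, None, 0.3]]
proj = Projection(pre, post, target='exc')
proj.connect_from_matrix(weights=w, delays=2.0)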
def set_variable_equation(self, name, equation):
    """
    Changes the equation of a variable for the projection.

    If the variable ``w`` is defined in the Synapse description through::

        eta * dw/dt = pre.r * post.r

    one can change the equation with::

        proj.set_variable_equation('w', 'eta * dw/dt = pre.r * (post.r - 0.1) ')

    Only the equation should be provided, the flags have to be changed with ``set_variable_flags()``.

    .. warning::

        This method should be used with great care, it is advised to define another Synapse object instead.

    *Parameters*:

    * **name**: the name of the variable.
    * **equation**: the new equation as string.
    """
    rk_var = self._find_variable_index(name)
    if rk_var == -1:
        Global._error('The projection ' + self.name + ' has no variable called ' + name)
        return

    self.synapse_type.description['variables'][rk_var]['eq'] = equation
def check_and_apply_pow_fix(eqs):
    """
    CUDA SDKs before 7.5 raised an error when std=c++11 was enabled and pow(double, int) was used:
    only pow(double, double) was detected as a device function, while pow(double, int) was treated
    as a host function. (This was fixed in SDK 7.5.)

    To support earlier versions, we simply add a double type cast.
    """
    if eqs.strip() == "":
        # nothing to do
        return eqs

    try:
        from ANNarchy.generator.CudaCheck import CudaCheck
        if CudaCheck().runtime_version() > 7000:
            # nothing to do, is working in higher SDKs
            return eqs
    except:
        Global._error('CUDA is not correctly installed on your system')

    if Global.config['verbose']:
        Global._print('occurrence of pow() and SDK below 7.5 detected, applying fix.')

    # detect all pow(..., int) statements
    pow_occur = re.findall(r"pow\([\S\s]*?, \d+\)", eqs)
    for term in pow_occur:
        eqs = eqs.replace(term, term.replace(', ', ', (double)'))

    return eqs
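# Standalone demonstration of the applied fix (same regex and replacement):
import re

eqs = "double r = pow(x, 2) + pow(y, 3);"
for term in re.findall(r"pow\([\S\s]*?, \d+\)", eqs):
    eqs = eqs.replace(term, term.replace(', ', ', (double)'))
print(eqs)  # double r = pow(x, (double)2) + pow(y, (double)3);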
def connect_dog(self, amp_pos, sigma_pos, amp_neg, sigma_neg, delays=0.0, limit=0.01, allow_self_connections=False):
    """
    Builds a Difference-Of-Gaussians connection pattern between the two populations.

    Each neuron in the postsynaptic population is connected to a region of the presynaptic population centered around the neuron with the same normalized coordinates using a Difference-Of-Gaussians profile.

    *Parameters*:

    * **amp_pos**: amplitude of the positive Gaussian function
    * **sigma_pos**: width of the positive Gaussian function
    * **amp_neg**: amplitude of the negative Gaussian function
    * **sigma_neg**: width of the negative Gaussian function
    * **delays**: synaptic delay, either a single value or a random distribution object (default=dt).
    * **limit**: proportion of *amp* below which synapses are not created (default: 0.01)
    * **allow_self_connections**: allows connections between a neuron and itself.
    """
    if self.pre != self.post:
        allow_self_connections = True

    if isinstance(self.pre, PopulationView) or isinstance(self.post, PopulationView):
        Global._error('DoG connector is only possible on whole populations, not PopulationViews.')

    self.connector_name = "Difference-of-Gaussian"
    self.connector_description = "Difference-of-Gaussian, $A^+$ %(Aplus)s, $\\sigma^+$ %(sigmaplus)s, $A^-$ %(Aminus)s, $\\sigma^-$ %(sigmaminus)s, delays %(delay)s" % {'Aplus': str(amp_pos), 'sigmaplus': str(sigma_pos), 'Aminus': str(amp_neg), 'sigmaminus': str(sigma_neg), 'delay': _process_random(delays)}

    self._store_connectivity(dog, (amp_pos, sigma_pos, amp_neg, sigma_neg, delays, limit, allow_self_connections, "lil", "post_to_pre"), delays, "lil", "post_to_pre")
    return self
def __init__(self, pre, post, target, psp="w * pre.r", operation="sum"):
    """
    Projection based on shared weights: each post-synaptic neuron uses the same weights, so they need to be instantiated only once to save memory.

    Learning is not possible for now. The ``synapse`` argument is removed, replaced by a single ``psp`` argument to modify what is summed, and ``operation`` to replace the summation by max-pooling or similar operations.

    *Parameters*:

    * **pre**: pre-synaptic population (either its name or a ``Population`` object).
    * **post**: post-synaptic population (either its name or a ``Population`` object).
    * **target**: type of the connection.
    * **psp**: function to be summed. By default: ``w * pre.r``
    * **operation**: function applied on ``psp`` ("sum", "max", "min", "mean"). "sum" is the default.
    """
    # Create the description, but it will not be used for generation
    Projection.__init__(
        self,
        pre,
        post,
        target,
        synapse=SharedSynapse(psp=psp, operation=operation)
    )
    self._omp_config['psp_schedule'] = 'schedule(dynamic)'

    if not Global.config["paradigm"] == "openmp":
        Global._error('Weight sharing is only implemented for the OpenMP paradigm.')
        exit(0)

    if not pre.neuron_type.type == 'rate':
        Global._error('Weight sharing is only implemented for rate-coded populations.')
        exit(0)
def _function(self, func):
    "Access a user-defined function"
    if not self.initialized:
        Global._warning('the network is not compiled yet, cannot access the function ' + func)
        return

    return getattr(self.cyInstance, func)
def sum(self, target):
    """
    Returns the array of weighted sums corresponding to the target::

        excitatory = pop.sum('exc')

    For spiking networks, this is equivalent to accessing the conductances directly::

        excitatory = pop.g_exc

    If no incoming projection has the given target, the method returns zeros.

    *Parameter:*

    * **target**: the desired projection target.

    **Note:** it is not possible to distinguish the original population when the same target is used.
    """
    # Check if the network is initialized
    if not self.initialized:
        Global._warning('sum(): the population', self.name, 'is not initialized yet.')
        return np.zeros(self.geometry)

    # Check if a projection has this type
    if not target in self.targets:
        Global._warning('sum(): the population', self.name, 'receives no projection with the target', target)
        return np.zeros(self.geometry)

    # Spiking neurons already have conductances available
    if self.neuron_type.type == 'spike':
        return getattr(self, 'g_' + target)

    # Otherwise, call the Cython method
    return getattr(self.cyInstance, 'get_sum_' + target)()
def reset(self, attributes=-1):
    """
    Resets all parameters and variables of the population to the value they had before the call to compile().

    *Parameters:*

    * **attributes**: list of attributes (parameter or variable) which should be reinitialized. Default: all attributes.
    """
    if attributes == -1:
        try:
            self.set(self.init)
        except Exception as e:
            Global._print(e)
            Global._error("Population.reset(): something went wrong while resetting the attributes.")
    else:  # only some of them
        for var in attributes:
            # check that it exists
            if not var in self.attributes:
                Global._warning("Population.reset():", var, "is not an attribute of the population, skipping.")
                continue

            try:
                self.__setattr__(var, self.init[var])
            except Exception as e:
                Global._print(e)
                Global._warning("Population.reset(): something went wrong while resetting", var)

    self.cyInstance.activate(self.enabled)
    self.cyInstance.reset()
def histogram(data, binsize=Global.config['dt']):
    """
    **Deprecated!!** Returns for each recorded simulation step the number of spikes occurring in the population.

    *Parameters*:

    * **data**: the dictionary returned by the get_record() method for the population.
    * **binsize**: the duration in milliseconds where spikes are averaged (default: dt).
    """
    Global._warning("histogram() is deprecated, use a Monitor instead.")

    if isinstance(data['start'], int):  # only one recording
        duration = data['stop'] - data['start']
    else:
        duration = 0
        for t in range(len(data['start'])):
            duration += data['stop'][t] - data['start'][t]

    nb_neurons = len(data['data'])
    nb_bins = int(duration * Global.config['dt'] / binsize)
    spikes = [0 for t in range(nb_bins)]
    for neuron in range(nb_neurons):
        for t in data['data'][neuron]:
            spikes[int(t / float(binsize / Global.config['dt']))] += 1
    return np.array(spikes)
def connect_fixed_number_post(self, number, weights=1.0, delays=0.0, allow_self_connections=False, force_multiple_weights=False):
    """
    Builds a connection pattern between the two populations with a fixed number of post-synaptic neurons.

    Each neuron in the pre-synaptic population sends connections to a fixed number of neurons of the post-synaptic population chosen randomly.

    *Parameters*:

    * **number**: number of synapses per pre-synaptic neuron.
    * **weights**: either a single value for all synapses or a RandomDistribution object.
    * **delays**: either a single value for all synapses or a RandomDistribution object (default = dt)
    * **allow_self_connections**: defines if self-connections are allowed (default=False)
    * **force_multiple_weights**: if a single value is provided for ``weights`` and there is no learning, a single weight value will be used for the whole projection instead of one per synapse. Setting ``force_multiple_weights`` to True ensures that a value per synapse will be used.
    """
    if self.pre != self.post:
        allow_self_connections = True

    if number > self.post.size:
        Global._error('connect_fixed_number_post: the number of post-synaptic neurons exceeds the size of the population.')

    self.connector_name = "Random Divergent"
    self.connector_description = "Random Divergent 1 $\\rightarrow$ %(number)s, weights %(weight)s, delays %(delay)s" % {'weight': _process_random(weights), 'delay': _process_random(delays), 'number': number}

    if isinstance(weights, (int, float)) and not force_multiple_weights:
        self._single_constant_weight = True

    self._store_connectivity(fixed_number_post, (number, weights, delays, allow_self_connections, "lil", "post_to_pre"), delays, "lil", "post_to_pre")
    return self
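# Usage sketch (pop1 and pop2 are assumed, existing Populations): every
# pre-synaptic neuron contacts 20 randomly chosen post-synaptic neurons.
from ANNarchy import Projection, Uniform

proj = Projection(pre=pop1, post=pop2, target='exc')
proj.connect_fixed_number_post(number=20, weights=Uniform(0.0, 1.0), delays=2.0)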
def _set_cython_attribute(self, attribute, value):
    """
    Sets the value of the given attribute for all neurons in the population,
    as a NumPy array having the same geometry as the population if it is local.

    *Parameters:*

    * **attribute**: should be a string representing the variable's name.
    * **value**: a value or Numpy array of the right size.
    """
    try:
        if attribute in self.neuron_type.description['local']:
            if isinstance(value, np.ndarray):
                getattr(self.cyInstance, 'set_' + attribute)(value.reshape(self.size))
            elif isinstance(value, list):
                getattr(self.cyInstance, 'set_' + attribute)(np.array(value).reshape(self.size))
            else:
                getattr(self.cyInstance, 'set_' + attribute)(value * np.ones(self.size))
        else:
            getattr(self.cyInstance, 'set_' + attribute)(value)
    except Exception as e:
        Global._debug(e)
        err_msg = """Population.set(): either the variable '%(attr)s' does not exist in the population '%(pop)s', or the provided array does not have the right size."""
        Global._error(err_msg % {'attr': attribute, 'pop': self.name})
def connect_gaussian(self, amp, sigma, delays=0.0, limit=0.01, allow_self_connections=False):
    """
    Builds a Gaussian connection pattern between the two populations.

    Each neuron in the postsynaptic population is connected to a region of the presynaptic population centered around the neuron with the same normalized coordinates using a Gaussian profile.

    *Parameters*:

    * **amp**: amplitude of the Gaussian function
    * **sigma**: width of the Gaussian function
    * **delays**: synaptic delay, either a single value or a random distribution object (default=dt).
    * **limit**: proportion of *amp* below which synapses are not created (default: 0.01)
    * **allow_self_connections**: allows connections between a neuron and itself.
    """
    if self.pre != self.post:
        allow_self_connections = True

    if isinstance(self.pre, PopulationView) or isinstance(self.post, PopulationView):
        Global._error('Gaussian connector is only possible on whole populations, not PopulationViews.')

    self.connector_name = "Gaussian"
    self.connector_description = "Gaussian, $A$ %(A)s, $\\sigma$ %(sigma)s, delays %(delay)s" % {'A': str(amp), 'sigma': str(sigma), 'delay': _process_random(delays)}

    self._store_connectivity(Connector.gaussian, (amp, sigma, delays, limit, allow_self_connections), delays)
    return self
def _init_attributes(self):
    """ Method used after compilation to initialize the attributes."""
    # Initialize the population
    self.initialized = True

    # Transfer the initial values of all attributes
    for name, value in self.init.items():
        if isinstance(value, Global.Constant):
            self.__setattr__(name, value.value)
        else:
            self.__setattr__(name, value)

    # Activate the population
    self.cyInstance.activate(self.enabled)

    # Reset to generate the right structures
    self.cyInstance.reset()

    # If the spike population has a refractory period:
    if self.neuron_type.type == 'spike' and self.neuron_type.description['refractory']:
        if isinstance(self.neuron_type.description['refractory'], str):  # a global variable
            try:
                self.refractory = eval('self.' + self.neuron_type.description['refractory'])
            except Exception as e:
                Global._print(e, self.neuron_type.description['refractory'])
                Global._error('The initialization for the refractory period is not valid.')
        else:  # a value
            self.refractory = self.neuron_type.description['refractory']

    # Spiking neurons can compute a mean FR
    if self.neuron_type.type == 'spike':
        getattr(self.cyInstance, 'compute_firing_rate')(self._compute_mean_fr)
def set_image(self, image_name):
    """
    Sets an image (.png, .jpg or whatever is supported by PIL) into the firing rate of the population.

    If the image has a different size from the population, it will be resized.
    """
    try:
        im = Image.open(image_name)
    except:  # image does not exist
        Global._error('The image ' + image_name + ' does not exist.')
        exit(0)

    # Resize the image if needed
    (width, height) = (self.geometry[1], self.geometry[0])
    if im.size != (width, height):
        Global._warning('The image ' + image_name + ' does not have the same size ' + str(im.size) + ' as the population ' + str((width, height)) + '. It will be resized.')
        im = im.resize((width, height))

    # Check if only the luminance should be extracted
    if self.dimension == 2 or self.geometry[2] == 1:
        im = im.convert("L")

    # Set the rate of the population
    if not Global._network[0]['compiled']:
        self.r = (np.array(im)) / 255.
    else:
        self.cyInstance.set_r(np.array(im).reshape(self.size) / 255.)
def __init__(self, geometry, name=None):
    """
    *Parameters*:

    * *geometry*: population geometry as tuple. It must correspond to the image size and be fixed through the whole simulation.

        * If the geometry is 2D, it corresponds to the (height, width) of the image. Only the luminance of the pixels will be represented (grayscale image).
        * If the geometry is 3D, the third dimension can be either 1 (grayscale) or 3 (color). If the third dimension is 3, the three components will correspond to the RGB values of the pixels.

        .. warning::

            Due to the indexing system of Numpy, a 640*480 image should be fed into a (480, 640) or (480, 640, 3) population.

    * *name*: unique name of the population (optional).
    """
    # Check geometry
    if isinstance(geometry, int) or len(geometry) == 1:
        Global._error('The geometry of an ImagePopulation should be 2D (grayscale) or 3D (color).')
        exit(0)

    if len(geometry) == 3 and (geometry[2] != 3 and geometry[2] != 1):
        Global._error('The third dimension of an ImagePopulation should be either 1 (grayscale) or 3 (color).')
        exit(0)

    if len(geometry) == 3 and geometry[2] == 1:
        geometry = (geometry[0], geometry[1])

    # Create the population
    Population.__init__(self, geometry=geometry, name=name, neuron=Neuron(parameters="r = 0.0"))
def __init__(self, spike_times, name=None):
    if not isinstance(spike_times, list):
        Global._error('in SpikeSourceArray, spike_times must be a Python list.')
        exit(0)

    if isinstance(spike_times[0], list):  # several neurons
        nb_neurons = len(spike_times)
    else:  # a single neuron
        nb_neurons = 1
        spike_times = [spike_times]

    # Create a fake neuron just to be sure the description has the correct parameters
    neuron = Neuron(
        parameters="""
            spike_times = 0.0
        """,
        equations="",
        spike=" t == spike_times",
        reset="",
        name="Spike source",
        description="Spike source array."
    )

    Population.__init__(self, geometry=nb_neurons, neuron=neuron, name=name)

    # Do some sorting to save C++ complexity
    times = []
    for neur_times in spike_times:
        times.append(sorted(list(set(neur_times))))  # remove duplicates and sort
    self.init['spike_times'] = times
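# Usage sketch: two source neurons with precomputed spike times (in ms).
from ANNarchy import SpikeSourceArray

sources = SpikeSourceArray(spike_times=[[10.0, 20.0, 50.0],
                                        [15.0, 40.0]])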
def _data(self):
    "Method gathering all info about the projection when calling save()"
    if not self.initialized:
        Global._error('save(): the network has not been compiled yet.')

    desc = {}
    desc['name'] = self.name
    desc['pre'] = self.pre.name
    desc['post'] = self.post.name
    desc['target'] = self.target
    desc['post_ranks'] = self.post_ranks
    desc['attributes'] = self.attributes
    desc['parameters'] = self.parameters
    desc['variables'] = self.variables
    desc['pre_ranks'] = self.cyInstance.pre_rank_all()
    desc['delays'] = self._get_delay()

    # Attributes to save
    attributes = self.attributes
    if not 'w' in self.attributes:
        attributes.append('w')

    # Save all attributes
    for var in attributes:
        try:
            desc[var] = getattr(self.cyInstance, 'get_' + var)()
        except:
            Global._warning('Cannot save the attribute ' + var + ' in the projection.')

    return desc
def parse(self, part=None):
    if not part:
        part = self.eq

    expression = transform_condition(part)

    # Check if there is a == in the condition
    if '==' in expression:
        # Is it the only term, or are there other operations?
        if '&' in expression or '|' in expression:
            expression = re.sub(r'([\w\s.]+)==([\w\s.]+)', r'Equality(\1, \2)', expression)
        else:
            terms = expression.split('==')
            expression = 'Equality(' + terms[0] + ', ' + terms[1] + ')'

    # Check if there is a != in the condition
    if '!=' in expression:
        # Is it the only term, or are there other operations?
        if '&' in expression or '|' in expression:
            expression = re.sub(r'([\w\s.]+)!=([\w\s.]+)', r'Not(Equality(\1, \2))', expression)
        else:
            terms = expression.split('!=')
            expression = 'Not(Equality(' + terms[0] + ', ' + terms[1] + '))'

    try:
        eq = parse_expr(expression,
                        local_dict=self.local_dict,
                        transformations=(auto_number, convert_xor,))
    except:
        Global._print(expression)
        Global._error('The function depends on unknown variables.')

    return ccode(eq, precision=8, user_functions=self.user_functions)
def __init__(self, spike_times, name=None):
    if not isinstance(spike_times, list):
        Global._error('In a SpikeSourceArray, spike_times must be a Python list.')

    if isinstance(spike_times[0], list):  # several neurons
        nb_neurons = len(spike_times)
    else:  # a single neuron
        nb_neurons = 1
        spike_times = [spike_times]

    # Create a fake neuron just to be sure the description has the correct parameters
    neuron = Neuron(
        parameters="""
            spike_times = 0.0 : int
        """,
        equations="",
        spike=" t == spike_times",
        reset="",
        name="Spike source",
        description="Spike source array."
    )

    Population.__init__(self, geometry=nb_neurons, neuron=neuron, name=name)

    self.init['spike_times'] = spike_times
def save_connectivity(self, filename):
    """
    Saves the projection pattern in a file.

    Only the connectivity matrix, the weights and delays are saved, not the other synaptic variables.

    The generated data should be used to create a projection in another network::

        proj.connect_from_file(filename)

    *Parameters*:

    * **filename**: file where the data will be saved.
    """
    if not self.initialized:
        Global._error('save_connectivity(): the network has not been compiled yet.')
        return

    data = {
        'name': self.name,
        'post_ranks': self.post_ranks,
        'pre_ranks': self.cyInstance.pre_rank_all(),  # was: [self.cyInstance.pre_rank(n) for n in range(self.size)]
        'w': self.cyInstance.get_w(),
        'delay': self.cyInstance.get_delay() if hasattr(self.cyInstance, 'get_delay') else None,
        'max_delay': self.max_delay,
        'uniform_delay': self.uniform_delay,
        'size': self.size,
        'nb_synapses': sum([self.cyInstance.nb_synapses(n) for n in range(self.size)])
    }

    try:
        import cPickle as pickle  # Python 2
    except:
        import pickle  # Python 3

    with open(filename, 'wb') as wfile:
        pickle.dump(data, wfile, protocol=pickle.HIGHEST_PROTOCOL)
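# Round-trip sketch (proj, pop1 and pop2 are assumed): save the pattern of a
# compiled projection and reuse it in another network via connect_from_file().
proj.save_connectivity('connectivity.data')

new_proj = Projection(pre=pop1, post=pop2, target='exc')
new_proj.connect_from_file('connectivity.data')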
def connectivity_matrix(self, fill=0.0):
    """
    Returns a dense connectivity matrix (2D Numpy array) representing the connections between the pre- and post-populations.

    The first index of the matrix represents post-synaptic neurons, the second the pre-synaptic ones.

    If PopulationViews were used for creating the projection, the matrix is expanded to the whole populations by default.

    *Parameters*:

    * **fill**: value to put in the matrix when there is no connection (default: 0.0).
    """
    if isinstance(self.pre, PopulationView):
        size_pre = self.pre.population.size
    else:
        size_pre = self.pre.size
    if isinstance(self.post, PopulationView):
        size_post = self.post.population.size
    else:
        size_post = self.post.size

    res = np.ones((size_post, size_pre)) * fill
    for rank in self.post_ranks:
        idx = self.post_ranks.index(rank)
        try:
            preranks = self.cyInstance.pre_rank(idx)
            w = self.cyInstance.get_dendrite_w(idx)
        except:
            Global._error('The connectivity matrix can only be accessed after compilation')
            return []
        res[rank, preranks] = w
    return res
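# Quick inspection sketch (proj is an assumed, compiled Projection); requires
# matplotlib, which is not a dependency of this module.
import matplotlib.pyplot as plt

W = proj.connectivity_matrix(fill=0.0)  # shape: (post.size, pre.size)
plt.imshow(W, aspect='auto')
plt.colorbar()
plt.show()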
def _init_parameters_variables(self, proj, single_spmv_matrix):
    """
    Generate initialization code for variables / parameters of the projection *proj*.

    Returns 3 values:

        ret1 (str): weight initialization
        ret2 (str): delay initialization
        ret3 (str): other initializations (e.g. event-driven)
    """
    # Is it a specific projection?
    if 'init_parameters_variables' in proj._specific_template.keys():
        return proj._specific_template['init_parameters_variables']

    # Learning by default
    code = ""
    weight_code = ""

    # Choose initialization templates based on the chosen paradigm
    attr_init_tpl = self._templates['attribute_cpp_init']

    attributes = []

    # Initialize parameters
    for var in proj.synapse_type.description['parameters'] + proj.synapse_type.description['variables']:
        # Avoid duplicates
        if var['name'] in attributes:
            continue

        # Important to select which template
        locality = var['locality']
        attr_type = 'parameter' if var in proj.synapse_type.description['parameters'] else 'variable'

        # The synaptic weight
        if var['name'] == 'w':
            if var['locality'] == "global" or proj._has_single_weight():
                if cpp_connector_available(proj.connector_name, proj._storage_format, proj._storage_order):
                    weight_code = tabify("w = w_dist_arg1;", 2)
                else:
                    weight_code = tabify("w = values[0][0];", 2)

            elif var['locality'] == "local":
                if cpp_connector_available(proj.connector_name, proj._storage_format, proj._storage_order):
                    # Init weights in CPP
                    if proj.connector_weight_dist == None:
                        init_code = self._templates['attribute_cpp_init']['local'] % {
                            'init': 'w_dist_arg1',
                            'type': var['ctype'],
                            'attr_type': attr_type,
                            'name': var['name']
                        }

                    elif isinstance(proj.connector_weight_dist, ANNRandom.Uniform):
                        if single_spmv_matrix:
                            init_code = "w = init_matrix_variable_uniform<%(float_prec)s>(w_dist_arg1, w_dist_arg2, rng[0]);"
                        else:
                            init_code = "w = init_matrix_variable_uniform<%(float_prec)s>(w_dist_arg1, w_dist_arg2, rng);"

                    elif isinstance(proj.connector_weight_dist, ANNRandom.Normal):
                        if single_spmv_matrix:
                            init_code = "w = init_matrix_variable_normal<%(float_prec)s>(w_dist_arg1, w_dist_arg2, rng[0]);"
                        else:
                            init_code = "w = init_matrix_variable_normal<%(float_prec)s>(w_dist_arg1, w_dist_arg2, rng);"

                    elif isinstance(proj.connector_weight_dist, ANNRandom.LogNormal):
                        if proj.connector_weight_dist.min == None and proj.connector_weight_dist.max == None:
                            if single_spmv_matrix:
                                init_code = "w = init_matrix_variable_log_normal<%(float_prec)s>(w_dist_arg1, w_dist_arg2, rng[0]);"
                            else:
                                init_code = "w = init_matrix_variable_log_normal<%(float_prec)s>(w_dist_arg1, w_dist_arg2, rng);"
                        else:
                            min_code = "std::numeric_limits<%(float_prec)s>::min()" if proj.connector_weight_dist.min == None else str(proj.connector_weight_dist.min)
                            max_code = "std::numeric_limits<%(float_prec)s>::max()" if proj.connector_weight_dist.max == None else str(proj.connector_weight_dist.max)
                            if single_spmv_matrix:
                                init_code = "w = init_matrix_variable_log_normal_clip<%(float_prec)s>(w_dist_arg1, w_dist_arg2, rng[0], " + min_code + ", " + max_code + ");"
                            else:
                                init_code = "w = init_matrix_variable_log_normal_clip<%(float_prec)s>(w_dist_arg1, w_dist_arg2, rng, " + min_code + ", " + max_code + ");"

                    else:
                        raise NotImplementedError(str(type(proj.connector_weight_dist)) + " is not available for CPP-side connection patterns.")

                    if Global._check_paradigm("cuda"):
                        init_code += "\ngpu_w = init_matrix_variable_gpu<%(float_prec)s>(w);"

                    weight_code = tabify(init_code % {'float_prec': Global.config['precision']}, 2)

                # Init from LIL
                else:
                    init = 'false' if var['ctype'] == 'bool' else ('0' if var['ctype'] == 'int' else '0.0')
                    weight_code = attr_init_tpl[locality] % {
                        'id': proj.id,
                        'id_post': proj.post.id,
                        'name': var['name'],
                        'type': var['ctype'],
                        'init': init,
                        'attr_type': attr_type,
                        'float_prec': Global.config['precision']
                    }
                    weight_code += tabify("update_matrix_variable_all<%(float_prec)s>(w, values);" % {'float_prec': Global.config['precision']}, 2)
                    if Global._check_paradigm("cuda"):
                        weight_code += tabify("\nw_host_to_device = true;", 2)

            else:
                raise NotImplementedError

        # All other variables
        else:
            init = 'false' if var['ctype'] == 'bool' else ('0' if var['ctype'] == 'int' else '0.0')
            var_ids = {
                'id': proj.id,
                'id_post': proj.post.id,
                'name': var['name'],
                'type': var['ctype'],
                'init': init,
                'attr_type': attr_type,
                'float_prec': Global.config['precision']
            }
            if Global._check_paradigm("cuda") and locality == "global":
                code += attr_init_tpl[locality][attr_type] % var_ids
            else:
                code += attr_init_tpl[locality] % var_ids

        attributes.append(var['name'])

    # Initializing delays differs for construction from LIL or CPP-inited patterns
    if proj.max_delay > 1:
        # Special case: we have non-uniform delays, but not determined by a RandomDistribution.
        # This is most likely caused by a custom connectivity pattern.
        if proj.connector_delay_dist == None and proj.uniform_delay == -1:
            id_pre = proj.pre.id if not isinstance(proj.pre, PopulationView) else proj.pre.population.id
            if proj.synapse_type.type == "rate":
                delay_code = self._templates['delay']['nonuniform_rate_coded']['init'] % {'id_pre': id_pre}
            else:
                delay_code = self._templates['delay']['nonuniform_spiking']['init'] % {'id_pre': id_pre}

        # Uniform delay
        elif proj.connector_delay_dist == None:
            if cpp_connector_available(proj.connector_name, proj._storage_format, proj._storage_order):
                delay_code = tabify("delay = d_dist_arg1;", 2)
            else:
                delay_code = self._templates['delay']['uniform']['init']

        # Non-uniform delay drawn from a distribution
        elif isinstance(proj.connector_delay_dist, ANNRandom.RandomDistribution):
            if cpp_connector_available(proj.connector_name, proj._storage_format, proj._storage_order):
                rng_init = "rng[0]" if single_spmv_matrix else "rng"
                delay_code = tabify("""
delay = init_matrix_variable_discrete_uniform<int>(d_dist_arg1, d_dist_arg2, %(rng_init)s);
max_delay = -1;""" % {'id_pre': proj.pre.id, 'rng_init': rng_init}, 2)
            else:
                id_pre = proj.pre.id if not isinstance(proj.pre, PopulationView) else proj.pre.population.id
                if proj.synapse_type.type == "rate":
                    delay_code = self._templates['delay']['nonuniform_rate_coded']['init'] % {'id_pre': id_pre}
                else:
                    delay_code = self._templates['delay']['nonuniform_spiking']['init'] % {'id_pre': id_pre}

        else:
            raise NotImplementedError(str(type(proj.connector_delay_dist)) + " is not available.")
    else:
        delay_code = ""

    # If no psp is defined, it's event-driven
    has_event_driven = False
    for var in proj.synapse_type.description['variables']:
        if var['method'] == 'event-driven':
            has_event_driven = True
            break
    if has_event_driven:
        code += self._templates['event_driven']['cpp_init']

    # Pruning
    if Global.config['structural_plasticity']:
        if 'pruning' in proj.synapse_type.description.keys():
            code += """
    // Pruning
    _pruning = false;
    _pruning_period = 1;
    _pruning_offset = 0;
"""
        if 'creating' in proj.synapse_type.description.keys():
            code += """
    // Creating
    _creating = false;
    _creating_period = 1;
    _creating_offset = 0;
"""

    return weight_code, delay_code, code
from ANNarchy.core.Population import Population
from ANNarchy.core.Neuron import Neuron
import ANNarchy.core.Global as Global
from ANNarchy.generator.Compiler import extra_libs

try:
    from PIL import Image
except:
    Global._warning('The Python Image Library (pillow) is not installed on your system, unable to create ImagePopulations.')
import numpy as np

class ImagePopulation(Population):
    """
    Specific rate-coded Population allowing to represent images (png, jpg...) as the firing rate of a population (each neuron represents one pixel).

    This extension requires the Python Image Library (pip install Pillow).

    Usage:

    .. code-block:: python

        from ANNarchy import *
        from ANNarchy.extensions.image import ImagePopulation

        pop = ImagePopulation(geometry=(480, 640))
        pop.set_image('image.jpg')
    """

    def __init__(self, geometry, name=None, copied=False):
        """
def _add_object(self, obj):
    if isinstance(obj, Population):
        # Create a copy
        pop = Population(geometry=obj.geometry, neuron=obj.neuron_type, name=obj.name, stop_condition=obj.stop_condition)

        # Remove the copy from the global network
        Global._network[0]['populations'].pop(-1)

        # Copy the important properties
        pop.id = obj.id
        pop.name = obj.name
        pop.class_name = obj.class_name
        pop.init = obj.init

        # Add the copy to the local network
        Global._network[self.id]['populations'].append(pop)
        self.populations.append(pop)

    elif isinstance(obj, Projection):
        # Check the pre- or post-synaptic populations
        try:
            pre_pop = self.get(obj.pre)
            if isinstance(obj.pre, PopulationView):
                pre = PopulationView(pre_pop, obj.pre.ranks)
            else:
                pre = pre_pop
            post_pop = self.get(obj.post)
            if isinstance(obj.post, PopulationView):
                post = PopulationView(post_pop, obj.post.ranks)
            else:
                post = post_pop
        except:
            Global._error('Network.add(): The pre- or post-synaptic population of this projection is not in the network.')
            exit(0)
        target = obj.target
        synapse = obj.synapse_type

        # Create the projection
        proj = Projection(pre=pre, post=post, target=target, synapse=synapse)

        # Remove the copy from the global network
        Global._network[0]['projections'].pop(-1)

        # Copy the important properties
        proj.id = obj.id
        proj.name = obj.name
        proj.init = obj.init

        # Copy the synapses if they are already created
        proj._store_connectivity(obj._connection_method, obj._connection_args, obj._connection_delay)

        # Add the copy to the local network
        Global._network[self.id]['projections'].append(proj)
        self.projections.append(proj)

    elif isinstance(obj, Monitor):
        m = Monitor(obj.object, variables=obj.variables, period=obj._period, start=obj._start, net_id=self.id)
        # Add the copy to the local network (the monitor writes itself already in the right network)
        self.monitors.append(m)
def set_seed(self, seed):
    """
    Sets the seed of the random number generators for this network.
    """
    Global.set_seed(seed, self.id)

def get_current_step(self):
    "Returns the current simulation step."
    return Global.get_current_step(self.id)

def get_time(self):
    "Returns the current time in ms."
    return Global.get_time(self.id)

def _function(self, func):
    "Access a user-defined function"
    if not self.initialized:
        Global._error('the network is not compiled yet, cannot access the function ' + func)

    return getattr(self.cyInstance, func)
def _declaration_accessors(self, proj, single_matrix):
    """
    Generate declaration and accessor code for variables/parameters of the projection.

    Returns:
        (dict, str): the first return value contains the declaration code, the second one the accessor code.

    The declaration dictionary has the following fields:
        delay, event_driven, rng, parameters_variables, additional, cuda_stream
    """
    # create the code for non-specific projections
    declare_event_driven = ""
    declare_rng = ""
    declare_additional = ""

    # Delays
    if proj.max_delay > 1:
        if proj.uniform_delay > 1:
            key_delay = "uniform"
        else:
            if Global._check_paradigm("cuda"):
                raise Global.CodeGeneratorException("Non-uniform delays on rate-coded or spiking synapses are not available for CUDA devices.")

            if proj.synapse_type.type == "rate":
                key_delay = "nonuniform_rate_coded"
            else:
                key_delay = "nonuniform_spiking"

        declare_delay = self._templates['delay'][key_delay]['declare']
        init_delay = self._templates['delay'][key_delay]['init']
    else:
        declare_delay = ""
        init_delay = ""

    # Code for declarations and accessors
    declare_parameters_variables, accessor = self._generate_default_get_set(proj, single_matrix)

    # If no psp is defined, it's event-driven
    has_event_driven = False
    for var in proj.synapse_type.description['variables']:
        if var['method'] == 'event-driven':
            has_event_driven = True
            break
    if has_event_driven:
        declare_event_driven = self._templates['event_driven']['declare']

    # Arrays for the random numbers
    if len(proj.synapse_type.description['random_distributions']) > 0:
        declare_rng += """
    // Random numbers
"""
        for rd in proj.synapse_type.description['random_distributions']:
            declare_rng += self._templates['rng'][rd['locality']]['decl'] % {
                'rd_name': rd['name'],
                'type': rd['ctype'],
                'float_prec': Global.config['precision'],
                'template': rd['template'] % {'float_prec': Global.config['precision']}
            }

    # Structural plasticity
    if Global.config['structural_plasticity']:
        declare_parameters_variables += self._header_structural_plasticity(proj)

    # Specific projections can overwrite
    if 'declare_parameters_variables' in proj._specific_template.keys():
        declare_parameters_variables = proj._specific_template['declare_parameters_variables']
    if 'access_parameters_variables' in proj._specific_template.keys():
        accessor = proj._specific_template['access_parameters_variables']
    if 'declare_rng' in proj._specific_template.keys():
        declare_rng = proj._specific_template['declare_rng']
    if 'declare_event_driven' in proj._specific_template.keys():
        declare_event_driven = proj._specific_template['declare_event_driven']
    if 'declare_additional' in proj._specific_template.keys():
        declare_additional = proj._specific_template['declare_additional']

    # Finalize the declarations
    declaration = {
        'declare_delay': declare_delay,
        'init_delay': init_delay,
        'event_driven': declare_event_driven,
        'rng': declare_rng,
        'parameters_variables': declare_parameters_variables,
        'additional': declare_additional
    }

    return declaration, accessor
def python_environment():
    """
    Python environment configuration, required by Compiler.generate_makefile.
    Contains among others the python version, library path and cython version.

    Warning: changes to this method should be copied to setup.py.
    """
    # Python version
    py_version = "%(major)s.%(minor)s" % {'major': sys.version_info[0], 'minor': sys.version_info[1]}
    py_major = str(sys.version_info[0])

    if py_major == '2':
        Global._warning("Python 2 is not supported anymore, things might break.")

    # Python includes and libs
    # non-standard python installs need to tell the location of libpythonx.y.so/dylib
    # export LD_LIBRARY_PATH=$HOME/anaconda/lib:$LD_LIBRARY_PATH
    # export DYLD_FALLBACK_LIBRARY_PATH=$HOME/anaconda/lib:$DYLD_FALLBACK_LIBRARY_PATH
    py_prefix = sys.prefix

    # Search for pythonx.y-config
    cmd = "%(py_prefix)s/bin/python%(py_version)s-config --includes > /dev/null 2> /dev/null"
    with subprocess.Popen(cmd % {'py_version': py_version, 'py_prefix': py_prefix}, shell=True) as test:
        if test.wait() != 0:
            Global._warning("Can not find python-config in the same directory as python, trying with the default path...")
            python_config_path = "python%(py_version)s-config" % {'py_version': py_version}
        else:
            python_config_path = "%(py_prefix)s/bin/python%(py_version)s-config" % {'py_version': py_version, 'py_prefix': py_prefix}

    python_include = "`%(pythonconfigpath)s --includes`" % {'pythonconfigpath': python_config_path}
    python_libpath = "-L%(py_prefix)s/lib" % {'py_prefix': py_prefix}

    # Identify the -lpython flag
    with subprocess.Popen('%(pythonconfigpath)s --ldflags' % {'pythonconfigpath': python_config_path},
                          shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) as test:
        flagline = str(test.stdout.read().decode('UTF-8')).strip()
        errorline = str(test.stderr.read().decode('UTF-8'))
        test.wait()

    if len(errorline) > 0:
        Global._error("Unable to find python-config. Make sure you have installed the development files of Python (python-dev or -devel) and that either python-config, python2-config or python3-config are in your path.")

    flags = flagline.split(' ')
    for flag in flags:
        if flag.startswith('-lpython'):
            python_lib = flag
            break
    else:
        python_lib = "-lpython" + py_version

    # Check cython version
    with subprocess.Popen(py_prefix + "/bin/cython%(major)s -V > /dev/null 2> /dev/null" % {'major': py_major}, shell=True) as test:
        if test.wait() != 0:
            cython = py_prefix + "/bin/cython"
        else:
            cython = py_prefix + "/bin/cython" + py_major

    # If not in the same folder as python, use the default
    with subprocess.Popen("%(cython)s -V > /dev/null 2> /dev/null" % {'cython': cython}, shell=True) as test:
        if test.wait() != 0:
            cython = shutil.which("cython" + str(py_major))
            if cython is None:
                cython = shutil.which("cython")
                if cython is None:
                    Global._error("Unable to detect the path to cython.")

    return py_version, py_major, python_include, python_lib, python_libpath, cython
def compile(directory='annarchy',
            clean=False,
            populations=None,
            projections=None,
            compiler="default",
            compiler_flags="default",
            add_sources="",
            extra_libs="",
            cuda_config={'device': 0},
            annarchy_json="",
            silent=False,
            debug_build=False,
            profile_enabled=False,
            net_id=0):
    """
    This method uses the network architecture to generate optimized C++ code and compile a shared library that will perform the simulation.

    The ``compiler``, ``compiler_flags`` and part of ``cuda_config`` take their default value from the configuration file ``~/.config/ANNarchy/annarchy.json``.

    The following arguments are for internal development use only:

    * **debug_build**: creates a debug version of ANNarchy, which logs the creation of objects and some other data (default: False).
    * **profile_enabled**: creates a profilable version of ANNarchy, which logs several computation timings (default: False).

    :param directory: name of the subdirectory where the code will be generated and compiled. Must be a relative path. Default: "annarchy/".
    :param clean: boolean specifying if the library should be recompiled entirely or only the changes since the last compilation (default: False).
    :param populations: list of populations which should be compiled. If set to None, all available populations will be used.
    :param projections: list of projections which should be compiled. If set to None, all available projections will be used.
    :param compiler: C++ compiler to use. Default: g++ on GNU/Linux, clang++ on OS X. Valid compilers are [g++, clang++].
    :param compiler_flags: platform-specific flags to pass to the compiler. Default: "-march=native -O2". Warning: -O3 often generates slower code and can cause linking problems, so it is not recommended.
    :param cuda_config: dictionary defining the CUDA configuration for each population and projection.
    :param annarchy_json: compiler flags etc. can be stored in a .json file normally placed in the home directory (see comment below). With this flag one can directly assign a file location.
    :param silent: defines if status messages like "Compiling... OK" should be printed.
    """
    # Check if the network has already been compiled
    if Global._network[net_id]['compiled']:
        Global._print("""compile(): the network has already been compiled, doing nothing.
    If you are re-running a Jupyter notebook, you should call `clear()` right after importing ANNarchy in order to reset everything.""")
        return

    # Get the command-line arguments
    parser = setup_parser()
    options, unknown = parser.parse_known_args()
    if len(unknown) > 0 and Global.config['verbose']:
        Global._warning('unrecognized command-line arguments:', unknown)

    # If parameters were set on the command line, they overwrite Global.config
    if options.num_threads is not None:
        Global.config['num_threads'] = options.num_threads
    if options.visible_cores is not None:
        try:
            core_list = [int(x) for x in options.visible_cores.split(",")]
            Global.config['visible_cores'] = core_list
        except:
            Global._error("As argument for 'visible_cores' a comma-separated list of integers is expected.")

    # Get CUDA configuration
    if options.gpu_device >= 0:
        Global.config['paradigm'] = "cuda"
        cuda_config['device'] = int(options.gpu_device)

    # Check that a single backend is chosen
    if (options.num_threads != None) and (options.gpu_device >= 0):
        Global._error('CUDA and OpenMP cannot be active at the same time, please check your command-line arguments.')

    # Verbose
    if options.verbose is not None:
        Global.config['verbose'] = options.verbose

    # Precision
    if options.precision is not None:
        Global.config['precision'] = options.precision

    # Profiling
    if options.profile != None:
        profile_enabled = options.profile
        Global.config['profiling'] = options.profile
        Global.config['profile_out'] = options.profile_out
    if profile_enabled != False and options.profile == None:
        # Profiling enabled via the compile() argument
        Global.config['profiling'] = True

    # Debug
    if not debug_build:
        debug_build = options.debug  # debug build
    Global.config["debug"] = debug_build

    # Clean
    clean = options.clean or clean  # enforce rebuild

    # Populations to compile
    if populations is None:  # Default network
        populations = Global._network[net_id]['populations']

    # Projections to compile
    if projections is None:  # Default network
        projections = Global._network[net_id]['projections']

    # Compiling directory
    annarchy_dir = os.getcwd() + '/' + directory
    if not annarchy_dir.endswith('/'):
        annarchy_dir += '/'

    # Turn OMP off for MacOS
    #if (Global._check_paradigm("openmp") and Global.config['num_threads'] > 1 and sys.platform == "darwin"):
    #    Global._warning("OpenMP is still not supported by the default clang on Mac OS... Running single-threaded.")
    #    Global.config['num_threads'] = 1

    # Test if the current ANNarchy version is newer than what was used to create the subfolder
    from pkg_resources import parse_version
    if os.path.isfile(annarchy_dir + '/release'):
        with open(annarchy_dir + '/release', 'r') as rfile:
            prev_release = rfile.read().strip()
            prev_paradigm = ''

            # HD (03.08.2016):
            # in ANNarchy 4.5.7b I added also the paradigm to the release tag.
            # This if clause can be removed in later releases (TODO)
            if prev_release.find(',') != -1:
                prev_paradigm, prev_release = prev_release.split(', ')
            else:
                # old release tag
                clean = True

            if parse_version(prev_release) < parse_version(ANNarchy.__release__):
                clean = True
            elif prev_paradigm != Global.config['paradigm']:
                clean = True
    else:
        clean = True  # for very old versions

    # Check if the last compilation was successful
    if os.path.isfile(annarchy_dir + '/compilation'):
        with open(annarchy_dir + '/compilation', 'r') as rfile:
            res = rfile.read()
            if res.strip() == "0":  # the last compilation failed
                clean = True
    else:
        clean = True

    # Manage the compilation subfolder
    _folder_management(annarchy_dir, profile_enabled, clean, net_id)

    # Create a Compiler object
    compiler = Compiler(annarchy_dir=annarchy_dir,
                        clean=clean,
                        compiler=compiler,
                        compiler_flags=compiler_flags,
                        add_sources=add_sources,
                        extra_libs=extra_libs,
                        path_to_json=annarchy_json,
                        silent=silent,
                        cuda_config=cuda_config,
                        debug_build=debug_build,
                        profile_enabled=profile_enabled,
                        populations=populations,
                        projections=projections,
                        net_id=net_id)

    # Code Generation
    compiler.generate()

    if Global.config['verbose']:
        net_str = "" if compiler.net_id == 0 else str(compiler.net_id) + " "
        Global._print('Construct network ' + net_str + '...', end=" ")

    # Create the Python objects
    _instantiate(compiler.net_id, cuda_config=compiler.cuda_config, user_config=compiler.user_config)

    # NormProjections require an update of afferent projections
    _update_num_aff_connections(compiler.net_id)

    if Global.config['verbose']:
        Global._print('OK')
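# Minimal usage sketch of compile() in a complete script:
from ANNarchy import Population, Projection, Izhikevich, Uniform, compile, simulate

pop = Population(1000, Izhikevich)
proj = Projection(pop, pop, 'exc')
proj.connect_fixed_probability(probability=0.1, weights=Uniform(0.0, 0.5))

compile()          # generate and build the C++ library with default settings
simulate(1000.0)   # simulate for one second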
def __add__(self, synapse):
    Global._error('adding synapse models is not implemented yet.')
def report_latex(filename="./report.tex", standalone=True, gather_subprojections=False, net_id=0):
    """
    Generates a .tex file describing the network according to:

    Nordlie E, Gewaltig M-O, Plesser HE (2009). Towards Reproducible Descriptions of Neuronal Network Models. PLoS Comput Biol 5(8): e1000456.

    **Parameters:**

    * *filename*: name of the .tex file where the report will be written (default: "./report.tex")
    * *standalone*: tells if the generated file should be directly compilable or only includable (default: True)
    * *gather_subprojections*: if a projection between two populations has been implemented as a multiple of projections between sub-populations, this flag allows to group them in the summary (default: False).
    * *net_id*: id of the network to be used for reporting (default: 0, everything that was declared)
    """
    # stdout
    Global._print('Generating report in', filename)

    # Generate the summary
    summary = _generate_summary(net_id)
    # Generate the populations
    populations = _generate_populations(net_id)
    # Generate the projections
    projections = _generate_projections(net_id, gather_subprojections)
    # Generate the neuron models
    neuron_models = _generate_neuron_models(net_id)
    # Generate the synapse models
    synapse_models = _generate_synapse_models(net_id)
    # Generate the constants
    constants = _generate_constants(net_id)
    # Generate the functions
    functions = _generate_functions(net_id)
    # Generate the population parameters
    pop_parameters = _generate_population_parameters(net_id)
    # Generate the projection parameters
    proj_parameters = _generate_projection_parameters(net_id, gather_subprojections)
    # Generate the measurements (in our case the BOLD recording)
    measurements = _generate_measurements(net_id)

    # Possibly create the directory if it does not exist
    path_name = os.path.dirname(filename)
    if not path_name in ["", "."]:
        if not os.path.exists(path_name):
            os.makedirs(path_name)

    with open(filename, 'w') as wfile:
        if standalone:
            wfile.write(header)
            wfile.write(preamble)
        wfile.write(summary)
        wfile.write(populations)
        wfile.write(projections)
        wfile.write(neuron_models)
        wfile.write(synapse_models)
        wfile.write(parameters_template)
        wfile.write(constants)
        wfile.write(functions)
        wfile.write(pop_parameters)
        wfile.write(proj_parameters)
        wfile.write(input_template)
        wfile.write(measurements)
        if standalone:
            wfile.write(footer)
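# Usage sketch, assuming this function is reachable through the public
# report() helper (the exact entry point may differ between versions):
from ANNarchy import report

report(filename="./report.tex", standalone=True)  # after defining the network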
def _generate_default_get_set(self, proj, single_matrix):
    """
    Instead of generating a code block with get/set for each variable, we generate a common
    function which receives the name of the variable.
    """
    local_accessor_template = """
    std::vector<std::vector<%(ctype)s>> get_local_attribute_all_%(ctype_name)s(std::string name) {
%(local_get1)s

        // should not happen
        std::cerr << "ProjStruct%(id_proj)s::get_local_attribute_all_%(ctype_name)s: " << name << " not found" << std::endl;
        return std::vector<std::vector<%(ctype)s>>();
    }

    std::vector<%(ctype)s> get_local_attribute_row_%(ctype_name)s(std::string name, int rk_post) {
%(local_get2)s

        // should not happen
        std::cerr << "ProjStruct%(id_proj)s::get_local_attribute_row_%(ctype_name)s: " << name << " not found" << std::endl;
        return std::vector<%(ctype)s>();
    }

    %(ctype)s get_local_attribute_%(ctype_name)s(std::string name, int rk_post, int rk_pre) {
%(local_get3)s

        // should not happen
        std::cerr << "ProjStruct%(id_proj)s::get_local_attribute: " << name << " not found" << std::endl;
        return 0.0;
    }

    void set_local_attribute_all_%(ctype_name)s(std::string name, std::vector<std::vector<%(ctype)s>> value) {
%(local_set1)s
    }

    void set_local_attribute_row_%(ctype_name)s(std::string name, int rk_post, std::vector<%(ctype)s> value) {
%(local_set2)s
    }

    void set_local_attribute_%(ctype_name)s(std::string name, int rk_post, int rk_pre, %(ctype)s value) {
%(local_set3)s
    }
"""

    semiglobal_accessor_template = """
    std::vector<%(ctype)s> get_semiglobal_attribute_all_%(ctype_name)s(std::string name) {
%(semiglobal_get1)s

        // should not happen
        std::cerr << "ProjStruct%(id_proj)s::get_semiglobal_attribute_all_%(ctype_name)s: " << name << " not found" << std::endl;
        return std::vector<%(ctype)s>();
    }

    %(ctype)s get_semiglobal_attribute_%(ctype_name)s(std::string name, int rk_post) {
%(semiglobal_get2)s

        // should not happen
        std::cerr << "ProjStruct%(id_proj)s::get_semiglobal_attribute_%(ctype_name)s: " << name << " not found" << std::endl;
        return 0.0;
    }

    void set_semiglobal_attribute_all_%(ctype_name)s(std::string name, std::vector<%(ctype)s> value) {
%(semiglobal_set1)s
    }

    void set_semiglobal_attribute_%(ctype_name)s(std::string name, int rk_post, %(ctype)s value) {
%(semiglobal_set2)s
    }
"""

    global_accessor_template = """
    %(ctype)s get_global_attribute_%(ctype_name)s(std::string name) {
%(global_get)s

        // should not happen
        std::cerr << "ProjStruct%(id_proj)s::get_global_attribute_%(ctype_name)s: " << name << " not found" << std::endl;
        return 0.0;
    }

    void set_global_attribute_%(ctype_name)s(std::string name, %(ctype)s value) {
%(global_set)s
    }
"""

    declare_parameters_variables = ""

    # The transposed projection contains no synaptic parameters of its own
    if isinstance(proj, Transpose):
        return "", ""

    # choose the declaration templates depending on the paradigm
    decl_template = self._templates['attribute_decl']

    attributes = []
    code_ids_per_type = {}

    # Sort the parameters/variables per type
    for var in proj.synapse_type.description['parameters'] + proj.synapse_type.description['variables']:
        # Avoid duplicates
        if var['name'] in attributes:
            continue

        # add an empty list for this type if needed
        if var['ctype'] not in code_ids_per_type.keys():
            code_ids_per_type[var['ctype']] = []

        # important properties for code generation
        locality = var['locality']
        attr_type = 'parameter' if var in proj.synapse_type.description['parameters'] else 'variable'

        # Special case for single weights
        if var['name'] == "w" and proj._has_single_weight():
            locality = 'global'

        # For GPUs we need to tell the host that this variable needs to be updated
        if Global._check_paradigm("cuda"):
            if locality == "global" and attr_type == "parameter":
                write_dirty_flag = ""
                read_dirty_flag = ""
            else:
                write_dirty_flag = "%(name)s_host_to_device = true;" % {'name': var['name']}
                read_dirty_flag = "if ( %(name)s_device_to_host < t ) device_to_host();" % {'name': var['name']}
        else:
            write_dirty_flag = ""
            read_dirty_flag = ""

        code_ids_per_type[var['ctype']].append({
            'type': var['ctype'],
            'name': var['name'],
            'locality': locality,
            'attr_type': attr_type,
            'read_dirty_flag': read_dirty_flag,
            'write_dirty_flag': write_dirty_flag
        })

        attributes.append(var['name'])

    # The final code may contain multiple sets of accessor functions
    final_code = ""
    for ctype in code_ids_per_type.keys():

        # Attribute accessors/declarators
        local_attribute_get1 = ""
        local_attribute_get2 = ""
        local_attribute_get3 = ""
        local_attribute_set1 = ""
        local_attribute_set2 = ""
        local_attribute_set3 = ""
        semiglobal_attribute_get1 = ""
        semiglobal_attribute_get2 = ""
        semiglobal_attribute_set1 = ""
        semiglobal_attribute_set2 = ""
        global_attribute_get = ""
        global_attribute_set = ""

        for ids in code_ids_per_type[ctype]:
            # The locality of a variable determines the correct template;
            # in case of CUDA the attribute type is important as well
            locality = ids['locality']
            attr_type = ids['attr_type']

            # Local variables can be vec[vec[d]], vec[d] or d
            if locality == "local":
                local_attribute_get1 += """
        if ( name.compare("%(name)s") == 0 ) {
            %(read_dirty_flag)s
            return get_matrix_variable_all<%(type)s>(%(name)s);
        }
""" % ids
                local_attribute_set1 += """
        if ( name.compare("%(name)s") == 0 ) {
            update_matrix_variable_all<%(type)s>(%(name)s, value);
            %(write_dirty_flag)s
            return;
        }
""" % ids
                local_attribute_get2 += """
        if ( name.compare("%(name)s") == 0 ) {
            %(read_dirty_flag)s
            return get_matrix_variable_row<%(type)s>(%(name)s, rk_post);
        }
""" % ids
                local_attribute_set2 += """
        if ( name.compare("%(name)s") == 0 ) {
            update_matrix_variable_row<%(type)s>(%(name)s, rk_post, value);
            %(write_dirty_flag)s
            return;
        }
""" % ids
                local_attribute_get3 += """
        if ( name.compare("%(name)s") == 0 ) {
            %(read_dirty_flag)s
            return get_matrix_variable<%(type)s>(%(name)s, rk_post, rk_pre);
        }
""" % ids
                local_attribute_set3 += """
        if ( name.compare("%(name)s") == 0 ) {
            update_matrix_variable<%(type)s>(%(name)s, rk_post, rk_pre, value);
            %(write_dirty_flag)s
            return;
        }
""" % ids

            # Semiglobal variables can be vec[d] or d
            elif locality == "semiglobal":
                semiglobal_attribute_get1 += """
        if ( name.compare("%(name)s") == 0 ) {
            return get_vector_variable_all<%(type)s>(%(name)s);
        }
""" % ids
                semiglobal_attribute_get2 += """
        if ( name.compare("%(name)s") == 0 ) {
            return get_vector_variable<%(type)s>(%(name)s, rk_post);
        }
""" % ids
                semiglobal_attribute_set1 += """
        if ( name.compare("%(name)s") == 0 ) {
            update_vector_variable_all<%(type)s>(%(name)s, value);
            %(write_dirty_flag)s
            return;
        }
""" % ids
                semiglobal_attribute_set2 += """
        if ( name.compare("%(name)s") == 0 ) {
            update_vector_variable<%(type)s>(%(name)s, rk_post, value);
            %(write_dirty_flag)s
            return;
        }
""" % ids

            # Global variables are only d
            else:
                global_attribute_get += """
        if ( name.compare("%(name)s") == 0 ) {
            return %(name)s;
        }
""" % ids
                global_attribute_set += """
        if ( name.compare("%(name)s") == 0 ) {
            %(name)s = value;
            %(write_dirty_flag)s
            return;
        }
""" % ids

            if Global._check_paradigm("cuda") and locality == "global":
                declare_parameters_variables += decl_template[locality][attr_type] % ids
            else:
                declare_parameters_variables += decl_template[locality] % ids

        # build up the final code
        if local_attribute_get1 != "":
            final_code += local_accessor_template % {
                'local_get1': local_attribute_get1,
                'local_get2': local_attribute_get2,
                'local_get3': local_attribute_get3,
                'local_set1': local_attribute_set1,
                'local_set2': local_attribute_set2,
                'local_set3': local_attribute_set3,
                'id_proj': proj.id,
                'ctype': ctype,
                'ctype_name': ctype.replace(" ", "_")
            }

        if semiglobal_attribute_get1 != "":
            final_code += semiglobal_accessor_template % {
                'semiglobal_get1': semiglobal_attribute_get1,
                'semiglobal_get2': semiglobal_attribute_get2,
                'semiglobal_set1': semiglobal_attribute_set1,
                'semiglobal_set2': semiglobal_attribute_set2,
                'id_proj': proj.id,
                'ctype': ctype,
                'ctype_name': ctype.replace(" ", "_")
            }

        if global_attribute_get != "":
            final_code += global_accessor_template % {
                'global_get': global_attribute_get,
                'global_set': global_attribute_set,
                'id_proj': proj.id,
                'ctype': ctype,
                'ctype_name': ctype.replace(" ", "_")
            }

    return declare_parameters_variables, final_code
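The generated C++ accessors dispatch on the attribute *name* at runtime instead of emitting one method per variable, which keeps the wrapper interface stable across models. A minimal Python sketch of the same pattern (all names hypothetical, for illustration only)::

    class ProjStructSketch:
        """Mimics the generated get/set_global_attribute pair."""
        def __init__(self):
            self._globals = {'eta': 0.01, 'tau': 10.0}   # global attributes by name

        def get_global_attribute(self, name):
            if name in self._globals:
                return self._globals[name]
            # should not happen (mirrors the generated std::cerr branch)
            raise KeyError("get_global_attribute: %s not found" % name)

        def set_global_attribute(self, name, value):
            self._globals[name] = value

    proj = ProjStructSketch()
    proj.set_global_attribute('eta', 0.05)
    assert proj.get_global_attribute('eta') == 0.05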
def save_connectivity(self, filename):
    """
    Saves the connectivity of the projection into a file.

    Only the connectivity matrix, the weights and delays are saved, not the other synaptic variables.

    The generated data can be used to create a projection in another network::

        proj.connect_from_file(filename)

    * If the file name ends with '.npz', the data will be saved and compressed using `np.savez_compressed` (recommended).
    * If the file name ends with '.gz', the data will be pickled into a binary file and compressed using gzip.
    * If the file name ends with '.mat', the data will be saved as a Matlab 7.2 file. Scipy must be installed.
    * Otherwise, the data will be pickled into a simple binary file using pickle.

    *Parameters*:

    * **filename**: file name, may contain a relative or absolute path.
    """
    # Check that the network is compiled
    if not self.initialized:
        Global._error('save_connectivity(): the network has not been compiled yet.')
        return

    # Check if the directory exists
    (path, fname) = os.path.split(filename)
    if not path == '':
        if not os.path.isdir(path):
            Global._print('Creating folder', path)
            os.mkdir(path)
    extension = os.path.splitext(fname)[1]

    # Gather the data
    data = {
        'name': self.name,
        'post_ranks': self.post_ranks,
        'pre_ranks': self.cyInstance.pre_rank_all(),  # was: [self.cyInstance.pre_rank(n) for n in range(self.size)],
        'w': self.cyInstance.get_w(),
        'delay': self.cyInstance.get_delay() if hasattr(self.cyInstance, 'get_delay') else None,
        'max_delay': self.max_delay,
        'uniform_delay': self.uniform_delay,
        'size': self.size,
        'nb_synapses': sum([self.cyInstance.nb_synapses(n) for n in range(self.size)])
    }

    # Save the data
    try:
        import cPickle as pickle  # Python 2
    except:
        import pickle  # Python 3

    if extension == '.gz':
        Global._print("Saving connectivity in gzipped binary format...")
        try:
            import gzip
        except:
            Global._error('gzip is not installed.')
            return
        with gzip.open(filename, mode='wb') as w_file:
            try:
                pickle.dump(data, w_file, protocol=pickle.HIGHEST_PROTOCOL)
            except Exception as e:
                Global._print('Error while saving in gzipped binary format.')
                Global._print(e)
                return
    elif extension == '.npz':
        Global._print("Saving connectivity in Numpy format...")
        np.savez_compressed(filename, **data)
    elif extension == '.mat':
        Global._print("Saving connectivity in Matlab format...")
        if data['delay'] is None:
            data['delay'] = 0
        try:
            import scipy.io as sio
            sio.savemat(filename, data)
        except Exception as e:
            Global._error('Error while saving in Matlab format.')
            Global._print(e)
            return
    else:
        Global._print("Saving connectivity in pickle format...")
        # save in Python's pickle format
        with open(filename, mode='wb') as w_file:
            try:
                pickle.dump(data, w_file, protocol=pickle.HIGHEST_PROTOCOL)
            except Exception as e:
                Global._print('Error while saving in pickle format.')
                Global._print(e)
                return
    return
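Round-trip illustration for the recommended format (assumes a compiled network with a projection ``proj``)::

    proj.save_connectivity('net/connectivity.npz')

    import numpy as np
    data = np.load('net/connectivity.npz', allow_pickle=True)   # object arrays need allow_pickle
    print(data.files)                                           # ['name', 'post_ranks', 'pre_ranks', 'w', ...]
    print(data['name'], int(data['size']))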
def __init__(self, pre, post, target, synapse=None, name=None, copied=False):
    """
    *Parameters*:

    * **pre**: pre-synaptic population (either its name or a ``Population`` object).
    * **post**: post-synaptic population (either its name or a ``Population`` object).
    * **target**: type of the connection.
    * **synapse**: a ``Synapse`` instance.
    * **name**: unique name of the projection (optional, it defaults to ``proj0``, ``proj1``, etc).

    By default, the synapse only ensures linear synaptic transmission:

    * For rate-coded populations: ``psp = w * pre.r``
    * For spiking populations: ``g_target += w``
    """
    # Check if the network has already been compiled
    if Global._network[0]['compiled'] and not copied:
        Global._error('you cannot add a projection after the network has been compiled.')

    # Store the pre- and post-synaptic populations.
    # The user provides either a string or a Population object;
    # in case of a string, we need to search for the corresponding object.
    if isinstance(pre, str):
        for pop in Global._network[0]['populations']:
            if pop.name == pre:
                self.pre = pop
    else:
        self.pre = pre
    if isinstance(post, str):
        for pop in Global._network[0]['populations']:
            if pop.name == post:
                self.post = pop
    else:
        self.post = post

    # Store the arguments
    if isinstance(target, list) and len(target) == 1:
        self.target = target[0]
    else:
        self.target = target

    # Add the target to the post-synaptic population
    self.post.targets.append(self.target)

    # Check if a synapse description is attached
    if not synapse:
        # No synapse attached: assume a default synapse based on the pre-synaptic population.
        if self.pre.neuron_type.type == 'rate':
            from ANNarchy.models.Synapses import DefaultRateCodedSynapse
            self.synapse_type = DefaultRateCodedSynapse()
            self.synapse_type.type = 'rate'
        else:
            from ANNarchy.models.Synapses import DefaultSpikingSynapse
            self.synapse_type = DefaultSpikingSynapse()
            self.synapse_type.type = 'spike'
    elif inspect.isclass(synapse):
        self.synapse_type = synapse()
        self.synapse_type.type = self.pre.neuron_type.type
    else:
        self.synapse_type = copy.deepcopy(synapse)
        self.synapse_type.type = self.pre.neuron_type.type

    # Analyse the parameters and variables
    self.synapse_type._analyse()

    # Create a default name
    self.id = len(Global._network[0]['projections'])
    if name:
        self.name = name
    else:
        self.name = 'proj' + str(self.id)

    # Get a list of parameters and variables
    self.parameters = []
    self.init = {}
    for param in self.synapse_type.description['parameters']:
        self.parameters.append(param['name'])
        self.init[param['name']] = param['init']
    self.variables = []
    for var in self.synapse_type.description['variables']:
        self.variables.append(var['name'])
        self.init[var['name']] = var['init']
    self.attributes = self.parameters + self.variables

    # Get a list of user-defined functions
    self.functions = [func['name'] for func in self.synapse_type.description['functions']]

    # Add the projection to the global network
    Global._network[0]['projections'].append(self)

    # Finalize initialization
    self.initialized = False

    # Cython instance
    self.cyInstance = None

    # Connectivity
    self._synapses = None
    self._connection_method = None
    self._connection_args = None
    self._connection_delay = None
    self._connector = None

    # The list of post ranks is full by default, it will be changed when the weights are created
    self.post_ranks = list(range(self.post.size))

    # Default configuration for connectivity
    self._storage_format = "lil"
    self._storage_order = "post_to_pre"

    # If a single weight value is used
    self._single_constant_weight = False

    # If a dense matrix should be used instead of LIL
    self._dense_matrix = False

    # Reporting
    self.connector_name = "Specific"
    self.connector_description = "Specific"

    # Overwritten by derived classes to add additional code
    self._specific_template = {}

    # Set to False by derived classes to prevent saving of
    # data, e.g. in case of weight-sharing projections
    self._saveable = True

    # To allow case-specific adjustment of parallelization
    # parameters, e.g. the OpenMP schedule, we introduce a
    # dictionary read by the ProjectionGenerator.
    #
    # Will be overwritten either by inherited classes or
    # by an omp_config provided to the compile() method.
    self._omp_config = {
        #'psp_schedule': 'schedule(dynamic)'
    }
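A typical construction, matching the docstring above (assuming two rate-coded populations ``pop1`` and ``pop2`` have already been created)::

    # Default rate-coded projection: psp = w * pre.r
    proj = Projection(pre=pop1, post=pop2, target='exc')
    proj.connect_all_to_all(weights=1.0)   # standard connector method of Projection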
def determine_idx_type_for_projection(proj):
    """
    The suitable index type depends on the maximum number of neurons in the
    pre-synaptic and post-synaptic layers.

    Notice (8th June 2021): the current Cython version appears to have problems
    handling data types like "unsigned int", so the unsigned data types are
    replaced by our own definitions. These definitions are placed in
    *ANNarchy/generator/Template/PyxTemplate.py*
    """
    # The user disabled this optimization.
    if Global.config["only_int_idx_type"]:
        return "int", "int", "int", "int"

    # Currently only implemented for some cases,
    # the others default to the "old" configuration
    if proj.synapse_type.type == "spike":
        return "int", "int", "int", "int"
    if Global._check_paradigm("cuda"):
        return "int", "int", "int", "int"
    if proj._storage_format != "lil" and Global.config["num_threads"] > 1:
        return "int", "int", "int", "int"

    # max_size is related to the population sizes. As we use one type for
    # both dimensions, we need to determine the maximum.
    pre_size = proj.pre.population.size if isinstance(proj.pre, PopulationView) else proj.pre.size
    post_size = proj.post.population.size if isinstance(proj.post, PopulationView) else proj.post.size
    max_size_one_dim = max(pre_size, post_size)
    max_size_both_dim = pre_size * post_size

    # For the type decision we rely on the C++ boundaries, which are decremented
    # by 1 to allow the usage of CSR-like formats without row overflow.
    if max_size_one_dim < 255:
        # 1 byte
        cpp_idx_type = "unsigned char"
        cython_idx_type = "_ann_uint8"

        if max_size_both_dim < 255:
            # can use the same type (should be rare ...)
            cpp_size_type = "unsigned char"
            cython_size_type = "_ann_uint8"
        else:
            # next larger data type
            cpp_size_type = "unsigned short int"
            cython_size_type = "_ann_uint16"

    elif max_size_one_dim < 65534:
        # 2 bytes
        cpp_idx_type = "unsigned short int"
        cython_idx_type = "_ann_uint16"

        if max_size_both_dim < 65534:
            cpp_size_type = "unsigned short int"
            cython_size_type = "_ann_uint16"
        else:
            cpp_size_type = "unsigned int"
            cython_size_type = "_ann_uint32"

    elif max_size_one_dim < 4294967294:
        # 4 bytes
        cpp_idx_type = "unsigned int"
        cython_idx_type = "_ann_uint32"

        if max_size_both_dim < 4294967294:
            cpp_size_type = "unsigned int"
            cython_size_type = "_ann_uint32"
        else:
            cpp_size_type = "unsigned long int"
            cython_size_type = "_ann_uint64"

    else:
        # this is a hypothetical case I guess (HD: 4th June 2021)
        raise NotImplementedError("The matrix dimension exceeded the representable size ...")

    return cpp_idx_type, cython_idx_type, cpp_size_type, cython_size_type
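The decision for the one-dimensional index type can be replicated in isolation (sketch; thresholds taken from the code above)::

    def idx_type_for(max_size_one_dim):
        """Standalone version of the index-type decision (one dimension only)."""
        if max_size_one_dim < 255:            # fits into 1 byte, minus the guard value
            return "unsigned char", "_ann_uint8"
        elif max_size_one_dim < 65534:        # fits into 2 bytes
            return "unsigned short int", "_ann_uint16"
        elif max_size_one_dim < 4294967294:   # fits into 4 bytes
            return "unsigned int", "_ann_uint32"
        raise NotImplementedError("The matrix dimension exceeded the representable size ...")

    assert idx_type_for(200) == ("unsigned char", "_ann_uint8")
    assert idx_type_for(100000) == ("unsigned int", "_ann_uint32")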
def nb_synapses(self):
    "Total number of synapses in the projection."
    if self.cyInstance is None:
        Global._warning("Accessing the 'nb_synapses' attribute of a Projection is only valid after compile().")
        return 0
    return sum([self.cyInstance.nb_synapses(n) for n in range(self.size)])
def _set_delay(self, value):
    if self.cyInstance:  # After compile()
        if not hasattr(self.cyInstance, 'get_delay'):
            if self.max_delay <= 1 and value != Global.config['dt']:
                Global._error("set_delay: the projection was instantiated without delays, it is too late to create them...")

        elif self.uniform_delay != -1:
            current_delay = self.uniform_delay
            if isinstance(value, (np.ndarray)):
                if value.size > 1:
                    Global._error("set_delay: the projection was instantiated with uniform delays, it is too late to load non-uniform values...")
                else:
                    value = max(1, round(value[0] / Global.config['dt']))
            elif isinstance(value, (float, int)):
                value = max(1, round(float(value) / Global.config['dt']))
            else:
                Global._error("set_delay: only float, int or np.array values are possible.")

            # The new max_delay is higher than before
            if value > self.max_delay:
                self.max_delay = value
                self.uniform_delay = value
                self.cyInstance.set_delay(value)
                if isinstance(self.pre, PopulationView):
                    self.pre.population.max_delay = max(self.max_delay, self.pre.population.max_delay)
                    self.pre.population.cyInstance.update_max_delay(self.pre.population.max_delay)
                else:
                    self.pre.max_delay = max(self.max_delay, self.pre.max_delay)
                    self.pre.cyInstance.update_max_delay(self.pre.max_delay)
                return
            else:
                self.uniform_delay = value
                self.cyInstance.set_delay(value)

        else:  # variable delays
            if not isinstance(value, (np.ndarray, list)):
                Global._error("set_delay with variable delays: you must provide a list of lists of exactly the same size as before.")

            # Check the number of delays
            nb_values = sum([len(s) for s in value])
            if nb_values != self.nb_synapses:
                Global._error("set_delay with variable delays: the sizes do not match. You have to provide one value for each existing synapse.")
            if len(value) != len(self.post_ranks):
                Global._error("set_delay with variable delays: the sizes do not match. You have to provide one list per existing post-synaptic neuron.")

            # Convert to steps
            if isinstance(value, np.ndarray):
                delays = [[max(1, round(value[i, j] / Global.config['dt'])) for j in range(value.shape[1])] for i in range(value.shape[0])]
            else:
                delays = [[max(1, round(v / Global.config['dt'])) for v in c] for c in value]

            # Max delay
            max_delay = max([max(l) for l in delays])

            # Send the max delay to the pre-synaptic population
            if max_delay > self.max_delay:
                self.max_delay = max_delay
                self.cyInstance.update_max_delay(self.max_delay)
                if isinstance(self.pre, PopulationView):
                    self.pre.population.max_delay = max(self.max_delay, self.pre.population.max_delay)
                    self.pre.population.cyInstance.update_max_delay(self.pre.population.max_delay)
                else:
                    self.pre.max_delay = max(self.max_delay, self.pre.max_delay)
                    self.pre.cyInstance.update_max_delay(self.pre.max_delay)

            # Send the new values to the projection
            self.cyInstance.set_delay(delays)

    else:  # before compile()
        Global._error("set_delay before compile(): not implemented yet.")
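Delays are stored internally in simulation steps, not milliseconds; the conversion used throughout this method is ``max(1, round(delay/dt))``, e.g.::

    dt = 0.1   # hypothetical time step in ms
    for delay_ms in [0.05, 0.1, 1.0, 2.34]:
        print(delay_ms, 'ms ->', max(1, round(delay_ms / dt)), 'step(s)')
    # 0.05 ms -> 1 step(s), 0.1 ms -> 1, 1.0 ms -> 10, 2.34 ms -> 23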
def _check_reserved_names(populations, projections):
    """
    Checks that no reserved variable name is redefined.
    """
    # Check populations
    for pop in populations:
        # Reserved variable names
        for term in reserved_variables:
            if term in pop.attributes:
                Global._print(pop.neuron_type.parameters)
                Global._print(pop.neuron_type.equations)
                Global._error(term + ' is a reserved variable name')

    # Check projections
    for proj in projections:
        # Reserved variable names
        for term in reserved_variables:
            if term in proj.attributes:
                Global._print(proj.synapse_type.parameters)
                Global._print(proj.synapse_type.equations)
                Global._error(term + ' is a reserved variable name')
def save_connectivity(self, filename): "Not available." Global._warning('Transposed projections can not be saved.')
def _check_locality(populations, projections):
    """
    Checks that a global variable does not depend on local ones.
    """
    for proj in projections:
        for var in proj.synapse_type.description['variables']:

            if var['locality'] == 'global':
                # A global variable cannot depend on local or semiglobal variables

                # Inside the equation
                for v in var['dependencies']:
                    if _get_locality(v, proj.synapse_type.description) in ['local', 'semiglobal']:
                        Global._print(var['eq'])
                        Global._error('The global variable', var['name'], 'cannot depend on a synapse-specific/post-synaptic one:', v)

                # As pre/post dependencies
                deps = var['prepost_dependencies']
                if len(deps['pre']) > 0 or len(deps['post']) > 0:
                    Global._print(proj.synapse_type.equations)
                    Global._error('The global variable', var['name'], 'cannot depend on pre- or post-synaptic variables.')

            if var['locality'] == 'semiglobal':
                # A semiglobal variable cannot depend on pre-synaptic variables

                # Inside the equation
                for v in var['dependencies']:
                    if _get_locality(v, proj.synapse_type.description) == 'local':
                        Global._print(var['eq'])
                        Global._error('The postsynaptic variable', var['name'], 'cannot depend on a synapse-specific one:', v)

                # As pre/post dependencies
                deps = var['prepost_dependencies']
                if len(deps['pre']) > 0:
                    Global._print(proj.synapse_type.equations)
                    Global._error('The postsynaptic variable', var['name'], 'cannot depend on pre-synaptic ones (e.g. pre.r).')
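As an illustration, a (hypothetical) synapse definition that this check rejects, because the projection-wide variable ``theta`` depends on the synapse-specific variable ``w``::

    invalid = Synapse(
        parameters="eta = 0.1 : projection",
        equations="""
            theta = eta * w : projection    # global variable depending on a local one -> error
        """
    )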
def _check_prepost(populations, projections):
    """
    Checks that when a synapse uses pre.x or post.x, the variable x exists in the corresponding neuron.
    """
    for proj in projections:

        for dep in proj.synapse_type.description['dependencies']['pre']:
            if dep.startswith('sum('):
                target = re.findall(r'\(([\s\w]+)\)', dep)[0].strip()
                if not target in proj.pre.targets:
                    Global._print(proj.synapse_type.equations)
                    Global._error('The pre-synaptic population ' + proj.pre.name + ' receives no projection with the target ' + target)
                continue
            if not dep in proj.pre.attributes:
                Global._print(proj.synapse_type.equations)
                Global._error('The pre-synaptic population ' + proj.pre.name + ' has no variable called ' + dep)

        for dep in proj.synapse_type.description['dependencies']['post']:
            if dep.startswith('sum('):
                target = re.findall(r'\(([\s\w]+)\)', dep)[0].strip()
                if not target in proj.post.targets:
                    Global._print(proj.synapse_type.equations)
                    Global._error('The post-synaptic population ' + proj.post.name + ' receives no projection with the target ' + target)
                continue
            if not dep in proj.post.attributes:
                Global._print(proj.synapse_type.equations)
                Global._error('The post-synaptic population ' + proj.post.name + ' has no variable called ' + dep)
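For example, the following (hypothetical) rate-coded synapse reads ``pre.sum(exc)``; the first branch above then verifies that the pre-synaptic population actually receives a projection with target ``exc``, and the attribute branch verifies that ``pre.r`` exists::

    modulated = Synapse(
        psp="w * pre.r * pre.sum(exc)"
    )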
def receptive_fields(self, variable='w', in_post_geometry=True):
    "Not available."
    Global._warning('Transposed projections cannot display receptive fields.')
def connectivity_matrix(self, fill=0.0):
    "Not available."
    Global._warning('Transposed projections cannot display connectivity matrices.')
def generate_makefile(self):
    """
    Generate the Makefile.

    The Makefile consists of two stages: compiling the Cython wrapper and
    compiling the ANNarchy model files. Both are then linked together into a
    shared library usable from Python.
    """
    # Compiler
    if self.compiler == "default":
        self.compiler = self.user_config['openmp']['compiler']
    if self.compiler_flags == "default":
        self.compiler_flags = self.user_config['openmp']['flags']

    # flags are common to all platforms
    if not self.debug_build:
        cpu_flags = self.compiler_flags
    else:
        cpu_flags = "-O0 -g -D_DEBUG -march=native"

    if self.profile_enabled:
        cpu_flags += " -g"
        #extra_libs.append("-lpapi")

    # OpenMP flag
    omp_flag = ""
    if Global.config['paradigm'] == "openmp":
        omp_flag = "-fopenmp"

    # Disable the parallel RNG for OpenMP?
    if Global.config['disable_parallel_rng'] and Global._check_paradigm("openmp"):
        cpu_flags += " -D_DISABLE_PARALLEL_RNG "

    # CUDA library and compiler
    #
    # hdin (22.03.2016): we should verify in the future whether compute_35
    # remains the best configuration for Kepler and upwards.
    cuda_gen = ""
    gpu_flags = ""
    gpu_compiler = "nvcc"
    gpu_ldpath = ""
    if sys.platform.startswith('linux') and Global.config['paradigm'] == "cuda":
        cuda_gen = ""  # TODO: -arch sm_%(ver)s

        if self.debug_build:
            gpu_flags = "-g -G -D_DEBUG"

        # read the config file for the cuda lib path
        if 'cuda' in self.user_config.keys():
            gpu_compiler = self.user_config['cuda']['compiler']
            gpu_ldpath = '-L' + self.user_config['cuda']['path'] + '/lib'
            gpu_flags += self.user_config['cuda']['flags']

        # -Xcompiler expects the arguments separated by ','
        if len(cpu_flags.strip()) > 0:
            cpu_flags = cpu_flags.replace(" ", ",")
            cpu_flags += ","

    # Extra libs from extensions such as opencv
    libs = self.extra_libs
    for lib in extra_libs:
        libs += str(lib) + ' '

    # Python environment
    py_version, py_major, python_include, python_lib, python_libpath, cython = python_environment()

    # The include path to numpy is not standard on all distributions
    numpy_include = np.get_include()

    # ANNarchy default header: sparse matrix formats
    annarchy_include = ANNarchy.__path__[0] + '/include'

    # The connector module needs to reload some header files,
    # ANNarchy.__path__ provides the installation directory
    path_to_cython_ext = "-I " + ANNarchy.__path__[0] + '/core/cython_ext/ -I ' + ANNarchy.__path__[0][:-8]

    # Create the Makefile depending on the target platform and parallel framework
    if sys.platform.startswith('linux'):  # Linux systems
        if Global.config['paradigm'] == "cuda":
            makefile_template = linux_cuda_template
        else:
            makefile_template = linux_omp_template
    elif sys.platform == "darwin":  # mac os
        if self.compiler == 'clang++':
            makefile_template = osx_clang_template
            if Global.config['num_threads'] == 1:  # clang reports that it does not support openmp
                omp_flag = ""
        else:
            makefile_template = osx_gcc_template
    else:  # Windows: to test....
        Global._warning("Compilation on Windows is not supported yet.")

    # Gather all Makefile flags
    makefile_flags = {
        'compiler': self.compiler,
        'add_sources': self.add_sources,
        'cpu_flags': cpu_flags,
        'cuda_gen': cuda_gen,
        'gpu_compiler': gpu_compiler,
        'gpu_flags': gpu_flags,
        'gpu_ldpath': gpu_ldpath,
        'openmp': omp_flag,
        'extra_libs': libs,
        'py_version': py_version,
        'py_major': py_major,
        'cython': cython,
        'python_include': python_include,
        'python_lib': python_lib,
        'python_libpath': python_libpath,
        'numpy_include': numpy_include,
        'annarchy_include': annarchy_include,
        'net_id': self.net_id,
        'cython_ext': path_to_cython_ext
    }

    # Write the Makefile to disk
    with open(self.annarchy_dir + '/generate/net' + str(self.net_id) + '/Makefile', 'w') as wfile:
        wfile.write(makefile_template % makefile_flags)
def load(self, filename):
    "Not available."
    Global._warning('Transposed projections cannot be loaded.')
def _instantiate(net_id, import_id=-1, cuda_config=None, user_config=None):
    """
    After everything is compiled, actually create the Cython objects and
    bind them to the Python ones.
    """
    if Global._profiler:
        t0 = time.time()

    # parallel_run(number=x) defines multiple networks (net_id) but only network0 is compiled
    if import_id < 0:
        import_id = net_id

    # subdirectory where the library lies
    annarchy_dir = Global._network[import_id]['directory']
    libname = 'ANNarchyCore' + str(import_id)
    libpath = annarchy_dir + '/' + libname + '.so'

    cython_module = load_cython_lib(libname, libpath)
    Global._network[net_id]['instance'] = cython_module

    # Set the CUDA device
    if Global._check_paradigm("cuda"):
        device = 0
        if cuda_config:
            device = int(cuda_config['device'])
        elif user_config and 'cuda' in user_config and 'device' in user_config['cuda']:
            device = int(user_config['cuda']['device'])

        if Global.config['verbose']:
            Global._print('Setting GPU device', device)
        cython_module.set_device(device)

    # Sets the desired number of threads and executes the thread placement.
    # This must be done before any other objects are initialized.
    if Global._check_paradigm("openmp") and Global.config["num_threads"] > 1:
        core_list = Global.config['visible_cores']

        if core_list != []:
            # some sanity checks
            if len(core_list) > multiprocessing.cpu_count():
                Global._error("The length of core ids provided to setup() is larger than the available number of cores")
            if len(core_list) < Global.config['num_threads']:
                Global._error("The list of visible cores should contain at least as many entries as there are threads.")
            if np.amax(np.array(core_list)) > multiprocessing.cpu_count():
                Global._error("At least one of the core ids provided to setup() is larger than the available number of cores")

            cython_module.set_number_threads(Global.config['num_threads'], core_list)
        else:
            # HD (26th Oct 2020): the current version of psutil only considers one CPU socket,
            # but there is a discussion about adding multi-socket support, so we could
            # re-add this code later ...
            """
            num_cores = psutil.cpu_count(logical=False)
            # Check if the number of threads makes sense
            if num_cores < Global.config['num_threads']:
                Global._warning("The number of threads =", Global.config['num_threads'], "exceeds the number of available physical cores =", num_cores)

            # ANNarchy should run only on physical cpu cores
            core_list = np.arange(0, num_cores)
            """
            cython_module.set_number_threads(Global.config['num_threads'], [])

    if Global.config["num_threads"] > 1:
        if Global.config['verbose']:
            Global._print('Running simulation with', Global.config['num_threads'], 'threads.')
    else:
        if Global.config['verbose']:
            Global._print('Running simulation single-threaded.')

    # Sets the desired computation device for CUDA
    if Global._check_paradigm("cuda") and (user_config != None):
        # check if there is a configuration,
        # otherwise fall back to the default device
        try:
            dev_id = int(user_config['cuda']['device'])
        except KeyError:
            dev_id = 0
        cython_module.set_device(dev_id)

    # Configure the seeds for the random number generators.
    # Required for state updates and also (in future) the construction of connectivity
    if Global.config['seed'] == -1:
        seed = time.time()
    else:
        seed = Global.config['seed']

    if not Global.config['disable_parallel_rng']:
        cython_module.set_seed(seed, Global.config['num_threads'], Global.config['use_seed_seq'])
    else:
        cython_module.set_seed(seed, 1, Global.config['use_seed_seq'])

    # Bind the py extensions to the corresponding python objects
    for pop in Global._network[net_id]['populations']:
        if Global.config['verbose']:
            Global._print('Creating population', pop.name)
        if Global.config['show_time']:
            ts = time.time()

        # Instantiate the population
        pop._instantiate(cython_module)

        if Global.config['show_time']:
            Global._print('Creating', pop.name, 'took', (time.time() - ts) * 1000, 'milliseconds')

    # Instantiate projections
    for proj in Global._network[net_id]['projections']:
        if Global.config['verbose']:
            Global._print('Creating projection from', proj.pre.name, 'to', proj.post.name, 'with target="', proj.target, '"')
        if Global.config['show_time']:
            ts = time.time()

        # Create the projection
        proj._instantiate(cython_module)

        if Global.config['show_time']:
            Global._print('Creating the projection took', (time.time() - ts) * 1000, 'milliseconds')

    # Finish initializing the network
    cython_module.pyx_create(Global.config['dt'])

    # Set the user-defined constants
    for obj in Global._objects['constants']:
        getattr(cython_module, '_set_' + obj.name)(obj.value)

    # Transfer initial values
    for pop in Global._network[net_id]['populations']:
        if Global.config['verbose']:
            Global._print('Initializing population', pop.name)
        pop._init_attributes()
    for proj in Global._network[net_id]['projections']:
        if Global.config['verbose']:
            Global._print('Initializing projection', proj.name, 'from', proj.pre.name, 'to', proj.post.name, 'with target="', proj.target, '"')
        proj._init_attributes()

    # The rng dist must be initialized after the pops and projs are created!
    if Global._check_paradigm("openmp"):
        cython_module.pyx_init_rng_dist()

    # Start the monitors
    for monitor in Global._network[net_id]['monitors']:
        monitor._init_monitoring()

    if Global._profiler:
        t1 = time.time()
        Global._profiler.add_entry(t0, t1, "instantiate()", "compile")
def step(self):
    """
    Performs a single simulation step (duration = ``dt``).
    """
    Global.step(self.id)
def __init__(self, parameters="", equations="", psp=None, operation='sum', pre_spike=None, post_spike=None, pre_axon_spike=None, functions=None, pruning=None, creating=None, name=None, description=None, extra_values={}): """ :param parameters: parameters of the neuron and their initial value. :param equations: equations defining the temporal evolution of variables. :param psp: continuous influence of a single synapse on the post-synaptic neuron (default for rate-coded: ``w*pre.r``). Synaptic transmission in spiking synapses occurs in ``pre_spike``. :param operation: operation (sum, max, min, mean) performed by the post-synaptic neuron on the individual psp (rate-coded only, default=sum). :param pre_spike: updating of variables when a pre-synaptic spike is received (spiking only). :param post_spike: updating of variables when a post-synaptic spike is emitted (spiking only). :param pre_axon_spike: updating of variables when an axonal spike was emitted (spiking only, default None). The usage of this arguments prevents the application of learning rules. :param functions: additional functions used in the equations. :param name: name of the synapse type (used for reporting only). :param description: short description of the synapse type (used for reporting). """ # Store the parameters and equations self.parameters = parameters self.equations = equations self.functions = functions self.pre_spike = pre_spike self.post_spike = post_spike self.psp = psp self.pre_axon_spike = pre_axon_spike self.operation = operation self.extra_values = extra_values self.pruning = pruning self.creating = creating # Type of the synapse TODO: smarter self.type = 'spike' if pre_spike else 'rate' # Check the operation if self.type == 'spike' and self.operation != 'sum': Global._error( 'Spiking synapses can only perform a sum of presynaptic potentials.' ) if not self.operation in ['sum', 'min', 'max', 'mean']: Global._error( 'The only operations permitted are: sum (default), min, max, mean.' ) # Sanity check if self.pre_axon_spike and self.post_spike: Global._error( "The usage of axonal spike events is currently not allowed for plastic connections." ) if (self.pruning or self.creating) and not Global.config['structural_plasticity']: Global._error( '"structural_plasticity" has not been set to True in setup(), pruning or creating statements in Synapse() would be without effect.' ) # Description self.description = None # Reporting if not hasattr(self, '_instantiated'): # User-defined Global._objects['synapses'].append(self) elif len(self._instantiated) == 0: # First instantiation of the class Global._objects['synapses'].append(self) self._rk_synapses_type = len(Global._objects['synapses']) if name: self.name = name else: self.name = self._default_names[self.type] if description: self.short_description = description else: if self.type == 'spike': self.short_description = "User-defined spiking synapse." else: self.short_description = "User-defined rate-coded synapse."
def _generate_spiking(self):
    """
    Generates the transposed projection for spiking models.

    TODO: openMP
    """
    if Global.config["num_threads"] > 1:
        Global._error('TransposeProjection for spiking projections is currently only available single-threaded ...')

    # Which projection is transposed
    self._specific_template['struct_additional'] = """
extern ProjStruct%(fwd_id_proj)s proj%(fwd_id_proj)s;    // Forward projection
""" % {'fwd_id_proj': self.fwd_proj.id}

    # Connectivity
    self._specific_template['declare_connectivity_matrix'] = ""  # reuse the forward projection data
    self._specific_template['access_connectivity_matrix'] = """
    std::vector<int> get_post_rank() {
        return proj%(fwd_id_proj)s.inv_post_rank;
    }
    size_t nb_synapses() {
        size_t size = 0;
        for (auto it = proj%(fwd_id_proj)s.inv_pre_rank.cbegin(); it != proj%(fwd_id_proj)s.inv_pre_rank.cend(); it++) {
            size += (it->second).size();
        }
        return size;
    }
    int nb_dendrites() {
        return proj%(fwd_id_proj)s.inv_post_rank.size();
    }
    int dendrite_size(int lil_idx) {
        int post_rank = proj%(fwd_id_proj)s.inv_post_rank[lil_idx];
        return proj%(fwd_id_proj)s.inv_pre_rank[post_rank].size();
    }
""" % {'fwd_id_proj': self.fwd_proj.id}
    self._specific_template['export_connector_call'] = ""
    self._specific_template['export_connectivity'] = """
        size_t nb_synapses()
        int nb_dendrites()
        int dendrite_size(int)
        vector[int] get_post_rank()
"""
    self._specific_template['wrapper_init_connectivity'] = """
        pass
"""
    self._specific_template['wrapper_access_connectivity'] = """
    def post_rank(self):
        return proj%(id_proj)s.get_post_rank()
    def nb_synapses(self):
        return proj%(id_proj)s.nb_synapses()
    def nb_dendrites(self):
        return proj%(id_proj)s.nb_dendrites()
    def dendrite_size(self, lil_idx):
        return proj%(id_proj)s.dendrite_size(lil_idx)
""" % {'id_proj': self.id}
    self._specific_template['wrapper_connector_call'] = ""

    # The weight index depends on whether the forward projection uses a single weight
    if self.fwd_proj._has_single_weight():
        weight_index = ""
    else:
        weight_index = "[post_idx][syn_idx]"

    # Computation: the transposed projection uses the forward view of the target matrix
    self._specific_template['psp_prefix'] = ""
    self._specific_template['psp_code'] = """
        for (auto it = pop%(id_pre)s.spiked.cbegin(); it != pop%(id_pre)s.spiked.cend(); it++) {
            auto pos_it = std::find(proj%(fwd_id_proj)s.post_rank.cbegin(), proj%(fwd_id_proj)s.post_rank.cend(), *it);
            if (pos_it == proj%(fwd_id_proj)s.post_rank.cend())
                continue;

            auto post_idx = std::distance(proj%(fwd_id_proj)s.post_rank.cbegin(), pos_it);

            for (int syn_idx = 0; syn_idx < proj%(fwd_id_proj)s.pre_rank[post_idx].size(); syn_idx++) {
                auto pre_idx = proj%(fwd_id_proj)s.pre_rank[post_idx][syn_idx];
                pop%(id_post)s.g_%(target)s[pre_idx] += proj%(fwd_id_proj)s.w%(weight_index)s;
            }
        }
""" % {
        'id_pre': self.pre.id,
        'id_post': self.post.id,
        'target': self.target,
        'fwd_id_proj': self.fwd_proj.id,
        'weight_index': weight_index
    }

    # suppress monitor
    self._specific_template['monitor_export'] = ""
    self._specific_template['monitor_wrapper'] = ""
    self._specific_template['monitor_class'] = ""
    self._specific_template['pyx_wrapper'] = ""

    # Others
    self._specific_template['size_in_bytes'] = "//TODO:"
    self._specific_template['clear'] = "//TODO:"
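In Python terms, the generated ``psp_code`` above performs the following event propagation (sketch with hypothetical plain-list data structures)::

    def propagate_transposed(spiked, fwd_post_rank, fwd_pre_rank, fwd_w, g_target):
        # For every spiking neuron, find its row in the *forward* projection
        # and deposit the weights into the conductances of that row's column indices.
        for rank in spiked:
            if rank not in fwd_post_rank:
                continue
            post_idx = fwd_post_rank.index(rank)
            for syn_idx, pre_idx in enumerate(fwd_pre_rank[post_idx]):
                g_target[pre_idx] += fwd_w[post_idx][syn_idx]

    g = [0.0, 0.0, 0.0]
    propagate_transposed([5], fwd_post_rank=[5], fwd_pre_rank=[[0, 2]], fwd_w=[[0.5, 1.5]], g_target=g)
    print(g)   # [0.5, 0.0, 1.5]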