def test_check_weight_with_scalar():
    """check_weight on scalar weights: the allowed sign depends on synapse type
    and on whether the synapse is conductance-based."""
    # Positive weights are valid for excitatory synapses (either kind) and
    # for conductance-based inhibitory synapses.
    for synapse, conductance in [('excitatory', True),
                                 ('excitatory', False),
                                 ('inhibitory', True)]:
        assert_equal(4.3, common.check_weight(4.3, synapse, is_conductance=conductance))
    # Current-based inhibitory weights must be negative.
    assert_equal(-4.3, common.check_weight(-4.3, 'inhibitory', is_conductance=False))
    # None falls back to the default weight.
    assert_equal(common.DEFAULT_WEIGHT,
                 common.check_weight(None, 'excitatory', is_conductance=True))
    # Any sign violation raises InvalidWeightError.
    for weight, synapse, conductance in [(4.3, 'inhibitory', False),
                                         (-4.3, 'inhibitory', True),
                                         (-4.3, 'excitatory', True),
                                         (-4.3, 'excitatory', False)]:
        assert_raises(errors.InvalidWeightError,
                      common.check_weight, weight, synapse, is_conductance=conductance)
def test_check_weight_with_list():
    """check_weight applied to lists of weights: all entries must share the
    sign appropriate to the synapse type."""
    # All-positive weights: valid except for current-based inhibitory synapses.
    w = range(10)
    assert_equal(w, common.check_weight(w, 'excitatory', is_conductance=True).tolist())
    assert_equal(w, common.check_weight(w, 'excitatory', is_conductance=False).tolist())
    assert_equal(w, common.check_weight(w, 'inhibitory', is_conductance=True).tolist())
    assert_raises(errors.InvalidWeightError,
                  common.check_weight, w, 'inhibitory', is_conductance=False)
    # All-negative weights: only valid for current-based inhibitory synapses.
    w = range(-10, 0)
    assert_equal(w, common.check_weight(w, 'inhibitory', is_conductance=False).tolist())
    for synapse, conductance in [('inhibitory', True),
                                 ('excitatory', True),
                                 ('excitatory', False)]:
        assert_raises(errors.InvalidWeightError,
                      common.check_weight, w, synapse, is_conductance=conductance)
    # Mixed-sign weights are never valid.
    w = range(-5, 5)
    for synapse in ('excitatory', 'inhibitory'):
        for conductance in (True, False):
            assert_raises(errors.InvalidWeightError,
                          common.check_weight, w, synapse, is_conductance=conductance)
def connect(self, projection):
    """Connect-up a Projection.

    For each local post-synaptic cell, pick ``n`` pre-synaptic cells at
    random (``n`` fixed, or drawn afresh from ``self.rand_distr`` if
    present) and make convergent connections onto the target.
    """
    # NativeRNG cannot supply the permutations used below.
    if isinstance(projection.rng, random.NativeRNG):
        raise Exception("Warning: use of NativeRNG not implemented.")
    for target in projection.post.local_cells.flat:
        # pick n neurons at random
        if hasattr(self, 'rand_distr'):
            n = self.rand_distr.next()
        else:
            n = self.n
        candidates = projection.pre.all_cells.flatten().tolist()
        # exclude the target itself when pre and post are the same population
        if not self.allow_self_connections and projection.pre == projection.post:
            candidates.remove(target)
        sources = []
        while len(sources) < n:  # if the number of requested cells is larger than the size of the
            # presynaptic population, we allow multiple connections for a given cell
            sources += [candidates[candidates.index(id)]
                        for id in projection.rng.permutation(candidates)[0:n]]
            # have to use index() because rng.permutation returns ints, not ID objects
        # the loop may overshoot; keep exactly n sources
        sources = sources[:n]
        weights = self.get_weights(n)
        is_conductance = common.is_conductance(projection.post.index(0))
        # validates the sign of the weights against the synapse type
        weights = common.check_weight(weights, projection.synapse_type, is_conductance)
        delays = self.get_delays(n)
        projection.connection_manager.convergent_connect(sources, [target], weights, delays)
def test_check_weight_with_list():
    """check_weight on list weights: sign must be consistent with synapse type."""
    check = common.check_weight
    # uniformly positive weights
    positive = range(10)
    assert_equal(positive, check(positive, 'excitatory', is_conductance=True).tolist())
    assert_equal(positive, check(positive, 'excitatory', is_conductance=False).tolist())
    assert_equal(positive, check(positive, 'inhibitory', is_conductance=True).tolist())
    # positive weights are invalid for current-based inhibitory synapses
    assert_raises(errors.InvalidWeightError, check, positive, 'inhibitory', is_conductance=False)
    # uniformly negative weights: only current-based inhibitory accepts them
    negative = range(-10, 0)
    assert_equal(negative, check(negative, 'inhibitory', is_conductance=False).tolist())
    assert_raises(errors.InvalidWeightError, check, negative, 'inhibitory', is_conductance=True)
    assert_raises(errors.InvalidWeightError, check, negative, 'excitatory', is_conductance=True)
    assert_raises(errors.InvalidWeightError, check, negative, 'excitatory', is_conductance=False)
    # mixed-sign weights are always rejected
    mixed = range(-5, 5)
    assert_raises(errors.InvalidWeightError, check, mixed, 'excitatory', is_conductance=True)
    assert_raises(errors.InvalidWeightError, check, mixed, 'excitatory', is_conductance=False)
    assert_raises(errors.InvalidWeightError, check, mixed, 'inhibitory', is_conductance=True)
    assert_raises(errors.InvalidWeightError, check, mixed, 'inhibitory', is_conductance=False)
def test_check_weight_with_NaN():
    """check_weight passes NaN entries through unchanged."""
    w = numpy.arange(10.0)
    w[0] = numpy.nan
    checked = common.check_weight(w, 'excitatory', is_conductance=True)
    # compare only indices 1..9: NaN != NaN by definition
    assert_arrays_equal(w[1:], checked[1:])
def _probabilistic_connect(self, projection, p):
    """
    Connect-up a Projection with connection probability p, where p may be either
    a float 0<=p<=1, or a dict containing a float array for each pre-synaptic
    cell, the array containing the connection probabilities for all the local
    targets of that pre-synaptic cell.
    """
    if isinstance(projection.rng, random.NativeRNG):
        raise Exception("Use of NativeRNG not implemented.")
    else:
        rng = projection.rng
    # boolean mask selecting the post-synaptic cells local to this node
    local = projection.post._mask_local.flatten()
    is_conductance = common.is_conductance(projection.post.index(0))
    for src in projection.pre.all():
        # ( the following two lines are a nice idea, but this needs some thought for
        #   the parallel case, to ensure reproducibility when varying the number
        #   of processors
        #      N = rng.binomial(npost,self.p_connect,1)[0]
        #      targets = sample(postsynaptic_neurons, N)
        # )
        N = projection.post.size
        # if running in parallel, rng.next(N) will not return N values, but only
        # as many as are needed on this node, as determined by mask_local.
        # Over the simulation as a whole (all nodes), N values will indeed be
        # returned.
        rarr = rng.next(N, 'uniform', (0, 1), mask_local=local)
        if not common.is_listlike(rarr) and common.is_number(rarr):  # if N=1, rarr will be a single number
            rarr = numpy.array([rarr])
        # decide which local targets receive a connection from src
        if common.is_number(p):
            create = rarr < p
        else:
            create = rarr < p[src][local]
        if create.shape != projection.post.local_cells.shape:
            logger.warning("Too many random numbers. Discarding the excess. Did you specify MPI rank and number of processes when you created the random number generator?")
            create = create[:projection.post.local_cells.size]
        targets = projection.post.local_cells[create].tolist()
        weights = self.get_weights(N, local)[create]
        # validates weight signs against the synapse type
        weights = common.check_weight(weights, projection.synapse_type, is_conductance)
        delays = self.get_delays(N, local)[create]
        # remove any self-connection that was drawn, keeping the three
        # parallel sequences (targets, weights, delays) aligned
        if not self.allow_self_connections and projection.pre == projection.post and src in targets:
            assert len(targets) == len(weights) == len(delays)
            i = targets.index(src)
            weights = numpy.delete(weights, i)
            delays = numpy.delete(delays, i)
            targets.remove(src)
        if len(targets) > 0:
            projection.connection_manager.connect(src, targets, weights, delays)
def connect(self, projection):
    """Connect-up a Projection.

    For each pre-synaptic cell, pick ``n`` post-synaptic cells at random
    (``n`` fixed, or drawn afresh from ``self.rand_distr`` if present) and
    make divergent connections from the source.
    """
    # NativeRNG cannot supply the permutations used below.
    if isinstance(projection.rng, random.NativeRNG):
        raise Exception("Warning: use of NativeRNG not implemented.")
    for source in projection.pre.all_cells.flat:
        # pick n neurons at random
        if hasattr(self, 'rand_distr'):
            n = self.rand_distr.next()
        else:
            n = self.n
        candidates = projection.post.all_cells.flatten().tolist()
        # exclude the source itself when pre and post are the same population
        if not self.allow_self_connections and projection.pre == projection.post:
            candidates.remove(source)
        targets = []
        while len(targets) < n:  # if the number of requested cells is larger than the size of the
            # postsynaptic population, we allow multiple connections for a given cell
            targets += [candidates[candidates.index(id)]
                        for id in projection.rng.permutation(candidates)[0:n]]
            # have to use index() because rng.permutation returns ints, not ID objects
        # keep exactly n targets, as an object array of ID instances
        targets = numpy.array(targets[:n], dtype=common.IDMixin)
        weights = self.get_weights(n)
        is_conductance = common.is_conductance(projection.post.index(0))
        # validates weight signs against the synapse type
        weights = common.check_weight(weights, projection.synapse_type, is_conductance)
        delays = self.get_delays(n)
        #local = numpy.array([tgt.local for tgt in targets])
        #if local.size > 0:
        #    targets = targets[local]
        #    weights = weights[local]
        #    delays = delays[local]
        targets = targets.tolist()
        #print common.rank(), source, targets
        if len(targets) > 0:
            projection.connection_manager.connect(source, targets, weights, delays)
def connect(self, projection):
    """Connect-up a Projection, pairing each local post-synaptic cell with
    the pre-synaptic cell at the corresponding index."""
    # equal dimensions are required for a one-to-one mapping
    if projection.pre.dim != projection.post.dim:
        raise common.InvalidDimensionsError(
            "OneToOneConnector does not support presynaptic and postsynaptic Populations of different sizes.")
    size = projection.post.size
    local_mask = projection.post._mask_local.flatten()
    weights = self.get_weights(size, local_mask)
    is_conductance = common.is_conductance(projection.post.index(0))
    # validates weight signs against the synapse type
    weights = common.check_weight(weights, projection.synapse_type, is_conductance)
    delays = self.get_delays(size, local_mask)
    for target, weight, delay in zip(projection.post.local_cells, weights, delays):
        source = projection.pre.index(projection.post.id_to_index(target))
        # cast to plain float in case the values are numpy.float64, which NEST chokes on
        projection.connection_manager.connect(source, [target], float(weight), float(delay))
def connect(self, projection):
    """Connect-up a Projection.

    For each local post-synaptic cell, pick ``self.n`` pre-synaptic cells at
    random and connect them convergently, with delays proportional to
    distance (``dist_factor``) modulated by multiplicative gamma noise
    (``noise_factor``).  Delays are clipped to the simulator's legal range.
    Per-phase timings are accumulated and printed on rank 0.
    """
    # Timers (one accumulator per phase, reported at the end)
    global rank
    timer0 = 0.0  # picking random pre-synaptic cells
    timer1 = 0.0  # distance lookup
    timer2 = 0.0  # delay computation
    timer3 = 0.0  # delay clipping
    timer4 = 0.0  # actual connection

    # Recuperate variables #
    n = self.n
    dist_factor = self.dist_factor
    noise_factor = self.noise_factor

    # Do some checking #
    assert dist_factor >= 0
    assert noise_factor >= 0
    if isinstance(n, int):
        assert n >= 0
    else:
        raise Exception("n must be an integer.")

    # Get posts and pres #
    listPostIDs = projection.post.local_cells
    listPreIDs = projection.pre.all_cells
    countPost = len(listPostIDs)
    countPre = len(listPreIDs)
    listPreIndexes = numpy.arange(countPre)
    listPostIndexes = map(projection.post.id_to_index, listPostIDs)

    # Prepare all distances #
    allDistances = self.space.distances(projection.post.positions, projection.pre.positions)

    # Get weights (same weight array reused for every target) #
    weights = numpy.empty(n)
    weights[:] = self.weights
    is_conductance = common.is_conductance(projection.post[listPostIndexes[0]])
    weights = common.check_weight(weights, projection.synapse_type, is_conductance)

    numpy.random.seed(12345)
    for i in xrange(len(listPostIDs)):
        currentPostIndex = listPostIndexes[i]
        currentPostID = listPostIDs[i]

        # Pick n neurons at random in pre population
        myTimer = time.time()
        chosenPresIndexes = list(numpy.random.permutation(numpy.arange(countPre))[0:n])
        chosenPresIDs = list(projection.pre[chosenPresIndexes].all_cells)
        timer0 += time.time() - myTimer

        # Get distances
        myTimer = time.time()
        distances = allDistances[currentPostIndex, chosenPresIndexes]
        timer1 += time.time() - myTimer

        # Generate gamma noise
        noise = numpy.random.gamma(1.0, noise_factor, n)

        # Create delays with distance and noise
        myTimer = time.time()
        delays = dist_factor * distances * (1.0 + noise)
        timer2 += time.time() - myTimer

        # Check for small and big delays
        myTimer = time.time()
        delaysClipped = numpy.clip(delays, sim.get_min_delay(), sim.get_max_delay())
        howManyClipped = numpy.count_nonzero(delays != delaysClipped)
        # BUG FIX: was "> 1", which silently ignored the case of exactly one
        # clipped delay; also fixed the "cliped" typo in the message.
        if howManyClipped > 0:
            print("Warning: %d of %d delays were clipped because they were either bigger than the max delay or lower than the min delay." % (howManyClipped, n))
        delaysClipped = delaysClipped.tolist()
        timer3 += time.time() - myTimer

        # Connect everything up
        # BUG FIX: the start time was assigned to "yTimer" (typo), so timer4
        # wrongly measured from the start of the clipping phase.
        myTimer = time.time()
        projection._convergent_connect(chosenPresIDs, currentPostID, weights, delaysClipped)
        timer4 += time.time() - myTimer

    # Print timings
    if rank == 0:
        for idx, elapsed in enumerate((timer0, timer1, timer2, timer3, timer4)):
            print("\033[2;46m" + ("Timer %d: %5.4f seconds" % (idx, elapsed)).ljust(60) + "\033[m")
def connect(self, projection):
    """Connect-up a Projection.

    For each local post-synaptic cell, pick ``self.n`` pre-synaptic cells at
    random and connect them convergently, with delays proportional to
    distance (``dist_factor``) modulated by multiplicative gamma noise
    (``noise_factor``).  Delays are clipped to the simulator's legal range.
    Per-phase timings are accumulated and printed on rank 0.
    """
    # Timers (one accumulator per phase, reported at the end)
    global rank
    timer0 = 0.0  # picking random pre-synaptic cells
    timer1 = 0.0  # distance lookup
    timer2 = 0.0  # delay computation
    timer3 = 0.0  # delay clipping
    timer4 = 0.0  # actual connection

    # Recuperate variables #
    n = self.n
    dist_factor = self.dist_factor
    noise_factor = self.noise_factor

    # Do some checking #
    assert dist_factor >= 0
    assert noise_factor >= 0
    if isinstance(n, int):
        assert n >= 0
    else:
        raise Exception("n must be an integer.")

    # Get posts and pres #
    listPostIDs = projection.post.local_cells
    listPreIDs = projection.pre.all_cells
    countPost = len(listPostIDs)
    countPre = len(listPreIDs)
    listPreIndexes = numpy.arange(countPre)
    listPostIndexes = map(projection.post.id_to_index, listPostIDs)

    # Prepare all distances #
    allDistances = self.space.distances(projection.post.positions, projection.pre.positions)

    # Get weights (same weight array reused for every target) #
    weights = numpy.empty(n)
    weights[:] = self.weights
    is_conductance = common.is_conductance(projection.post[listPostIndexes[0]])
    weights = common.check_weight(weights, projection.synapse_type, is_conductance)

    for i in xrange(len(listPostIDs)):
        currentPostIndex = listPostIndexes[i]
        currentPostID = listPostIDs[i]

        # Pick n neurons at random in pre population
        myTimer = time.time()
        chosenPresIndexes = list(numpy.random.permutation(numpy.arange(countPre))[0:n])
        chosenPresIDs = list(projection.pre[chosenPresIndexes].all_cells)
        timer0 += time.time() - myTimer

        # Get distances
        myTimer = time.time()
        distances = allDistances[currentPostIndex, chosenPresIndexes]
        timer1 += time.time() - myTimer

        # Generate gamma noise
        noise = numpy.random.gamma(1.0, noise_factor, n)

        # Create delays with distance and noise
        myTimer = time.time()
        delays = dist_factor * distances * (1.0 + noise)
        timer2 += time.time() - myTimer

        # Check for small and big delays
        myTimer = time.time()
        delaysClipped = numpy.clip(delays, common.get_min_delay(), common.get_max_delay())
        howManyClipped = numpy.count_nonzero(delays != delaysClipped)
        # BUG FIX: was "> 1", which silently ignored the case of exactly one
        # clipped delay; also fixed the "cliped" typo in the message.
        if howManyClipped > 0:
            print("Warning: %d of %d delays were clipped because they were either bigger than the max delay or lower than the min delay." % (howManyClipped, n))
        delaysClipped = delaysClipped.tolist()
        timer3 += time.time() - myTimer

        # Connect everything up
        # BUG FIX: the start time was assigned to "yTimer" (typo), so timer4
        # wrongly measured from the start of the clipping phase.
        myTimer = time.time()
        projection._convergent_connect(chosenPresIDs, currentPostID, weights, delaysClipped)
        timer4 += time.time() - myTimer

    # Print timings
    if rank == 0:
        for idx, elapsed in enumerate((timer0, timer1, timer2, timer3, timer4)):
            print("\033[2;46m" + ("Timer %d: %5.4f seconds" % (idx, elapsed)).ljust(60) + "\033[m")
def test_check_weight_is_conductance_is_None():
    """With is_conductance=None the scalar weight is accepted unchanged."""
    # need to check that a log message was created
    result = common.check_weight(4.3, 'excitatory', is_conductance=None)
    assert_equal(4.3, result)
def test_check_weight_with_NaN():
    """check_weight tolerates NaN entries in the weight array."""
    weights = numpy.arange(10.0)
    weights[0] = numpy.nan
    result = common.check_weight(weights, 'excitatory', is_conductance=True)
    # NaN != NaN by definition, so only the finite entries are compared
    assert_arrays_equal(weights[1:], result[1:])