def __call__(self, pre_size, post_size=None):
    self.num_pre = utils.size2len(pre_size)
    if post_size is not None:
        if pre_size != post_size:
            raise errors.ModelUseError(
                f'The shape of the pre-synaptic group should be the same as that of the '
                f'post-synaptic group. But we got {pre_size} != {post_size}.')
        self.num_post = utils.size2len(post_size)
    else:
        self.num_post = self.num_pre

    if len(pre_size) == 1:
        height, width = pre_size[0], 1
    elif len(pre_size) == 2:
        height, width = pre_size
    else:
        raise errors.ModelUseError('Currently only two-dimensional geometries are supported.')

    conn_i = []
    conn_j = []
    for row in range(height):
        res = _grid_n(height=height, width=width, row=row,
                      n=self.N, include_self=self.include_self)
        conn_i.extend(res[0])
        conn_j.extend(res[1])
    self.pre_ids = ops.as_tensor(conn_i)
    self.post_ids = ops.as_tensor(conn_j)
    return self
def __call__(self, pre_size, post_size):
    self.num_pre = num_pre = utils.size2len(pre_size)
    self.num_post = num_post = utils.size2len(post_size)
    assert len(pre_size) == 2
    assert len(post_size) == 2
    pre_height, pre_width = pre_size
    post_height, post_width = post_size

    # get the connections
    i, j, p = [], [], []  # conn_i, conn_j, probabilities
    for pre_i in range(num_pre):
        a = _gaussian_prob(pre_i=pre_i,
                           pre_width=pre_width,
                           pre_height=pre_height,
                           num_post=num_post,
                           post_width=post_width,
                           post_height=post_height,
                           p_min=self.p_min,
                           sigma=self.sigma,
                           normalize=self.normalize,
                           include_self=self.include_self)
        i.extend(a[0])
        j.extend(a[1])
        p.extend(a[2])

    p = np.asarray(p, dtype=np.float_)
    selected_idxs = np.where(np.random.random(len(p)) < p)[0]
    i = np.asarray(i, dtype=np.int_)[selected_idxs]
    j = np.asarray(j, dtype=np.int_)[selected_idxs]
    self.pre_ids = ops.as_tensor(i)
    self.post_ids = ops.as_tensor(j)
    return self
def __call__(self, pre_size, post_size):
    num_pre = utils.size2len(pre_size)
    num_post = utils.size2len(post_size)
    self.num_pre = num_pre
    self.num_post = num_post
    assert len(pre_size) == 2
    assert len(post_size) == 2
    pre_height, pre_width = pre_size
    post_height, post_width = post_size

    # get the connections and weights
    i, j, w = [], [], []
    for pre_i in range(num_pre):
        a = _gaussian_weight(pre_i=pre_i,
                             pre_width=pre_width,
                             pre_height=pre_height,
                             num_post=num_post,
                             post_width=post_width,
                             post_height=post_height,
                             w_max=self.w_max,
                             w_min=self.w_min,
                             sigma=self.sigma,
                             normalize=self.normalize,
                             include_self=self.include_self)
        i.extend(a[0])
        j.extend(a[1])
        w.extend(a[2])

    pre_ids = np.asarray(i, dtype=np.int_)
    post_ids = np.asarray(j, dtype=np.int_)
    w = np.asarray(w, dtype=np.float_)
    self.pre_ids = ops.as_tensor(pre_ids)
    self.post_ids = ops.as_tensor(post_ids)
    self.weights = ops.as_tensor(w)
    return self
def __call__(self, pre_size, post_size=None):
    num_node = utils.size2len(pre_size)
    assert num_node == utils.size2len(post_size)
    self.num_pre = self.num_post = num_node
    if self.m < 1 or num_node < self.m:
        raise ValueError(f"Must have m >= 1 and m <= n, while m={self.m} and n={num_node}")

    # add m initial nodes (m0 in barabasi-speak)
    conn = np.zeros((num_node, num_node), dtype=bool)
    # list of existing nodes to sample from,
    # with nodes repeated once for each adjacent edge
    repeated_nodes = list(range(self.m))
    source = self.m  # next node is m
    while source < num_node:  # Now add the other n-1 nodes
        possible_targets = _random_subset(repeated_nodes, self.m, self.rng)
        # do one preferential attachment for the new node
        target = possible_targets.pop()
        conn[source, target] = True
        if not self.directed:
            conn[target, source] = True
        repeated_nodes.append(target)  # add one node to list for each new link
        count = 1
        while count < self.m:  # add m-1 more new links
            if self.rng.random() < self.p:  # clustering step: add triangle
                neighbors = np.where(conn[target])[0]
                neighborhood = [nbr for nbr in neighbors
                                if not conn[source, nbr] and nbr != source]
                if neighborhood:  # if there is a neighbor without a link
                    nbr = self.rng.choice(neighborhood)
                    conn[source, nbr] = True  # add triangle
                    if not self.directed:
                        conn[nbr, source] = True
                    repeated_nodes.append(nbr)
                    count = count + 1
                    continue  # go to top of while loop
            # else do preferential attachment step if above fails
            target = possible_targets.pop()
            conn[source, target] = True
            if not self.directed:
                conn[target, source] = True
            repeated_nodes.append(target)
            count = count + 1
        repeated_nodes.extend([source] * self.m)  # add source node to list m times
        source += 1

    self.conn_mat = ops.as_tensor(conn)
    pre_ids, post_ids = np.where(conn)
    pre_ids = np.ascontiguousarray(pre_ids)
    post_ids = np.ascontiguousarray(post_ids)
    self.pre_ids = ops.as_tensor(pre_ids)
    self.post_ids = ops.as_tensor(post_ids)
    return self
def __call__(self, pre_size, post_size=None):
    num_node = utils.size2len(pre_size)
    assert num_node == utils.size2len(post_size)
    self.num_pre = self.num_post = num_node
    if self.m1 < 1 or self.m1 >= num_node:
        raise ValueError(f"Dual Barabási–Albert network must have m1 >= 1 and m1 < num_node, "
                         f"while m1 = {self.m1} and num_node = {num_node}.")
    if self.m2 < 1 or self.m2 >= num_node:
        raise ValueError(f"Dual Barabási–Albert network must have m2 >= 1 and m2 < num_node, "
                         f"while m2 = {self.m2} and num_node = {num_node}.")
    if self.p < 0 or self.p > 1:
        raise ValueError(f"Dual Barabási–Albert network must have 0 <= p <= 1, while p = {self.p}")

    # Add max(m1, m2) initial nodes (m0 in barabasi-speak)
    conn = np.zeros((num_node, num_node), dtype=bool)
    # Target nodes for new edges
    targets = list(range(max(self.m1, self.m2)))
    # List of existing nodes, with nodes repeated once for each adjacent edge
    repeated_nodes = []
    # Start adding the remaining nodes.
    source = max(self.m1, self.m2)
    # Pick which m to use first time (m1 or m2)
    if self.rng.random() < self.p:
        m = self.m1
    else:
        m = self.m2
    while source < num_node:
        # Add edges to m nodes from the source.
        origins = [source] * m
        conn[origins, targets] = True
        if not self.directed:
            conn[targets, origins] = True
        # Add one node to the list for each new edge just created.
        repeated_nodes.extend(targets)
        # And the new node "source" has m edges to add to the list.
        repeated_nodes.extend([source] * m)
        # Pick which m to use next time (m1 or m2)
        if self.rng.random() < self.p:
            m = self.m1
        else:
            m = self.m2
        # Now choose m unique nodes from the existing nodes
        # Pick uniformly from repeated_nodes (preferential attachment)
        targets = _random_subset(repeated_nodes, m, self.rng)
        source += 1

    self.conn_mat = ops.as_tensor(conn)
    pre_ids, post_ids = np.where(conn)
    pre_ids = np.ascontiguousarray(pre_ids)
    post_ids = np.ascontiguousarray(post_ids)
    self.pre_ids = ops.as_tensor(pre_ids)
    self.post_ids = ops.as_tensor(post_ids)
    return self
def __call__(self, pre_size, post_size):
    pre_len = utils.size2len(pre_size)
    post_len = utils.size2len(post_size)
    self.num_pre = pre_len
    self.num_post = post_len

    mat = np.ones((pre_len, post_len))
    if not self.include_self:
        np.fill_diagonal(mat, 0)
    pre_ids, post_ids = np.where(mat > 0)
    self.pre_ids = ops.as_tensor(np.ascontiguousarray(pre_ids))
    self.post_ids = ops.as_tensor(np.ascontiguousarray(post_ids))
    self.conn_mat = ops.as_tensor(mat)
    return self
def pre_slice(i, j, num_pre=None):
    """Get the pre-to-post connections sliced by the pre-synaptic ids.

    Parameters
    ----------
    i : list, np.ndarray
        The pre-synaptic neuron indexes.
    j : list, np.ndarray
        The post-synaptic neuron indexes.
    num_pre : int
        The number of the pre-synaptic neurons.

    Returns
    -------
    conn : tuple
        (pre ids, post ids, pre2post slicing), where ``slicing[k]`` gives the
        [start, end) range of synapses whose pre-synaptic neuron is ``k``.
    """
    # check
    if len(i) != len(j):
        raise errors.ModelUseError('The length of "i" and "j" must be the same.')
    if num_pre is None:
        print('WARNING: "num_pre" is not provided, the result may not be accurate.')
        num_pre = i.max() + 1  # maximum index + 1

    # pre2post connection
    pre2post_list = [[] for _ in range(num_pre)]
    for pre_id, post_id in zip(i, j):
        pre2post_list[pre_id].append(post_id)
    pre_ids, post_ids = [], []
    for pre_i, posts in enumerate(pre2post_list):
        post_ids.extend(posts)
        pre_ids.extend([pre_i] * len(posts))
    post_ids = ops.as_tensor(post_ids, dtype=ops.int)
    pre_ids = ops.as_tensor(pre_ids, dtype=ops.int)

    # pre2post slicing
    slicing = []
    start = 0
    for posts in pre2post_list:
        end = start + len(posts)
        slicing.append([start, end])
        start = end
    slicing = ops.as_tensor(slicing, dtype=ops.int)

    return pre_ids, post_ids, slicing
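# Illustrative sketch (not part of the original module): a hypothetical helper
# showing how `pre_slice` groups synapses by their pre-synaptic neuron. It
# assumes the active `ops` backend accepts NumPy integer arrays.
def _demo_pre_slice():
    i = np.array([0, 0, 1, 2])  # pre neuron of each synapse
    j = np.array([1, 2, 2, 0])  # post neuron of each synapse
    pre_ids, post_ids, slicing = pre_slice(i, j, num_pre=3)
    # slicing[k] is the [start, end) range of synapses whose pre neuron is k,
    # so here it should read [[0, 2], [2, 3], [3, 4]].
    return pre_ids, post_ids, slicing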
def ramp_input(c_start, c_end, duration, t_start=0, t_end=None, dt=None):
    """Get a linearly ramped (gradually changing) input current.

    Parameters
    ----------
    c_start : float
        The minimum (or maximum) current size.
    c_end : float
        The maximum (or minimum) current size.
    duration : int, float
        The total duration.
    t_start : float
        The time point at which the ramped current starts.
    t_end : float
        The time point at which the ramped current ends. Default is None,
        which means the end of the total duration.
    dt : float, int, optional
        The numerical precision.

    Returns
    -------
    current : tensor
        The formatted ramp current.
    """
    dt = backend.get_dt() if dt is None else dt
    t_end = duration if t_end is None else t_end

    current = ops.zeros(int(math.ceil(duration / dt)))
    p1 = int(math.ceil(t_start / dt))
    p2 = int(math.ceil(t_end / dt))
    current[p1: p2] = ops.as_tensor(np.linspace(c_start, c_end, p2 - p1))
    return current
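# Illustrative sketch (not part of the original module): a hypothetical helper
# building a 500 ms input that ramps linearly from 0 to 1 between t = 100 and
# t = 400 and is zero outside that window. It assumes `duration`, `t_start`,
# `t_end`, and `dt` share the same time unit.
def _demo_ramp_input():
    return ramp_input(c_start=0., c_end=1., duration=500.,
                      t_start=100., t_end=400., dt=0.1)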
def post2syn(j, num_post=None):
    """Get the post2syn connections from the `j` indexes.

    Parameters
    ----------
    j : list, np.ndarray
        The post-synaptic neuron indexes.
    num_post : int
        The number of the post-synaptic neurons.

    Returns
    -------
    conn : list
        The conn list of post2syn.
    """
    if num_post is None:
        print('WARNING: "num_post" is not provided, the result may not be accurate.')
        num_post = j.max() + 1  # maximum index + 1

    post2syn_list = [[] for _ in range(num_post)]
    for syn_id, post_id in enumerate(j):
        post2syn_list[post_id].append(syn_id)
    post2syn_list = [ops.as_tensor(l, dtype=ops.int) for l in post2syn_list]

    if _numba_backend():
        post2syn_list_nb = nb.typed.List()
        for syn_ids in post2syn_list:
            post2syn_list_nb.append(syn_ids)
        post2syn_list = post2syn_list_nb
    return post2syn_list
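# Illustrative sketch (not part of the original module): a hypothetical helper
# showing how `post2syn` groups synapse indices by their post-synaptic neuron.
# With j = [1, 2, 2, 0], neuron 0 receives synapse [3], neuron 1 receives [0],
# and neuron 2 receives [1, 2].
def _demo_post2syn():
    j = np.array([1, 2, 2, 0])  # post neuron of synapses 0..3
    return post2syn(j, num_post=3)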
def __call__(self, pre_size, post_size):
    num_pre, num_post = utils.size2len(pre_size), utils.size2len(post_size)
    self.num_pre, self.num_post = num_pre, num_post
    num = self.num if isinstance(self.num, int) else int(self.num * num_pre)
    assert num <= num_pre, f'"num" must not exceed "num_pre", but got {num} > {num_pre}'

    prob_mat = self.rng.random(size=(num_pre, num_post))
    if not self.include_self:
        np.fill_diagonal(prob_mat, 1.)
    # for every post neuron, keep the `num` pre neurons with the smallest random values
    arg_sort = np.argsort(prob_mat, axis=0)[:num]
    pre_ids = np.asarray(np.concatenate(arg_sort), dtype=np.int_)
    # `arg_sort` is flattened row by row, so the matching post ids are
    # `arange(num_post)` tiled `num` times
    post_ids = np.asarray(np.tile(np.arange(num_post), num), dtype=np.int_)
    self.pre_ids = ops.as_tensor(pre_ids)
    self.post_ids = ops.as_tensor(post_ids)
    return self
def __init__(self, size, delay_time):
    if isinstance(size, int):
        size = (size,)
    self.size = tuple(size)
    self.delay_time = delay_time

    if isinstance(delay_time, (int, float)):
        self.uniform_delay = True
        self.delay_num_step = int(math.ceil(delay_time / backend.get_dt())) + 1
        self.delay_data = ops.zeros((self.delay_num_step,) + self.size)
    else:
        if not len(self.size) == 1:
            raise NotImplementedError(
                f'Currently, BrainPy only supports 1D heterogeneous delays, while does '
                f'not implement the heterogeneous delay with {len(self.size)}-dimensions.')
        self.num = size2len(size)
        if isinstance(delay_time, type(ops.as_tensor([1]))):
            assert ops.shape(delay_time) == self.size
        elif callable(delay_time):
            delay_time2 = ops.zeros(size)
            for i in range(size[0]):
                delay_time2[i] = delay_time()
            delay_time = delay_time2
        else:
            raise NotImplementedError(f'Currently, BrainPy does not support delay type '
                                      f'of {type(delay_time)}: {delay_time}')
        self.uniform_delay = False
        delay = delay_time / backend.get_dt()
        dint = ops.as_tensor(delay_time / backend.get_dt(), dtype=int)
        ddiff = (delay - dint) >= 0.5
        self.delay_num_step = ops.as_tensor(delay + ddiff, dtype=int) + 1
        self.delay_data = ops.zeros((max(self.delay_num_step),) + size)
        self.diag = ops.arange(self.num)

    self.delay_in_idx = self.delay_num_step - 1
    if self.uniform_delay:
        self.delay_out_idx = 0
    else:
        self.delay_out_idx = ops.zeros(self.num, dtype=int)
    self.name = None
def mat2ij(conn_mat):
    """Get the i-j connections from the connectivity matrix.

    Parameters
    ----------
    conn_mat : np.ndarray
        Connectivity matrix with `(num_pre, num_post)` shape.

    Returns
    -------
    conn_tuple : tuple
        (Pre-synaptic neuron indexes, post-synaptic neuron indexes).
    """
    if len(ops.shape(conn_mat)) != 2:
        raise errors.ModelUseError('Connectivity matrix must be in the '
                                   'shape of (num_pre, num_post).')
    pre_ids, post_ids = ops.where(conn_mat > 0)
    return ops.as_tensor(pre_ids, dtype=ops.int), \
           ops.as_tensor(post_ids, dtype=ops.int)
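# Illustrative sketch (not part of the original module): a hypothetical helper
# extracting (pre, post) index pairs from a small dense matrix; it assumes the
# active `ops` backend accepts a NumPy matrix.
def _demo_mat2ij():
    conn_mat = np.array([[0, 1, 0],
                         [1, 0, 1]])
    pre_ids, post_ids = mat2ij(conn_mat)
    # expected pairs: (0, 1), (1, 0), (1, 2)
    return pre_ids, post_ids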
def __call__(self, pre_size, post_size):
    num_pre, num_post = utils.size2len(pre_size), utils.size2len(post_size)
    self.num_pre, self.num_post = num_pre, num_post

    if self.method == 'matrix':
        prob_mat = self.rng.random(size=(num_pre, num_post))
        if not self.include_self:
            np.fill_diagonal(prob_mat, 1.)
        conn_mat = np.array(prob_mat < self.prob, dtype=np.int_)
        pre_ids, post_ids = np.where(conn_mat)
        self.conn_mat = ops.as_tensor(conn_mat)
    else:
        pre_ids, post_ids = [], []
        for i in range(num_pre):
            pres, posts = _prob_conn(i, num_post, self.prob, self.include_self)
            pre_ids.extend(pres)
            post_ids.extend(posts)
    self.pre_ids = ops.as_tensor(np.ascontiguousarray(pre_ids))
    self.post_ids = ops.as_tensor(np.ascontiguousarray(post_ids))
    return self
def __call__(self, pre_size, post_size=None):
    num_node = utils.size2len(pre_size)
    assert num_node == utils.size2len(post_size)
    self.num_pre = self.num_post = num_node
    if self.m < 1 or self.m >= num_node:
        raise ValueError(f"Barabási–Albert network must have m >= 1 and "
                         f"m < n, while m = {self.m} and n = {num_node}")

    # Add m initial nodes (m0 in barabasi-speak)
    conn = np.zeros((num_node, num_node), dtype=bool)
    # Target nodes for new edges
    targets = list(range(self.m))
    # List of existing nodes, with nodes repeated once for each adjacent edge
    repeated_nodes = []
    # Start adding the other n-m nodes. The first node is m.
    source = self.m
    while source < num_node:
        # Add edges to m nodes from the source.
        origins = [source] * self.m
        conn[origins, targets] = True
        if not self.directed:
            conn[targets, origins] = True
        # Add one node to the list for each new edge just created.
        repeated_nodes.extend(targets)
        # And the new node "source" has m edges to add to the list.
        repeated_nodes.extend([source] * self.m)
        # Now choose m unique nodes from the existing nodes
        # Pick uniformly from repeated_nodes (preferential attachment)
        targets = _random_subset(repeated_nodes, self.m, self.rng)
        source += 1

    self.conn_mat = ops.as_tensor(conn)
    pre_ids, post_ids = np.where(conn)
    pre_ids = np.ascontiguousarray(pre_ids)
    post_ids = np.ascontiguousarray(post_ids)
    self.pre_ids = ops.as_tensor(pre_ids)
    self.post_ids = ops.as_tensor(post_ids)
    return self
def __call__(self, pre_size, post_size):
    self.num_pre = num_pre = utils.size2len(pre_size)
    self.num_post = num_post = utils.size2len(post_size)
    assert len(pre_size) == 2
    assert len(post_size) == 2
    pre_height, pre_width = pre_size
    post_height, post_width = post_size

    # get the connections and weights
    i, j, w = [], [], []  # conn_i, conn_j, weights
    for pre_i in range(num_pre):
        a = _dog(pre_i=pre_i,
                 pre_width=pre_width,
                 pre_height=pre_height,
                 num_post=num_post,
                 post_width=post_width,
                 post_height=post_height,
                 w_max_p=self.w_max_p,
                 w_max_n=self.w_max_n,
                 w_min=self.w_min,
                 sigma_p=self.sigma_p,
                 sigma_n=self.sigma_n,
                 normalize=self.normalize,
                 include_self=self.include_self)
        i.extend(a[0])
        j.extend(a[1])
        w.extend(a[2])

    # format connections and weights
    i = np.asarray(i, dtype=np.int_)
    j = np.asarray(j, dtype=np.int_)
    w = np.asarray(w, dtype=np.float_)
    self.pre_ids = ops.as_tensor(i)
    self.post_ids = ops.as_tensor(j)
    self.weights = ops.as_tensor(w)
    return self
def post2pre(i, j, num_post=None):
    """Get the post2pre connections from `i` and `j` indexes.

    Parameters
    ----------
    i : list, np.ndarray
        The pre-synaptic neuron indexes.
    j : list, np.ndarray
        The post-synaptic neuron indexes.
    num_post : int, None
        The number of the post-synaptic neurons.

    Returns
    -------
    conn : list
        The conn list of post2pre.
    """
    if len(i) != len(j):
        raise errors.ModelUseError('The length of "i" and "j" must be the same.')
    if num_post is None:
        print('WARNING: "num_post" is not provided, the result may not be accurate.')
        num_post = j.max() + 1  # maximum index + 1

    post2pre_list = [[] for _ in range(num_post)]
    for pre_id, post_id in zip(i, j):
        post2pre_list[post_id].append(pre_id)
    post2pre_list = [ops.as_tensor(l, dtype=ops.int) for l in post2pre_list]

    if _numba_backend():
        post2pre_list_nb = nb.typed.List()
        for post_id in range(num_post):
            post2pre_list_nb.append(post2pre_list[post_id])
        post2pre_list = post2pre_list_nb
    return post2pre_list
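# Illustrative sketch (not part of the original module): a hypothetical helper
# grouping pre-synaptic ids by their post-synaptic target. With i = [0, 0, 1]
# and j = [1, 2, 2], neuron 1 is innervated by [0] and neuron 2 by [0, 1].
def _demo_post2pre():
    i = np.array([0, 0, 1])  # pre neuron of each synapse
    j = np.array([1, 2, 2])  # post neuron of each synapse
    return post2pre(i, j, num_post=3)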
def __call__(self, pre_size, post_size):
    assert pre_size == post_size
    if isinstance(pre_size, int) or (isinstance(pre_size, (tuple, list)) and len(pre_size) == 1):
        num_node = pre_size if isinstance(pre_size, int) else pre_size[0]
        self.num_pre = self.num_post = num_node
        if self.num_neighbor > num_node:
            raise ValueError("num_neighbor > num_node, choose smaller num_neighbor "
                             "or larger num_node")
        # If k == n, the graph is complete, not Watts-Strogatz
        if self.num_neighbor == num_node:
            conn = np.ones((num_node, num_node), dtype=bool)
        else:
            conn = np.zeros((num_node, num_node), dtype=bool)
            nodes = np.array(list(range(num_node)))  # nodes are labeled 0 to n-1
            # connect each node to k/2 neighbors
            for j in range(1, self.num_neighbor // 2 + 1):
                targets = np.concatenate([nodes[j:], nodes[0:j]])  # first j nodes are now last in list
                conn[nodes, targets] = True
                conn[targets, nodes] = True

            # rewire edges from each node
            # loop over all nodes in order (label) and neighbors in order (distance)
            # no self loops or multiple edges allowed
            for j in range(1, self.num_neighbor // 2 + 1):  # outer loop is neighbors
                targets = np.concatenate([nodes[j:], nodes[0:j]])  # first j nodes are now last in list
                if self.directed:
                    # inner loop in node order
                    for u, v in zip(nodes, targets):
                        w = _smallworld_rewire(prob=self.prob, i=u, all_j=conn[u],
                                               include_self=self.include_self)
                        if w != -1:
                            conn[u, v] = False
                            conn[u, w] = True
                        w = _smallworld_rewire(prob=self.prob, i=u, all_j=conn[:, u],
                                               include_self=self.include_self)
                        if w != -1:
                            conn[v, u] = False
                            conn[w, u] = True
                else:
                    # inner loop in node order
                    for u, v in zip(nodes, targets):
                        w = _smallworld_rewire(prob=self.prob, i=u, all_j=conn[u],
                                               include_self=self.include_self)
                        if w != -1:
                            conn[u, v] = False
                            conn[v, u] = False
                            conn[u, w] = True
                            conn[w, u] = True
    else:
        raise NotImplementedError('Currently only support 1D ring connection.')

    self.conn_mat = ops.as_tensor(conn)
    pre_ids, post_ids = np.where(conn)
    pre_ids = np.ascontiguousarray(pre_ids)
    post_ids = np.ascontiguousarray(post_ids)
    self.pre_ids = ops.as_tensor(pre_ids)
    self.post_ids = ops.as_tensor(post_ids)
    return self