Example #1
    def __call__(self, pre_size, post_size):
        self.num_pre = num_pre = utils.size2len(pre_size)
        self.num_post = num_post = utils.size2len(post_size)
        assert len(pre_size) == 2
        assert len(post_size) == 2
        pre_height, pre_width = pre_size
        post_height, post_width = post_size

        # get the connections
        i, j, p = [], [], []  # conn_i, conn_j, probabilities
        for pre_i in range(num_pre):
            a = _gaussian_prob(pre_i=pre_i,
                               pre_width=pre_width,
                               pre_height=pre_height,
                               num_post=num_post,
                               post_width=post_width,
                               post_height=post_height,
                               p_min=self.p_min,
                               sigma=self.sigma,
                               normalize=self.normalize,
                               include_self=self.include_self)
            i.extend(a[0])
            j.extend(a[1])
            p.extend(a[2])
        p = np.asarray(p, dtype=np.float_)
        selected_idxs = np.where(np.random.random(len(p)) < p)[0]
        i = np.asarray(i, dtype=np.int_)[selected_idxs]
        j = np.asarray(j, dtype=np.int_)[selected_idxs]
        self.pre_ids = ops.as_tensor(i)
        self.post_ids = ops.as_tensor(j)
        return self
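
This `__call__` presumably belongs to a Gaussian-probability connector whose constructor supplies `sigma`, `p_min`, `normalize`, and `include_self`. A minimal usage sketch, assuming a class named `GaussianProb` with those arguments (both the name and the signature are assumptions, not shown in the snippet):

    conn = GaussianProb(sigma=2., p_min=0.01, normalize=True, include_self=False)  # hypothetical
    conn(pre_size=(10, 10), post_size=(10, 10))  # both groups must be 2D grids
    print(conn.pre_ids.shape, conn.post_ids.shape)  # one entry per sampled edge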
Example #2
    def __call__(self, pre_size, post_size):
        num_pre = utils.size2len(pre_size)
        num_post = utils.size2len(post_size)
        self.num_pre = num_pre
        self.num_post = num_post
        assert len(pre_size) == 2
        assert len(post_size) == 2
        pre_height, pre_width = pre_size
        post_height, post_width = post_size

        # get the connections and weights
        i, j, w = [], [], []
        for pre_i in range(num_pre):
            a = _gaussian_weight(pre_i=pre_i,
                                 pre_width=pre_width,
                                 pre_height=pre_height,
                                 num_post=num_post,
                                 post_width=post_width,
                                 post_height=post_height,
                                 w_max=self.w_max,
                                 w_min=self.w_min,
                                 sigma=self.sigma,
                                 normalize=self.normalize,
                                 include_self=self.include_self)
            i.extend(a[0])
            j.extend(a[1])
            w.extend(a[2])

        pre_ids = np.asarray(i, dtype=np.int_)
        post_ids = np.asarray(j, dtype=np.int_)
        w = np.asarray(w, dtype=np.float_)
        self.pre_ids = ops.as_tensor(pre_ids)
        self.post_ids = ops.as_tensor(post_ids)
        self.weights = ops.as_tensor(w)
        return self
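
Judging from the parameter names, `_gaussian_weight` evaluates a Gaussian kernel over grid distance and drops pairs whose weight is too small. A plain-NumPy sketch of the assumed kernel (not the library's actual helper):

    import numpy as np

    def gaussian_weight(d, w_max, sigma):
        # assumed kernel: w(d) = w_max * exp(-d**2 / (2 * sigma**2));
        # pairs with w < w_min are presumably discarded by the helper
        return w_max * np.exp(-d ** 2 / (2 * sigma ** 2))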
Example #3
    def __call__(self, pre_size, post_size=None):
        self.num_pre = utils.size2len(pre_size)
        if post_size is not None:
            try:
                assert pre_size == post_size
            except AssertionError:
                raise errors.ModelUseError(
                    f'The shape of the pre-synaptic group must be the same as that of '
                    f'the post-synaptic group, but we got {pre_size} != {post_size}.')
            self.num_post = utils.size2len(post_size)
        else:
            self.num_post = self.num_pre

        if len(pre_size) == 1:
            height, width = pre_size[0], 1
        elif len(pre_size) == 2:
            height, width = pre_size
        else:
            raise errors.ModelUseError(
                'Currently only support two-dimensional geometry.')

        conn_i = []
        conn_j = []
        for row in range(height):
            res = _grid_n(height=height,
                          width=width,
                          row=row,
                          n=self.N,
                          include_self=self.include_self)
            conn_i.extend(res[0])
            conn_j.extend(res[1])
        self.pre_ids = ops.as_tensor(conn_i)
        self.post_ids = ops.as_tensor(conn_j)
        return self
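
The `_grid_n` helper is not shown. A plausible sketch, assuming it connects every cell of `row` to all cells within an N-step Chebyshev neighbourhood of the height x width grid (an assumption inferred from the call site, not the library's code):

    def _grid_n(height, width, row, n, include_self):
        conn_i, conn_j = [], []
        for col in range(width):
            i_index = row * width + col
            for r in range(max(0, row - n), min(height, row + n + 1)):
                for c in range(max(0, col - n), min(width, col + n + 1)):
                    j_index = r * width + c
                    if j_index == i_index and not include_self:
                        continue
                    conn_i.append(i_index)
                    conn_j.append(j_index)
        return conn_i, conn_j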
Example #4
    def __call__(self, pre_size, post_size=None):
        num_node = utils.size2len(pre_size)
        assert num_node == utils.size2len(post_size)
        self.num_pre = self.num_post = num_node

        if self.m1 < 1 or self.m1 >= num_node:
            raise ValueError(
                f"Dual Barabási–Albert network must have m1 >= 1 and m1 < num_node, "
                f"while m1 = {self.m1} and num_node = {num_node}.")
        if self.m2 < 1 or self.m2 >= num_node:
            raise ValueError(
                f"Dual Barabási–Albert network must have m2 >= 1 and m2 < num_node, "
                f"while m2 = {self.m2} and num_node = {num_node}.")
        if self.p < 0 or self.p > 1:
            raise ValueError(
                f"Dual Barabási–Albert network must have 0 <= p <= 1, while p = {self.p}"
            )

        # Add max(m1,m2) initial nodes (m0 in barabasi-speak)
        conn = np.zeros((num_node, num_node), dtype=bool)
        # Target nodes for new edges
        targets = list(range(max(self.m1, self.m2)))
        # List of existing nodes, with nodes repeated once for each adjacent edge
        repeated_nodes = []
        # Start adding the remaining nodes.
        source = max(self.m1, self.m2)
        # Pick which m to use first time (m1 or m2)
        if self.rng.random() < self.p:
            m = self.m1
        else:
            m = self.m2
        while source < num_node:
            # Add edges to m nodes from the source.
            origins = [source] * m
            conn[origins, targets] = True
            if not self.directed:
                conn[targets, origins] = True
            # Add one node to the list for each new edge just created.
            repeated_nodes.extend(targets)
            # And the new node "source" has m edges to add to the list.
            repeated_nodes.extend([source] * m)
            # Pick which m to use next time (m1 or m2)
            if self.rng.random() < self.p:
                m = self.m1
            else:
                m = self.m2
            # Now choose m unique nodes from the existing nodes
            # Pick uniformly from repeated_nodes (preferential attachment)
            targets = _random_subset(repeated_nodes, m, self.rng)
            source += 1

        self.conn_mat = ops.as_tensor(conn)
        pre_ids, post_ids = np.where(conn)
        pre_ids = np.ascontiguousarray(pre_ids)
        post_ids = np.ascontiguousarray(post_ids)
        self.pre_ids = ops.as_tensor(pre_ids)
        self.post_ids = ops.as_tensor(post_ids)

        return self
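
Both Barabási–Albert variants delegate the preferential-attachment step to `_random_subset`, which is not shown. A plausible implementation, modeled on NetworkX's helper of the same name (an assumption; note it must return a sequence, since the callers use the result for fancy indexing and `.pop()`):

    def _random_subset(seq, m, rng):
        # draw m *distinct* nodes; because `seq` repeats each node once per
        # adjacent edge, uniform sampling realizes preferential attachment
        targets = set()
        while len(targets) < m:
            targets.add(rng.choice(seq))
        return list(targets)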
Example #5
    def __call__(self, pre_size, post_size=None):
        num_node = utils.size2len(pre_size)
        assert num_node == utils.size2len(post_size)
        self.num_pre = self.num_post = num_node

        if self.m < 1 or num_node < self.m:
            raise ValueError(
                f"Must have m >= 1 and m <= n, while m={self.m} and n={num_node}")
        # add m initial nodes (m0 in barabasi-speak)
        conn = np.zeros((num_node, num_node), dtype=bool)
        repeated_nodes = list(range(
            self.m))  # list of existing nodes to sample from
        # with nodes repeated once for each adjacent edge
        source = self.m  # next node is m
        while source < num_node:  # Now add the other n-1 nodes
            possible_targets = _random_subset(repeated_nodes, self.m, self.rng)
            # do one preferential attachment for new node
            target = possible_targets.pop()
            conn[source, target] = True
            if not self.directed:
                conn[target, source] = True
            repeated_nodes.append(
                target)  # add one node to list for each new link
            count = 1
            while count < self.m:  # add m-1 more new links
                if self.rng.random() < self.p:  # clustering step: add triangle
                    neighbors = np.where(conn[target])[0]
                    neighborhood = [
                        nbr for nbr in neighbors
                        if not conn[source, nbr] and not nbr == source
                    ]
                    if neighborhood:  # if there is a neighbor without a link
                        nbr = self.rng.choice(neighborhood)
                        conn[source, nbr] = True  # add triangle
                        if not self.directed:
                            conn[nbr, source] = True
                        repeated_nodes.append(nbr)
                        count = count + 1
                        continue  # go to top of while loop
                # else do preferential attachment step if above fails
                target = possible_targets.pop()
                conn[source, target] = True
                if not self.directed:
                    conn[target, source] = True
                repeated_nodes.append(target)
                count = count + 1

            repeated_nodes.extend([source] *
                                  self.m)  # add source node to list m times
            source += 1

        self.conn_mat = ops.as_tensor(conn)
        pre_ids, post_ids = np.where(conn)
        pre_ids = np.ascontiguousarray(pre_ids)
        post_ids = np.ascontiguousarray(post_ids)
        self.pre_ids = ops.as_tensor(pre_ids)
        self.post_ids = ops.as_tensor(post_ids)

        return self
Example #6
    def __call__(self, pre_size, post_size):
        pre_len = utils.size2len(pre_size)
        post_len = utils.size2len(post_size)
        self.num_pre = pre_len
        self.num_post = post_len

        mat = np.ones((pre_len, post_len))
        if not self.include_self:
            np.fill_diagonal(mat, 0)
        pre_ids, post_ids = np.where(mat > 0)
        self.pre_ids = ops.as_tensor(np.ascontiguousarray(pre_ids))
        self.post_ids = ops.as_tensor(np.ascontiguousarray(post_ids))
        self.conn_mat = ops.as_tensor(mat)
        return self
Example #7
 def __call__(self, shape, dtype=None):
     shape = [size2len(d) for d in shape]
     fan_in, fan_out = _compute_fans(shape,
                                     in_axis=self.in_axis,
                                     out_axis=self.out_axis)
     if self.mode == "fan_in":
         denominator = fan_in
     elif self.mode == "fan_out":
         denominator = fan_out
     elif self.mode == "fan_avg":
         denominator = (fan_in + fan_out) / 2
     else:
         raise ValueError(
             "invalid mode for variance scaling initializer: {}".format(
                 self.mode))
     variance = math.array(self.scale / denominator, dtype=dtype)
     if self.distribution == "truncated_normal":
         # constant is stddev of standard normal truncated to (-2, 2)
         stddev = math.sqrt(variance) / math.array(.87962566103423978,
                                                   dtype)
         res = self.rng.truncated_normal(-2, 2, shape) * stddev
         return math.asarray(res, dtype=dtype)
     elif self.distribution == "normal":
         res = self.rng.normal(size=shape) * math.sqrt(variance)
         return math.asarray(res, dtype=dtype)
     elif self.distribution == "uniform":
         res = self.rng.uniform(low=-1, high=1, size=shape) * math.sqrt(
             3 * variance)
         return math.asarray(res, dtype=dtype)
     else:
         raise ValueError(
             "invalid distribution for variance scaling initializer")
Example #8
    def __call__(self, pre_size, post_size):
        num_pre, num_post = utils.size2len(pre_size), utils.size2len(post_size)
        self.num_pre, self.num_post = num_pre, num_post
        num = self.num if isinstance(self.num, int) else int(self.num *
                                                             num_pre)
        assert num <= num_pre, f'"num" must be no larger than "num_pre", but got {num} > {num_pre}'

        prob_mat = self.rng.random(size=(num_pre, num_post))
        if not self.include_self:
            np.fill_diagonal(prob_mat, 1.)
        arg_sort = np.argsort(prob_mat, axis=0)[:num]
        pre_ids = np.asarray(np.concatenate(arg_sort), dtype=np.int_)
        # each row of arg_sort supplies one pre id per post neuron, so the
        # matching post ids are 0..num_post-1, tiled once per selected row
        post_ids = np.asarray(np.tile(np.arange(num_post), num),
                              dtype=np.int_)
        self.pre_ids = ops.as_tensor(pre_ids)
        self.post_ids = ops.as_tensor(post_ids)
        return self
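
The argsort trick works because every column of `prob_mat` holds i.i.d. uniform values: the row indices of its `num` smallest entries are `num` distinct pre ids drawn uniformly at random for each post neuron. A standalone demonstration:

    import numpy as np

    rng = np.random.default_rng(0)
    prob = rng.random((5, 3))             # 5 pre neurons, 3 post neurons
    picks = np.argsort(prob, axis=0)[:2]  # 2 distinct pre ids per post column
    print(picks.shape)                    # (2, 3)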
Example #9
 def __call__(self, shape, dtype=None):
   if isinstance(shape, int):
     shape = (shape, )
   elif isinstance(shape, (tuple, list)):
     if len(shape) > 2:
        raise ValueError(f'Only 2D weights can be initialized by {self.__class__.__name__}.')
   else:
      raise ValueError(f'The shape must be an int or a tuple/list of ints '
                       f'in {self.__class__.__name__}, but we got {shape}.')
   shape = [size2len(d) for d in shape]
   return math.eye(*shape, dtype=dtype) * self.value
Example #10
    def __call__(self, pre_size, post_size):
        num_pre, num_post = utils.size2len(pre_size), utils.size2len(post_size)
        self.num_pre, self.num_post = num_pre, num_post

        if self.method == 'matrix':
            prob_mat = self.rng.random(size=(num_pre, num_post))
            if not self.include_self:
                np.fill_diagonal(prob_mat, 1.)
            conn_mat = np.array(prob_mat < self.prob, dtype=np.int_)
            pre_ids, post_ids = np.where(conn_mat)
            self.conn_mat = ops.as_tensor(conn_mat)
        else:
            pre_ids, post_ids = [], []
            for i in range(num_pre):
                pres, posts = _prob_conn(i, num_post, self.prob,
                                         self.include_self)
                pre_ids.extend(pres)
                post_ids.extend(posts)
        self.pre_ids = ops.as_tensor(np.ascontiguousarray(pre_ids))
        self.post_ids = ops.as_tensor(np.ascontiguousarray(post_ids))
        return self
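
The row-by-row branch delegates to `_prob_conn`, which is not shown. A minimal sketch of what it presumably does, with the signature taken from the call site (the body is an assumption):

    import numpy as np

    def _prob_conn(pre_i, num_post, prob, include_self):
        # one Bernoulli(prob) trial per post neuron
        mask = np.random.random(num_post) < prob
        if not include_self and pre_i < num_post:
            mask[pre_i] = False
        post_ids = np.where(mask)[0]
        return np.full(post_ids.size, pre_i), post_ids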
Example #11
    def __call__(self, pre_size, post_size=None):
        num_node = utils.size2len(pre_size)
        assert num_node == utils.size2len(post_size)
        self.num_pre = self.num_post = num_node

        if self.m < 1 or self.m >= num_node:
            raise ValueError(f"Barabási–Albert network must have m >= 1 and "
                             f"m < n, while m = {self.m} and n = {num_node}")

        # Add m initial nodes (m0 in barabasi-speak)
        conn = np.zeros((num_node, num_node), dtype=bool)
        # Target nodes for new edges
        targets = list(range(self.m))
        # List of existing nodes, with nodes repeated once for each adjacent edge
        repeated_nodes = []
        # Start adding the other n-m nodes. The first node is m.
        source = self.m
        while source < num_node:
            # Add edges to m nodes from the source.
            origins = [source] * self.m
            conn[origins, targets] = True
            if not self.directed:
                conn[targets, origins] = True
            # Add one node to the list for each new edge just created.
            repeated_nodes.extend(targets)
            # And the new node "source" has m edges to add to the list.
            repeated_nodes.extend([source] * self.m)
            # Now choose m unique nodes from the existing nodes
            # Pick uniformly from repeated_nodes (preferential attachment)
            targets = _random_subset(repeated_nodes, self.m, self.rng)
            source += 1

        self.conn_mat = ops.as_tensor(conn)
        pre_ids, post_ids = np.where(conn)
        pre_ids = np.ascontiguousarray(pre_ids)
        post_ids = np.ascontiguousarray(post_ids)
        self.pre_ids = ops.as_tensor(pre_ids)
        self.post_ids = ops.as_tensor(post_ids)

        return self
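
Every new node contributes exactly m edges, so an undirected network ends up with about m * (num_node - m) links, i.e. roughly 2 * m * (num_node - m) (pre, post) pairs after `np.where`. A hypothetical usage sketch, assuming the method belongs to a connector class named `ScaleFreeBA` (the name and constructor are assumptions):

    conn = ScaleFreeBA(m=2)  # hypothetical constructor
    conn(pre_size=100, post_size=100)
    print(len(conn.pre_ids))  # ~2 * 2 * (100 - 2) entries when undirected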
Example #12
    def __init__(self, size, delay, dtype=None, dt=None, **kwargs):
        # dt
        self.dt = bm.get_dt() if dt is None else dt

        # data size
        if isinstance(size, int): size = (size, )
        if not isinstance(size, (tuple, list)):
            raise ModelBuildError(
                f'"size" must be a tuple/list of int, but we got {type(size)}: {size}'
            )
        self.size = tuple(size)

        # delay time length
        self.delay = delay

        # data and operations
        if isinstance(delay, (int, float)):  # uniform delay
            self.uniform_delay = True
            self.num_step = int(bm.ceil(delay / self.dt)) + 1
            self.out_idx = bm.Variable(bm.array([0], dtype=bm.uint32))
            self.in_idx = bm.Variable(
                bm.array([self.num_step - 1], dtype=bm.uint32))
            self.data = bm.Variable(
                bm.zeros((self.num_step, ) + self.size, dtype=dtype))

        else:  # non-uniform delay
            self.uniform_delay = False
            if not len(self.size) == 1:
                raise NotImplementedError(
                    f'Currently, BrainPy only supports 1D heterogeneous '
                    f'delays, but we got a heterogeneous delay with '
                    f'{len(self.size)} dimensions.')
            self.num = size2len(size)
            if bm.ndim(delay) != 1:
                raise ModelBuildError(f'Only 1D non-uniform delays are supported, '
                                      f'but we got {bm.ndim(delay)}D: {delay}')
            if delay.shape[0] != self.size[0]:
                raise ModelBuildError(
                    f"The length of the delay vector must match the "
                    f"data size, but we got {delay.shape[0]} != {self.size[0]}")
            delay = bm.around(delay / self.dt)
            self.diag = bm.array(bm.arange(self.num), dtype=bm.int_)
            self.num_step = bm.array(delay, dtype=bm.uint32) + 1
            self.in_idx = bm.Variable(self.num_step - 1)
            self.out_idx = bm.Variable(bm.zeros(self.num, dtype=bm.uint32))
            self.data = bm.Variable(
                bm.zeros((self.num_step.max(), ) + size, dtype=dtype))

        super(ConstantDelay, self).__init__(**kwargs)
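
The uniform-delay branch builds a ring buffer: `data` holds `num_step` time slices, `in_idx` marks the slot being written, and `out_idx` the slot that lags it by exactly `delay`. A minimal sketch of one update step under that assumption (the real class presumably exposes equivalent push/pull methods):

    def step(delay_var, value):
        delay_var.data[delay_var.in_idx[0]] = value     # write the newest value
        delayed = delay_var.data[delay_var.out_idx[0]]  # read the value from `delay` ago
        delay_var.in_idx[0] = (delay_var.in_idx[0] + 1) % delay_var.num_step
        delay_var.out_idx[0] = (delay_var.out_idx[0] + 1) % delay_var.num_step
        return delayed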
Example #13
    def __call__(self, pre_size, post_size):
        self.num_pre = num_pre = utils.size2len(pre_size)
        self.num_post = num_post = utils.size2len(post_size)
        assert len(pre_size) == 2
        assert len(post_size) == 2
        pre_height, pre_width = pre_size
        post_height, post_width = post_size

        # get the connections and weights
        i, j, w = [], [], []  # conn_i, conn_j, weights
        for pre_i in range(num_pre):
            a = _dog(pre_i=pre_i,
                     pre_width=pre_width,
                     pre_height=pre_height,
                     num_post=num_post,
                     post_width=post_width,
                     post_height=post_height,
                     w_max_p=self.w_max_p,
                     w_max_n=self.w_max_n,
                     w_min=self.w_min,
                     sigma_p=self.sigma_p,
                     sigma_n=self.sigma_n,
                     normalize=self.normalize,
                     include_self=self.include_self)
            i.extend(a[0])
            j.extend(a[1])
            w.extend(a[2])

        # format connections and weights
        i = np.asarray(i, dtype=np.int_)
        j = np.asarray(j, dtype=np.int_)
        w = np.asarray(w, dtype=np.float_)
        self.pre_ids = ops.as_tensor(i)
        self.post_ids = ops.as_tensor(j)
        self.weights = ops.as_tensor(w)
        return self
Example #14
    def __call__(self, pre_size, post_size):
        try:
            assert pre_size == post_size
        except AssertionError:
            raise errors.ModelUseError(
                f'One2One connection must be defined between two groups of the same size, '
                f'but we got {pre_size} != {post_size}.')

        length = utils.size2len(pre_size)
        self.num_pre = length
        self.num_post = length

        self.pre_ids = ops.arange(length)
        self.post_ids = ops.arange(length)
        return self
Example #15
 def __call__(self, shape, dtype=None):
     shape = [size2len(d) for d in shape]
     n_rows = shape[self.axis]
     n_cols = np.prod(shape) // n_rows
     matrix_shape = (n_rows, n_cols) if n_rows > n_cols else (n_cols,
                                                              n_rows)
     norm_dst = self.rng.normal(size=matrix_shape)
     q_mat, r_mat = np.linalg.qr(norm_dst)
     # Enforce Q is uniformly distributed
     q_mat *= np.sign(np.diag(r_mat))
     if n_rows < n_cols:
         q_mat = q_mat.T
     q_mat = np.reshape(q_mat,
                        (n_rows, ) + tuple(np.delete(shape, self.axis)))
     q_mat = np.moveaxis(q_mat, 0, self.axis)
     return self.scale * math.asarray(q_mat, dtype=dtype)
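
The sign correction `q_mat *= np.sign(np.diag(r_mat))` makes the QR factorization unique, so Q is distributed uniformly (Haar) over the orthogonal group instead of being biased by the QR sign convention. A quick plain-NumPy check of the construction:

    import numpy as np

    rng = np.random.default_rng(0)
    q, r = np.linalg.qr(rng.normal(size=(4, 4)))
    q *= np.sign(np.diag(r))                # fix the signs, as above
    print(np.allclose(q.T @ q, np.eye(4)))  # True: columns are orthonormal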
Example #16
    def __init__(self, size, delay_time):
        if isinstance(size, int):
            size = (size, )
        self.size = tuple(size)
        self.delay_time = delay_time

        if isinstance(delay_time, (int, float)):
            self.uniform_delay = True
            self.delay_num_step = int(math.ceil(
                delay_time / backend.get_dt())) + 1
            self.delay_data = ops.zeros((self.delay_num_step, ) + self.size)
        else:
            if not len(self.size) == 1:
                raise NotImplementedError(
                    f'Currently, BrainPy only supports 1D heterogeneous delays, but we got '
                    f'a heterogeneous delay with {len(self.size)} dimensions.'
                )
            self.num = size2len(size)
            if isinstance(delay_time, type(ops.as_tensor([1]))):
                assert ops.shape(delay_time) == self.size
            elif callable(delay_time):
                delay_time2 = ops.zeros(size)
                for i in range(size[0]):
                    delay_time2[i] = delay_time()
                delay_time = delay_time2
            else:
                raise NotImplementedError(
                    f'Currently, BrainPy does not support delay type '
                    f'of {type(delay_time)}: {delay_time}')
            self.uniform_delay = False
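            # round each delay to the nearest integer number of time steps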
            delay = delay_time / backend.get_dt()
            dint = ops.as_tensor(delay_time / backend.get_dt(), dtype=int)
            ddiff = (delay - dint) >= 0.5
            self.delay_num_step = ops.as_tensor(delay + ddiff, dtype=int) + 1
            self.delay_data = ops.zeros((max(self.delay_num_step), ) + size)
            self.diag = ops.arange(self.num)

        self.delay_in_idx = self.delay_num_step - 1
        if self.uniform_delay:
            self.delay_out_idx = 0
        else:
            self.delay_out_idx = ops.zeros(self.num, dtype=int)
        self.name = None
Example #17
    def __init__(self,
                 size,
                 monitors=None,
                 name=None,
                 show_code=False,
                 steps=None):
        # name
        # -----
        if name is None:
            name = ''
        else:
            name = '_' + name
        global _NeuGroup_NO
        _NeuGroup_NO += 1
        name = f'NG{_NeuGroup_NO}{name}'

        # size
        # ----
        if isinstance(size, (list, tuple)):
            if len(size) <= 0:
                raise errors.ModelDefError(
                    'size must be int, or a tuple/list of int.')
            if not isinstance(size[0], int):
                raise errors.ModelDefError(
                    'size must be int, or a tuple/list of int.')
            size = tuple(size)
        elif isinstance(size, int):
            size = (size, )
        else:
            raise errors.ModelDefError(
                'size must be int, or a tuple/list of int.')
        self.size = size
        self.num = utils.size2len(size)

        # initialize
        # ----------
        if steps is None:
            steps = {'update': self.update}
        super(NeuGroup, self).__init__(steps=steps,
                                       monitors=monitors,
                                       name=name,
                                       show_code=show_code)
Example #18
 def __call__(self, shape, dtype=None):
     shape = [size2len(d) for d in shape]
     if len(shape) not in [3, 4, 5]:
         raise ValueError(
             "Delta orthogonal initializer requires a 3D, 4D or 5D shape.")
     if shape[-1] < shape[-2]:
         raise ValueError("`fan_in` must be less or equal than `fan_out`. ")
     ortho_init = Orthogonal(scale=self.scale, axis=self.axis)
     ortho_matrix = ortho_init(shape[-2:], dtype=dtype)
     W = math.zeros(shape, dtype=dtype)
     if len(shape) == 3:
         k = shape[0]
         W[(k - 1) // 2, ...] = ortho_matrix
     elif len(shape) == 4:
         k1, k2 = shape[:2]
         W[(k1 - 1) // 2, (k2 - 1) // 2, ...] = ortho_matrix
     else:
         k1, k2, k3 = shape[:3]
         W[(k1 - 1) // 2, (k2 - 1) // 2, (k3 - 1) // 2, ...] = ortho_matrix
     return W
Example #19
 def __call__(self, shape, dtype=None):
   shape = [size2len(d) for d in shape]
   return math.ones(shape, dtype=dtype) * self.value
Example #20
 def __call__(self, shape, dtype=None):
   shape = [size2len(d) for d in shape]
   return math.zeros(shape, dtype=dtype)
Example #21
 def __call__(self, shape, dtype=None):
     shape = [size2len(d) for d in shape]
     r = self.rng.uniform(low=self.min_val, high=self.max_val, size=shape)
     return math.asarray(r, dtype=dtype)
Example #22
 def __call__(self, shape, dtype=None):
     shape = [size2len(d) for d in shape]
     weights = self.rng.normal(size=shape, scale=self.scale)
     return math.asarray(weights, dtype=dtype)