Example #1
    def run_network_recall(self, T_recall=10.0, T_cue=0.0, I_cue=None, reset=True,
                           empty_history=True, plasticity_on=False, steady=True):
        """
        Run network free recall
        :param T_recall: The total time of recalling
        :param T_cue: the time that the cue is run
        :param I_cue: The current to give as the cue
        :param reset: Whether the state variables values should be returned
        :param empty_history: whether the history should be cleaned
        """
        if T_recall < 0.0:
            raise ValueError('T_recall = ' + str(T_recall) + ' has to be non-negative')
        time_recalling = np.arange(0, T_recall, self.dt)
        time_cue = np.arange(0, T_cue, self.dt)

        if empty_history:
            self.empty_history()
        if reset:
            # Never destroy connectivity while recalling
            self.nn.reset_values(keep_connectivity=True)
            # Recall times
            self.T_recall_total = 0
            self.n_time_total = 0

        # Set the initial conditions of the current to the clamped cue, if available
        if I_cue is None:
            # Leave things as they are, maybe this should be a randomized start?
            pass
        elif isinstance(I_cue, (float, int)):
            # If an index is passed
            self.nn.s = self.nn.g_I * self.patterns_dic[I_cue].astype('float')
            self.nn.o = strict_max(self.nn.s, minicolumns=self.nn.minicolumns)
            self.nn.i = self.nn.w @ self.nn.o / self.nn.hypercolumns
            self.nn.s += self.nn.i + self.nn.beta - self.nn.g_a * self.nn.a
        else:
            # If the whole pattern is passed
            self.nn.s = self.nn.g_I * I_cue  # The pattern is the input
            self.nn.o = strict_max(self.nn.s, minicolumns=self.nn.minicolumns)
            self.nn.i = self.nn.w @ self.nn.o / self.nn.hypercolumns
            self.nn.s += self.nn.i + self.nn.beta - self.nn.g_a * self.nn.a

        # Run the cue
        self.run_network(time=time_cue, I=I_cue, train_network=plasticity_on,
                         plasticity_on=plasticity_on, steady=steady)

        # Run the recall
        self.run_network(time=time_recalling, train_network=plasticity_on,
                         plasticity_on=plasticity_on, steady=steady)

        # Calculate total time
        self.T_recall_total += T_recall + T_cue
        self.n_time_total += self.history['o'].shape[0]
        self.time = np.linspace(0, self.T_recall_total, num=self.n_time_total)
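
A hedged usage sketch for Example #1 (hypothetical: `manager` stands for an instance of the class that owns run_network_recall, with dt set and patterns_dic already populated by training):

    # Cue with stored pattern 0 for 0.1 time units, then recall freely.
    manager.run_network_recall(T_recall=5.0, T_cue=0.1, I_cue=0)
    o_history = manager.history['o']  # unit activations over the whole run
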
Example #2
    def update_continuous(self, dt=1.0, sigma=None, non_active=False):
        """Advance the network state by one forward-Euler step of size dt."""
        # Get the noise
        if sigma is None:
            noise = self.sigma_in * np.sqrt(dt) * self.prng.normal(0, 1.0, self.n_units)
        else:
            noise = sigma  # Use the supplied noise vector directly

        # Calculate currents
        self.i = self.w @ self.o / self.hypercolumns
        self.s += (dt / self.tau_s) * (self.i  # Current
                                       + self.beta  # Bias
                                       + self.g_I * self.I  # Input current
                                       - self.g_a * self.a  # Adaptation
                                       - self.s)  # s relaxes toward the sum of the terms above
        self.s += noise
        # Non-linearity
        if self.strict_maximum:
            self.o = strict_max(self.s, minicolumns=self.minicolumns)
        else:
            self.o = softmax(self.s, G=self.G, minicolumns=self.minicolumns)

        if non_active:
            self.o = np.zeros_like(self.o)  # Optionally silence all output units

        # Update the adaptation
        self.a += (dt / self.tau_a) * (self.o - self.a)
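
The update in Example #2 is an Euler–Maruyama step of the underlying dynamics. Reconstructed from the code (H is the number of hypercolumns, and the noise term \sigma_{in}\sqrt{dt}\,\xi with \xi \sim \mathcal{N}(0,1) realizes a Wiener increment dW):

    ds = \frac{1}{\tau_s}\left(i + \beta + g_I I - g_a a - s\right) dt + \sigma_{in}\, dW,
    \qquad i = \frac{W o}{H}

    \tau_a \frac{da}{dt} = o - a
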
Example #3
    def update_continuous(self, dt=1.0, sigma=None):
        """Advance the network state and plasticity traces by one step of size dt."""
        # Get the noise
        if sigma is None:
            noise = self.sigma_in * np.sqrt(dt) * self.prng.normal(
                0, 1.0, self.n_units)
        else:
            noise = sigma

        # Calculate currents
        self.i = self.w @ self.z_pre / self.normalized_constant
        if self.perfect:
            # With perfect=True, s jumps directly to its input-driven value
            self.s = self.i + self.g_beta * self.beta - self.g_a * self.a + self.g_I * self.I + noise
        else:
            self.s += (dt / self.tau_s) * (
                self.i  # Current
                + self.g_beta * self.beta  # Bias
                + self.g_I * self.I  # Input current
                - self.g_a * self.a  # Adaptation
                - self.s)  # s relaxes toward the sum of the terms above
            self.s += noise
        # Non-linearity
        if self.strict_maximum:
            self.o = strict_max(self.s, minicolumns=self.minicolumns)
        else:
            self.o = softmax(self.s, G=self.G, minicolumns=self.minicolumns)

        # Update the z-traces
        self.z_pre += (dt / self.tau_z_pre) * (self.o - self.z_pre)
        self.z_post += (dt / self.tau_z_post) * (self.o - self.z_post)
        self.z_co = np.outer(self.z_post, self.z_pre)

        # Update the adaptation
        self.a += (dt / self.tau_a) * (self.o - self.a)
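
strict_max and softmax are not defined in these excerpts. A minimal sketch of what strict_max plausibly computes, assuming units are grouped into hypercolumns of `minicolumns` consecutive entries (an assumption, not the library's actual implementation):

    import numpy as np

    def strict_max(s, minicolumns):
        # Assumed semantics: winner-take-all within each hypercolumn.
        # The unit with the largest support s in each consecutive block
        # of `minicolumns` units outputs 1.0; all others output 0.0.
        o = np.zeros_like(s, dtype=float)
        for start in range(0, s.size, minicolumns):
            block = s[start:start + minicolumns]
            o[start + np.argmax(block)] = 1.0
        return o
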
Example #4
    def update_continuous(self, dt=1.0, sigma=None):
        """Advance the state, traces, probabilities, and connectivity by one step of size dt."""

        if sigma is None:
            noise = self.sigma * np.sqrt(dt) * self.prng.normal(
                0, 1.0, self.n_units)
        else:
            noise = sigma

        if self.normalized_current:
            normalized_constant = self.hypercolumns
        else:
            normalized_constant = 1.0

        # Compute the NMDA and AMPA currents (from z-traces if z_transfer, else from outputs)
        if self.z_transfer:
            self.i_nmda = self.g_w * self.w @ self.z_pre / normalized_constant
            self.i_ampa = self.g_w_ampa * self.w_ampa @ self.z_pre_ampa / normalized_constant
        else:
            self.i_nmda = self.g_w * self.w @ self.o / normalized_constant
            self.i_ampa = self.g_w_ampa * self.w_ampa @ self.o / normalized_constant

        if self.perfect:
            # With perfect=True, s jumps directly to its input-driven value
            self.s = (
                self.i_nmda  # NMDA effects
                + self.i_ampa  # AMPA effects
                + self.g_beta * self.beta  # Bias
                + self.g_I * self.I  # Input current
                - self.g_a * self.a  # Adaptation
                + noise)  # This last term is the noise
        else:
            self.s += (dt / self.tau_m) * (
                self.i_nmda  # NMDA effects
                + self.i_ampa  # AMPA effects
                + self.g_beta * self.beta  # Bias
                + self.g_I * self.I  # Input current
                - self.g_a * self.a  # Adaptation
                + noise  # This last term is the noise
                - self.s)  # s relaxes toward the sum of the terms above

        # Non-linearity
        if self.strict_maximum:
            self.o = strict_max(self.s, minicolumns=self.minicolumns)
        else:
            self.o = softmax(self.s, G=self.G, minicolumns=self.minicolumns)

        # Update the adaptation
        self.a += (dt / self.tau_a) * (self.o - self.a)

        # Update the z-traces
        self.z_pre += (dt / self.tau_z_pre) * (self.o - self.z_pre)
        self.z_post += (dt / self.tau_z_post) * (self.o - self.z_post)
        self.z_co = np.outer(self.z_post, self.z_pre)

        # Update the AMPA z-traces
        self.z_pre_ampa += (dt / self.tau_z_pre_ampa) * (self.o -
                                                         self.z_pre_ampa)
        self.z_post_ampa += (dt / self.tau_z_post_ampa) * (self.o -
                                                           self.z_post_ampa)
        self.z_co_ampa = np.outer(self.z_post_ampa, self.z_pre_ampa)

        if self.always_learning:

            # Update the probabilities of the NMDA connection
            self.p_pre += (dt / self.tau_p) * (self.z_pre - self.p_pre)
            self.p_post += (dt / self.tau_p) * (self.z_post - self.p_post)
            self.p_co += (dt / self.tau_p) * (self.z_co - self.p_co)

            # Update the probabilities of the AMPA connection
            self.p_pre_ampa += (dt / self.tau_p) * (self.z_pre_ampa -
                                                    self.p_pre_ampa)
            self.p_post_ampa += (dt / self.tau_p) * (self.z_post_ampa -
                                                     self.p_post_ampa)
            self.p_co_ampa += (dt / self.tau_p) * (self.z_co_ampa -
                                                   self.p_co_ampa)

            # Update the connectivity
            self.beta = get_beta(self.p_post, self.epsilon)
            self.w_ampa = get_w_pre_post(self.p_co_ampa,
                                         self.p_pre_ampa,
                                         self.p_post_ampa,
                                         self.p,
                                         self.epsilon,
                                         diagonal_zero=self.diagonal_zero)
            self.w = get_w_pre_post(self.p_co,
                                    self.p_pre,
                                    self.p_post,
                                    self.p,
                                    self.epsilon,
                                    diagonal_zero=self.diagonal_zero)

        else:
            # Otherwise, only learn when k is above epsilon.
            # k_perfect determines whether the effects of training kick in
            # immediately or have some dynamics of their own.
            if self.k_perfect:
                # Update the probabilities of the NMDA connection
                self.p_pre += (dt / self.tau_p) * (self.z_pre - self.p_pre)
                self.p_post += (dt / self.tau_p) * (self.z_post - self.p_post)
                self.p_co += (dt / self.tau_p) * (self.z_co - self.p_co)

                # Update the probabilities of the AMPA connection
                self.p_pre_ampa += (dt / self.tau_p) * (self.z_pre_ampa -
                                                        self.p_pre_ampa)
                self.p_post_ampa += (dt / self.tau_p) * (self.z_post_ampa -
                                                         self.p_post_ampa)
                self.p_co_ampa += (dt / self.tau_p) * (self.z_co_ampa -
                                                       self.p_co_ampa)

            else:
                self.k_d += (dt / self.tau_k) * (self.k - self.k_d)

                # Update the probabilities of the NMDA connection
                self.p_pre += (dt / self.tau_p) * (self.z_pre -
                                                   self.p_pre) * self.k_d
                self.p_post += (dt / self.tau_p) * (self.z_post -
                                                    self.p_post) * self.k_d
                self.p_co += (dt / self.tau_p) * (self.z_co -
                                                  self.p_co) * self.k_d

                # Update the probabilities of the AMPA connection
                self.p_pre_ampa += (dt / self.tau_p) * (
                    self.z_pre_ampa - self.p_pre_ampa) * self.k_d
                self.p_post_ampa += (dt / self.tau_p) * (
                    self.z_post_ampa - self.p_post_ampa) * self.k_d
                self.p_co_ampa += (dt / self.tau_p) * (
                    self.z_co_ampa - self.p_co_ampa) * self.k_d

            # Map probabilities to weights only when the learning signal k exceeds epsilon
            if self.k > self.epsilon:
                self.beta = get_beta(self.p_post, self.epsilon)
                self.w_ampa = get_w_pre_post(self.p_co_ampa,
                                             self.p_pre_ampa,
                                             self.p_post_ampa,
                                             self.p,
                                             self.epsilon,
                                             diagonal_zero=self.diagonal_zero)
                self.w = get_w_pre_post(self.p_co,
                                        self.p_pre,
                                        self.p_post,
                                        self.p,
                                        self.epsilon,
                                        diagonal_zero=self.diagonal_zero)
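
get_beta and get_w_pre_post are not defined in these excerpts. In standard BCPNN formulations the bias and weights are log-probabilities, roughly (a hedged reconstruction; the epsilon and p arguments visible in the calls suggest regularization and clipping details not shown here):

    \beta_j = \log p_j, \qquad w_{ij} = \log \frac{p_{ij}}{p_i \, p_j}

where p_i and p_j are the unit activation probabilities (p_pre, p_post) and p_{ij} the co-activation probabilities (p_co).
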
Example #5
    def run_network_recall(self,
                           T_recall=10.0,
                           T_cue=0.0,
                           I_cue=None,
                           reset=True,
                           empty_history=True,
                           plasticity_on=False,
                           stable_start=True,
                           NMDA=False):
        """
        Run network free recall
        :param T_recall: The total time of recalling
        :param T_cue: the time that the cue is run
        :param I_cue: The current to give as the cue
        :param reset: Whether the state variables values should be returned
        :param empty_history: whether the history should be cleaned
        """
        if T_recall < 0.0:
            raise ValueError('T_recall = ' + str(T_recall) +
                             ' has to be non-negative')
        time_recalling = np.arange(0, T_recall, self.dt)
        time_cue = np.arange(0, T_cue, self.dt)

        train_network = plasticity_on

        if empty_history:
            self.empty_history()
        if reset:
            # Never destroy connectivity while recalling
            self.nn.reset_values(keep_connectivity=True)
            # Recall times
            self.T_recall_total = 0
            self.n_time_total = 0

        # Seed the pre-synaptic z-traces with the cue
        if I_cue is None:
            pass
        elif isinstance(I_cue, int):
            self.nn.z_pre[I_cue] = 1.0
            if NMDA:
                self.nn.z_pre_ampa[I_cue] = 1.0
        else:
            self.nn.z_pre[np.where(I_cue)[0]] = 1.0
            if NMDA:
                self.nn.z_pre_ampa[np.where(I_cue)[0]] = 1.0

        # Set the initial conditions of the current to the clamped cue, if available
        if stable_start:
            if I_cue is None:
                pass
            elif isinstance(I_cue, (float, int)):
                self.nn.s = self.nn.g_I * self.patterns_dic[I_cue].astype(
                    'float')
                self.nn.o = strict_max(self.nn.s,
                                       minicolumns=self.nn.minicolumns)
                self.nn.i = self.nn.w @ self.nn.o / self.nn.normalized_constant
                self.nn.s += self.nn.i + self.nn.beta - self.nn.g_a * self.nn.a
            else:
                self.nn.s = self.nn.g_I * I_cue  # The pattern is the input
                self.nn.o = strict_max(self.nn.s,
                                       minicolumns=self.nn.minicolumns)
                self.nn.i = self.nn.w @ self.nn.o / self.nn.normalized_constant
                self.nn.s += self.nn.i + self.nn.beta - self.nn.g_a * self.nn.a

        # Run the cue (skipped when the cue period is effectively zero)
        if T_cue > 0.001:
            self.run_network(time=time_cue,
                             I=I_cue,
                             train_network=train_network,
                             plasticity_on=plasticity_on)

        # Run the recall
        self.run_network(time=time_recalling,
                         train_network=train_network,
                         plasticity_on=plasticity_on)

        # Calculate total time
        self.T_recall_total += T_recall + T_cue
        self.n_time_total += self.history['o'].shape[0]
        self.time = np.linspace(0, self.T_recall_total, num=self.n_time_total)
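
A hedged usage sketch for Example #5, exercising the branch where a full pattern (rather than an index) is passed as the cue (hypothetical: `manager` is an instance of the owning class with trained patterns in patterns_dic):

    # Cue with the full stored pattern, also seeding the AMPA z-traces.
    pattern = manager.patterns_dic[0]
    manager.run_network_recall(T_recall=5.0, T_cue=0.1, I_cue=pattern,
                               stable_start=True, NMDA=True)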