def run_network_protocol_offline(self, protocol):
        # Build time input
        timed_input = TimedInput(protocol, self.dt)
        timed_input.build_timed_input()
        timed_input.build_filtered_input_pre(tau_z=self.nn.tau_z_pre)
        timed_input.build_filtered_input_post(tau_z=self.nn.tau_z_post)
        # Calculate probabilities
        self.nn.p_pre, self.nn.p_post, self.nn.P = timed_input.calculate_probabilities_from_time_signal()
        # Store the connectivity values
        self.nn.beta = get_beta(self.nn.p_post, self.nn.epsilon)
        self.nn.w = get_w_pre_post(self.nn.P,
                                   self.nn.p_pre,
                                   self.nn.p_post,
                                   self.nn.epsilon,
                                   diagonal_zero=False)

        # Update the patterns
        self.update_patterns(protocol.network_representation)

        # Get the timing quantities
        t_total, n_time_total, time = protocol.calculate_time_quantities(
            self.dt)
        self.T_training_total += t_total
        self.n_time_total += n_time_total
        self.time = np.linspace(0,
                                self.T_training_total,
                                num=self.n_time_total)

        return timed_input
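
The helpers get_beta and get_w_pre_post are imported from the project's module and not shown in these examples. A minimal sketch of what they plausibly compute, assuming the standard BCPNN log-odds estimates (the sketch names and the epsilon clipping are assumptions; some variants below also pass a running p estimate):

import numpy as np

def get_beta_sketch(p_post, epsilon=1e-10):
    # Bias: log of the unit activation probability, clipped from below
    # by epsilon so that silent units get log(epsilon) rather than -inf
    return np.log(np.maximum(p_post, epsilon))

def get_w_pre_post_sketch(P, p_pre, p_post, epsilon=1e-10, diagonal_zero=True):
    # Weights: log of the co-activation probability over the product of
    # the marginals (pointwise mutual information), with epsilon clipping
    w = np.log(np.maximum(P, epsilon ** 2) / np.maximum(np.outer(p_post, p_pre), epsilon ** 2))
    if diagonal_zero:
        np.fill_diagonal(w, 0.0)  # no self-connections
    return w
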
    def update_continuous(self, dt=1.0, sigma=None):

        if sigma is None:
            sigma = self.prng.normal(0, self.sigma, self.n_units)

        # Update the support
        self.s += (dt / self.tau_m) * (self.g_w * np.dot(self.w, self.z_pre)  # NMDA effects
                                       + self.g_w_ampa * np.dot(self.w_ampa, self.z_pre_ampa)  # AMPA effects
                                       + self.g_beta * self.beta  # Bias
                                       + self.g_I * log_epsilon(self.I)  # Input current
                                       - self.s  # Leak: s relaxes towards the drive above
                                       - self.g_a * self.a  # Adaptation
                                       + sigma)  # Additive noise
        # Soft-max
        self.o = softmax(self.s, t=(1.0/self.G), minicolumns=self.minicolumns)

        # Update the adaptation
        self.a += (dt / self.tau_a) * (self.o - self.a)

        # Update the z-traces (NMDA)
        self.z_pre += (dt / self.tau_z_pre) * (self.o - self.z_pre)
        self.z_post += (dt / self.tau_z_post) * (self.o - self.z_post)
        self.z_co = np.outer(self.z_post, self.z_pre)

        # Update the z-traces (AMPA)
        self.z_pre_ampa += (dt / self.tau_z_pre_ampa) * (self.o - self.z_pre_ampa)
        self.z_post_ampa += (dt / self.tau_z_post_ampa) * (self.o - self.z_post_ampa)
        self.z_co_ampa = np.outer(self.z_post_ampa, self.z_pre_ampa)

        # Modulatory variables
        self.p += (dt / self.tau_p) * (1 - self.p)

        if self.k_inner:
            self.k_d += (dt / self.tau_k) * (self.k - self.k_d)

            # Update the probability estimates of the NMDA connection
            self.p_pre += (dt / self.tau_p) * (self.z_pre - self.p_pre) * self.k_d
            self.p_post += (dt / self.tau_p) * (self.z_post - self.p_post) * self.k_d
            self.p_co += (dt / self.tau_p) * (self.z_co - self.p_co) * self.k_d

            # Update the probability estimates of the AMPA connection
            self.p_pre_ampa += (dt / self.tau_p) * (self.z_pre_ampa - self.p_pre_ampa) * self.k_d
            self.p_post_ampa += (dt / self.tau_p) * (self.z_post_ampa - self.p_post_ampa) * self.k_d
            self.p_co_ampa += (dt / self.tau_p) * (self.z_co_ampa - self.p_co_ampa) * self.k_d

        else:
            # Update the probability estimates of the NMDA connection
            self.p_pre += (dt / self.tau_p) * (self.z_pre - self.p_pre)
            self.p_post += (dt / self.tau_p) * (self.z_post - self.p_post)
            self.p_co += (dt / self.tau_p) * (self.z_co - self.p_co)

            # Update the probability estimates of the AMPA connection
            self.p_pre_ampa += (dt / self.tau_p) * (self.z_pre_ampa - self.p_pre_ampa)
            self.p_post_ampa += (dt / self.tau_p) * (self.z_post_ampa - self.p_post_ampa)
            self.p_co_ampa += (dt / self.tau_p) * (self.z_co_ampa - self.p_co_ampa)

        if self.k > self.epsilon:
            self.beta = get_beta(self.p_post, self.epsilon)
            self.w_ampa = get_w_pre_post(self.p_co_ampa, self.p_pre_ampa, self.p_post_ampa, self.p, self.epsilon)
            self.w = get_w_pre_post(self.p_co, self.p_pre, self.p_post, self.p, self.epsilon)
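
A hypothetical driver for the method above (everything outside update_continuous is an assumption): advance the network with a fixed Euler step and record the unit activities.

import numpy as np

T_simulation = 10.0  # assumed total simulation time
dt = 0.1             # assumed integration step
n_steps = int(T_simulation / dt)
history = np.zeros((n_steps, nn.n_units))  # nn: an assumed instance of the network class
for step in range(n_steps):
    nn.update_continuous(dt=dt)
    history[step] = nn.o  # unit activities after this step
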
    def update_continuous(self, dt=1.0, sigma=None):

        if sigma is None:
            sigma = self.prng.normal(0, self.sigma, self.n_units)

        # Update the support
        self.s += (dt / self.tau_m) * (self.g_beta * self.beta + self.g_w * np.dot(self.w, self.o)
                                       + self.g_I * log_epsilon(self.I) - self.s - self.g_a * self.a
                                       + sigma)  # Additive noise
        # Softmax
        self.o = softmax(self.s, t=(1/self.G), minicolumns=self.minicolumns)

        # Update the adaptation
        self.a += (dt / self.tau_a) * (self.o - self.a)

        # Update the z-traces
        self.z_pre += (dt / self.tau_z_pre) * (self.o - self.z_pre)
        self.z_post += (dt / self.tau_z_post) * (self.o - self.z_post)
        self.z_co = np.outer(self.z_post, self.z_pre)

        if self.k > self.epsilon:
            # Update the probability estimates
            self.p_pre += (dt / self.tau_p) * (self.z_pre - self.p_pre) * self.k
            self.p_post += (dt / self.tau_p) * (self.z_post - self.p_post) * self.k
            self.p_co += (dt / self.tau_p) * (self.z_co - self.p_co) * self.k

            self.w = get_w_pre_post(self.p_co, self.p_pre, self.p_post)
            self.beta = get_beta(self.p_post)
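
Two more imported helpers appear above: softmax, which takes a temperature t = 1/G and a minicolumns argument, and log_epsilon. Plausible sketches, assuming activities are normalized within each hypercolumn of minicolumns consecutive units (the layout and the clipping are assumptions):

import numpy as np

def softmax_sketch(s, t=1.0, minicolumns=2):
    # Normalize within each hypercolumn (groups of `minicolumns` units)
    x = s.reshape(-1, minicolumns) / t
    x -= x.max(axis=1, keepdims=True)  # subtract the max for numerical stability
    e = np.exp(x)
    return (e / e.sum(axis=1, keepdims=True)).ravel()

def log_epsilon_sketch(x, epsilon=1e-10):
    # Logarithm guarded against zero input
    return np.log(np.maximum(x, epsilon))
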
    def update_weights(self):
        # Update the connectivity
        self.beta = get_beta(self.p_post, self.epsilon)
        self.w = get_w_pre_post(self.P,
                                self.p_pre,
                                self.p_post,
                                self.epsilon,
                                diagonal_zero=False)

    def run_artificial_protocol(self,
                                ws=1.0,
                                wn=0.25,
                                wb=-3.0,
                                alpha=0.5,
                                alpha_back=None,
                                cycle=False):
        """
        This creates an artificial matrix
        :return: w, the weight matrix that was created
        """
        minicolumns = self.nn.minicolumns
        extension = self.nn.minicolumns
        sequence = self.canonical_activity_representation
        if cycle:
            sequence = np.append(sequence,
                                 sequence[0]).reshape(self.nn.minicolumns + 1,
                                                      self.nn.hypercolumns)

        w = create_weight_matrix(minicolumns,
                                 sequence,
                                 ws,
                                 wn,
                                 wb,
                                 alpha,
                                 alpha_back,
                                 extension,
                                 w=None)
        self.nn.w = w

        p = np.ones(self.nn.n_units) * (1.0 / len(sequence))
        self.nn.beta = get_beta(p, self.nn.epsilon)

        # Update the patterns in the network
        nr = self.canonical_network_representation
        self.update_patterns(nr)

        return w
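
Hypothetical usage of the protocol above (manager is an assumed instance of the class that defines it, holding the network in manager.nn):

w = manager.run_artificial_protocol(ws=1.0, wn=0.25, wb=-3.0, alpha=0.5, cycle=False)
# manager.nn.w and manager.nn.beta are now set from the artificial matrix
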
Example #6
    def update_continuous(self, dt=1.0, sigma=None):

        if sigma is None:
            noise = self.sigma * np.sqrt(dt) * self.prng.normal(
                0, 1.0, self.n_units)
        else:
            noise = sigma

        if self.normalized_current:
            normalized_constant = self.hypercolumns
        else:
            normalized_constant = 1.0

        # Compute the synaptic input currents and update the support
        if self.z_transfer:
            self.i_nmda = self.g_w * self.w @ self.z_pre / normalized_constant
            self.i_ampa = self.g_w_ampa * self.w_ampa @ self.z_pre_ampa / normalized_constant
        else:
            self.i_nmda = self.g_w * self.w @ self.o / normalized_constant
            self.i_ampa = self.g_w_ampa * self.w_ampa @ self.o / normalized_constant

        if self.perfect:
            self.s = (
                self.i_nmda  # NMDA effects
                + self.i_ampa  # AMPA effects
                + self.g_beta * self.beta  # Bias
                + self.g_I * self.I  # Input current
                - self.g_a * self.a  # Adaptation
                + noise)  # Additive noise
        else:
            self.s += (dt / self.tau_m) * (
                self.i_nmda  # NMDA effects
                + self.i_ampa  # AMPA effects
                + self.g_beta * self.beta  # Bias
                + self.g_I * self.I  # Input current
                - self.g_a * self.a  # Adaptation
                + noise  # Additive noise
                - self.s)  # Leak: s relaxes towards the drive above

        # Soft-max
        if self.strict_maximum:
            self.o = strict_max(self.s, minicolumns=self.minicolumns)
        else:
            self.o = softmax(self.s, G=self.G, minicolumns=self.minicolumns)

        # Update the adaptation
        self.a += (dt / self.tau_a) * (self.o - self.a)

        # Update the z-traces (NMDA)
        self.z_pre += (dt / self.tau_z_pre) * (self.o - self.z_pre)
        self.z_post += (dt / self.tau_z_post) * (self.o - self.z_post)
        self.z_co = np.outer(self.z_post, self.z_pre)

        # Update the z-traces (AMPA)
        self.z_pre_ampa += (dt / self.tau_z_pre_ampa) * (self.o -
                                                         self.z_pre_ampa)
        self.z_post_ampa += (dt / self.tau_z_post_ampa) * (self.o -
                                                           self.z_post_ampa)
        self.z_co_ampa = np.outer(self.z_post_ampa, self.z_pre_ampa)

        if self.always_learning:

            # Update the probability estimates of the NMDA connection
            self.p_pre += (dt / self.tau_p) * (self.z_pre - self.p_pre)
            self.p_post += (dt / self.tau_p) * (self.z_post - self.p_post)
            self.p_co += (dt / self.tau_p) * (self.z_co - self.p_co)

            # Update the probability estimates of the AMPA connection
            self.p_pre_ampa += (dt / self.tau_p) * (self.z_pre_ampa -
                                                    self.p_pre_ampa)
            self.p_post_ampa += (dt / self.tau_p) * (self.z_post_ampa -
                                                     self.p_post_ampa)
            self.p_co_ampa += (dt / self.tau_p) * (self.z_co_ampa -
                                                   self.p_co_ampa)

            # Update the connectivity
            self.beta = get_beta(self.p_post, self.epsilon)
            self.w_ampa = get_w_pre_post(self.p_co_ampa,
                                         self.p_pre_ampa,
                                         self.p_post_ampa,
                                         self.p,
                                         self.epsilon,
                                         diagonal_zero=self.diagonal_zero)
            self.w = get_w_pre_post(self.p_co,
                                    self.p_pre,
                                    self.p_post,
                                    self.p,
                                    self.epsilon,
                                    diagonal_zero=self.diagonal_zero)

        else:
            # Otherwise, learning only takes effect once k is above epsilon.
            # k_perfect determines whether the effects of training kick in
            # immediately or have some dynamics of their own
            if self.k_perfect:
                # Update the probability estimates of the NMDA connection
                self.p_pre += (dt / self.tau_p) * (self.z_pre - self.p_pre)
                self.p_post += (dt / self.tau_p) * (self.z_post - self.p_post)
                self.p_co += (dt / self.tau_p) * (self.z_co - self.p_co)

                # Update the probability estimates of the AMPA connection
                self.p_pre_ampa += (dt / self.tau_p) * (self.z_pre_ampa -
                                                        self.p_pre_ampa)
                self.p_post_ampa += (dt / self.tau_p) * (self.z_post_ampa -
                                                         self.p_post_ampa)
                self.p_co_ampa += (dt / self.tau_p) * (self.z_co_ampa -
                                                       self.p_co_ampa)

            else:
                self.k_d += (dt / self.tau_k) * (self.k - self.k_d)

                # Update the probability estimates of the NMDA connection
                self.p_pre += (dt / self.tau_p) * (self.z_pre -
                                                   self.p_pre) * self.k_d
                self.p_post += (dt / self.tau_p) * (self.z_post -
                                                    self.p_post) * self.k_d
                self.p_co += (dt / self.tau_p) * (self.z_co -
                                                  self.p_co) * self.k_d

                # Update the probability estimates of the AMPA connection
                self.p_pre_ampa += (dt / self.tau_p) * (
                    self.z_pre_ampa - self.p_pre_ampa) * self.k_d
                self.p_post_ampa += (dt / self.tau_p) * (
                    self.z_post_ampa - self.p_post_ampa) * self.k_d
                self.p_co_ampa += (dt / self.tau_p) * (
                    self.z_co_ampa - self.p_co_ampa) * self.k_d

            if self.k > self.epsilon:
                self.beta = get_beta(self.p_post, self.epsilon)
                self.w_ampa = get_w_pre_post(self.p_co_ampa,
                                             self.p_pre_ampa,
                                             self.p_post_ampa,
                                             self.p,
                                             self.epsilon,
                                             diagonal_zero=self.diagonal_zero)
                self.w = get_w_pre_post(self.p_co,
                                        self.p_pre,
                                        self.p_post,
                                        self.p,
                                        self.epsilon,
                                        diagonal_zero=self.diagonal_zero)
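
strict_max, used above as the winner-take-all alternative to softmax, is also imported from the project's module. A minimal sketch under the same hypercolumn-layout assumption:

import numpy as np

def strict_max_sketch(s, minicolumns=2):
    # One-hot output per hypercolumn: 1 at the arg-max minicolumn, 0 elsewhere
    x = s.reshape(-1, minicolumns)
    o = np.zeros_like(x)
    o[np.arange(x.shape[0]), np.argmax(x, axis=1)] = 1.0
    return o.ravel()
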
import numpy as np
import matplotlib.pyplot as plt

# `data` is assumed to be loaded earlier and to hold flattened 8 x 8 patterns
pattern1 = data[0]
pattern2 = data[1]

pattern1_neural = transform_normal_to_neural_single(pattern1)
pattern2_neural = transform_normal_to_neural_single(pattern2)

patterns = [pattern1_neural, pattern2_neural]

# Hand-computed estimates, kept as a reference for the helper functions below
p_aux = 0.5 * (pattern1_neural + pattern2_neural)
P_aux = 0.5 * (np.outer(pattern1_neural, pattern1_neural) + np.outer(pattern2_neural, pattern2_neural))

p = calculate_probability(patterns)
P = calculate_coactivations(patterns)

w = get_w(P, p)
beta = get_beta(p)

# Parameters for the evolution (Euler integration)
T = 10
dt = 0.1
tau_m = 1.0
G = 1.0

o = np.random.rand(p.size)
# Save initial image
initial_image = np.copy(transform_neural_to_normal_single(o).reshape(8, 8))
m = np.zeros_like(o)

fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
im = ax.imshow(initial_image, cmap='bone', interpolation='nearest')
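
The snippet defines the evolution parameters and the figure but is truncated before the update loop. A plausible completion, assuming the same support/soft-max dynamics as the update_continuous methods above (treating m as the support variable; the softmax signature and the two-minicolumn layout are assumptions):

for _ in range(int(T / dt)):
    # Euler step on the support: bias plus recurrent drive, with leak
    m += (dt / tau_m) * (beta + np.dot(w, o) - m)
    o = softmax(m, t=1.0 / G, minicolumns=2)  # assumed two minicolumns per pixel
    im.set_data(transform_neural_to_normal_single(o).reshape(8, 8))
    plt.pause(0.01)  # redraw the figure as the state evolves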