                                  G=G,
                                  tau_z_pre_ampa=tau_z_pre_ampa,
                                  tau_z_post_ampa=tau_z_post_ampa,
                                  tau_p=tau_p,
                                  z_transfer=z_transfer,
                                  diagonal_zero=diagonal_zero,
                                  strict_maximum=strict_maximum,
                                  perfect=perfect_,
                                  k_perfect=k_perfect,
                                  always_learning=always_learning)

                # Build the manager
                manager = NetworkManager(nn=nn,
                                         dt=dt,
                                         values_to_save=values_to_save)
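                # Set the AMPA weight matrix (w is constructed earlier in the surrounding script)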
                nn.w_ampa = w

                # Recall
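                # T_recall: total recall time per sequence; T_cue: duration of the initial cue (simulation time units)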
                T_recall = 3.0
                T_cue = 0.100
                sequences = [list(range(n_patterns))]
                n = 1

                aux = calculate_recall_time_quantities(manager, T_recall,
                                                       T_cue, n, sequences)
                total_sequence_time, mean, std, success, timings = aux

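                # Store the recall success and the mean persistence time for this (sigma, trial) combination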
                success_vector[index_sigma, trial] = success
                persistent_time_vector[index_sigma, trial] = mean

        successes_list.append(success_vector)

##########
Example #2
                  tau_z_pre_ampa=tau_z_pre_ampa,
                  tau_z_post_ampa=tau_z_post_ampa,
                  tau_p=tau_p,
                  g_I=g_I,
                  z_transfer=z_transfer,
                  diagonal_zero=False,
                  strict_maximum=strict_maximum,
                  perfect=perfect,
                  k_perfect=k_perfect,
                  always_learning=always_learning)
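# Set the beta (bias) gain to zero so the bias term does not contribute during recall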
nn.g_beta = 0.0

# Build the manager
manager = NetworkManager(nn=nn, dt=dt, values_to_save=values_to_save)
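# Hand-crafted sequence connectivity: w_self within a pattern, w_next towards the following pattern, w_rest elsewhere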
w = simple_bcpnn_matrix(minicolumns, w_self, w_next, w_rest)
nn.w_ampa = w

# Recall
T_recall = 0.450
T_cue = 0.050
sequences = [list(range(n_patterns))]
n = 1

aux = calculate_recall_time_quantities(manager, T_recall, T_cue, n, sequences)
total_sequence_time, mean, std, success, timings = aux

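# Pull the recorded AMPA current and unit activity from the manager history and build a matching time axis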
i_ampa = manager.history['i_ampa']
a = manager.history['a']
time = np.linspace(0, manager.T_total, a.shape[0])

##########
Example #3
                          tau_z_pre_ampa=tau_z_pre_ampa,
                          tau_z_post_ampa=tau_z_post_ampa,
                          tau_p=tau_p,
                          z_transfer=z_transfer,
                          diagonal_zero=diagonal_zero,
                          strict_maximum=strict_maximum,
                          perfect=perfect,
                          k_perfect=k_perfect,
                          always_learning=always_learning,
                          normalized_currents=normalized_currents)

        # Build the manager
        manager = NetworkManager(nn=nn, dt=dt, values_to_save=values_to_save)

        # Use the weight matrix obtained from the timed training protocol
        nn.w_ampa = w_timed

        # Recall
        patterns_indexes = list(range(n_patterns))
        sequences = [patterns_indexes]
        # manager.run_network_recall(T_recall=1.0, T_cue=0.100, I_cue=0, reset=True, empty_history=True)
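        # Run the recall phase and compute timing statistics (persistence times and a success measure)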
        aux = calculate_recall_time_quantities(manager, T_recall, T_cue, n,
                                               sequences)
        total_sequence_time, mean, std, success, timings = aux
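        # Read back the learned self-, next- and rest-weights between from_pattern and to_pattern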
        w_self, w_next, w_rest = get_weights(manager,
                                             from_pattern,
                                             to_pattern,
                                             mean=False)

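        # Record the success and mean persistence time for this parameter index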
        success_vector[index] = success
        persistence_time_vector[index] = mean