def activation_fun(g, mu, r_plus):
    """Thresholded arctan activation.

    Returns (2/pi) * arctan(g * (r_plus - mu)) for inputs above the
    threshold mu, and 0 otherwise: the (sign(...) + 1)/2 factor acts as
    a Heaviside step at mu.

    Parameters
    ----------
    g : float
        Gain of the arctan nonlinearity.
    mu : float
        Activation threshold.
    r_plus : float or ndarray
        Input drive (broadcasts elementwise).
    """
    step = 0.5 * (np.sign(r_plus - mu) + 1.0)
    return 2.0 / np.pi * np.arctan(g * (r_plus - mu)) * step


# Generate rat data: evaluate each output neuron's final firing rate on a
# 50x50 grid of positions spanning the [-62.5, 62.5] arena and save one
# heatmap per neuron.
j_vals = i_vals = np.linspace(-62.5, 62.5, 50)
firing_rate = np.zeros((50, 50))  # was written by index without allocation; allocate explicitly
for k in range(100):
    for i in range(50):
        for j in range(50):
            # NOTE(review): input_rates presumably maps an (n, 2) position
            # array to input-neuron rates — confirm against input_neuron.
            pos = np.array([[i_vals[i], j_vals[j]]])
            r = input_neuron.input_rates(pos)
            h = np.dot(last_weights[k, :], r.transpose())
            firing_rate[i, j] = activation_fun(g, mu, h)
    plt.matshow(np.rot90(firing_rate))
    plt.colorbar()
    plt.title('The final firing rate of neuron ' + str(k))
    # BUG FIX: plt.axes() expects a [left, bottom, width, height] rect in
    # figure coordinates and would create a bogus new axes here; plt.axis()
    # sets the data limits, which is what was intended.
    plt.axis([-62.5, 62.5, -62.5, 62.5])
    plt.savefig(PATH + 'neuron_r_' + str(k) + '.png', format="png")
    plt.close()
# Reconstruct the final weight matrix (one row per output neuron, 400 input
# weights each) from the flattened last row of the weight history.
# order="F" matches the column-major flattening used when w_arr was stored.
last_weights = np.reshape(w_arr[-1, :], (network_size[0], 400), order="F")


def activation_fun(g, mu, r_plus):
    """Thresholded arctan activation.

    Returns (2/pi) * arctan(g * (r_plus - mu)) for inputs above the
    threshold mu, and 0 otherwise: the (sign(...) + 1)/2 factor acts as
    a Heaviside step at mu.

    Parameters
    ----------
    g : float
        Gain of the arctan nonlinearity.
    mu : float
        Activation threshold.
    r_plus : float or ndarray
        Input drive (broadcasts elementwise).
    """
    step = 0.5 * (np.sign(r_plus - mu) + 1.0)
    return 2.0 / np.pi * np.arctan(g * (r_plus - mu)) * step


# Generate rat data: evaluate each output neuron's final firing rate on a
# 50x50 grid of positions spanning the [-62.5, 62.5] arena and save one
# heatmap per neuron.
j_vals = i_vals = np.linspace(-62.5, 62.5, 50)
firing_rate = np.zeros((50, 50))  # was written by index without allocation; allocate explicitly
for k in range(100):
    for i in range(50):
        for j in range(50):
            pos = np.array([[i_vals[i], j_vals[j]]])
            r = input_neuron.input_rates(pos)
            h = np.dot(last_weights[k, :], r.transpose())
            firing_rate[i, j] = activation_fun(g, mu, h)
    plt.matshow(np.rot90(firing_rate))
    plt.colorbar()
    plt.title("The final firing rate of neuron " + str(k))
    # BUG FIX: plt.axes() expects a [left, bottom, width, height] rect in
    # figure coordinates and would create a bogus new axes here; plt.axis()
    # sets the data limits, which is what was intended.
    plt.axis([-62.5, 62.5, -62.5, 62.5])
    plt.savefig(PATH + "neuron_r_" + str(k) + ".png", format="png")
    plt.close()
# -*- coding: utf-8 -*-
"""Driver script: simulate a rat trajectory and visualize input-neuron activity."""
import input_neuron
import running_rat

# Number of trajectory steps to simulate.
n_steps = 1e4

print('calling running_rat')
positions = running_rat.running_rat(n_steps)

print('calling input_rates')
rates = input_neuron.input_rates(positions)

# Plot the input-neuron rates along the simulated trajectory.
input_neuron.visualize_activity(positions, rates)