from itertools import product

import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats

import sudoku_util  # local helper module used by the Sudoku functions below


def structured_connectivity(N_excitatory, Jpos, sigma):
    """
    Ring-attractor connectivity profile:

        J- = (360 - sqrt(2*pi) * sigma * J+) / (360 - sqrt(2*pi) * sigma)

        W(theta_i - theta_j) = J- + (J+ - J-) * exp(-(theta_i - theta_j)^2 / (2 * sigma^2))

    Parameters
    ----------
    N_excitatory : int
        Size of the excitatory population.
    Jpos : float
        Strength of the recurrent input within the excitatory population.
    sigma : float
        Standard deviation of the Gaussian input profile in the excitatory
        population (degrees).

    Returns
    -------
    presyn_weight : np.ndarray
        Weight profile for the structured excitatory-to-excitatory
        connectivity in the recurrent population.
    """
    tmp = np.sqrt(2 * np.pi) * sigma
    Jneg = (360 - tmp * Jpos) / (360 - tmp)
    neurons = np.arange(N_excitatory)
    # Angular distance (in degrees) of each neuron from neuron 0, on a ring.
    delta_theta = 360 * np.minimum(neurons, N_excitatory - neurons) / N_excitatory
    presyn_weight = (
        Jneg + (Jpos - Jneg) * np.exp(-delta_theta**2 / (2 * sigma**2)))
    return presyn_weight
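# A minimal usage sketch for the profile above; the parameter values
# (Jpos=1.6, sigma=14.4 degrees) are illustrative assumptions, not values
# prescribed by this module.
def _demo_structured_connectivity():
    w = structured_connectivity(N_excitatory=1024, Jpos=1.6, sigma=14.4)
    assert w.shape == (1024,)
    # Peak weight Jpos at zero angular distance, decaying symmetrically
    # around the ring.
    assert w[0] == w.max()
    assert np.isclose(w[1], w[-1])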
def gaussian_input(num_neurons, features, width):
    """Return the summed Gaussian tuning-curve input for the given feature angles."""
    neurons = np.arange(0, 360, 360 / num_neurons)
    stim = np.zeros(num_neurons)
    for feature in features:
        tuning = stats.norm.pdf(neurons, loc=feature, scale=width)
        # Rescale the pdf so that each bump peaks at 1 at its feature angle.
        tuning = np.sqrt(2 * np.pi) * width * tuning
        stim += tuning
    return stim
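# A minimal usage sketch: build a two-bump stimulus. The feature angles and
# width are illustrative assumptions.
def _demo_gaussian_input():
    stim = gaussian_input(num_neurons=360, features=[90, 270], width=10)
    # Each bump peaks at ~1 because of the sqrt(2*pi)*width rescaling.
    assert np.isclose(stim[90], 1, atol=1e-2)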
def get_2d_input_weights():
    """Rearrange the input-to-excitatory weights into a 2D grid for visualization.

    Relies on the module-level globals ``n_input``, ``n_e`` and ``synapses``
    set up by the surrounding simulation script.
    """
    name = 'XeAe'
    n_e_sqrt = int(np.sqrt(n_e))
    n_in_sqrt = int(np.sqrt(n_input))
    num_values_col = n_e_sqrt * n_in_sqrt
    num_values_row = num_values_col
    rearranged_weights = np.zeros((num_values_col, num_values_row))
    connMatrix = synapses[name].w[:]
    weight_matrix = np.copy(connMatrix.reshape(n_input, n_e))
    # Tile the receptive field of each excitatory neuron into a square grid.
    for i in range(n_e_sqrt):
        for j in range(n_e_sqrt):
            rearranged_weights[i*n_in_sqrt : (i+1)*n_in_sqrt, j*n_in_sqrt : (j+1)*n_in_sqrt] = \
                weight_matrix[:, i + j*n_e_sqrt].reshape((n_in_sqrt, n_in_sqrt))
    return rearranged_weights
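# A minimal usage sketch, assuming the globals n_input, n_e and
# synapses['XeAe'] have been set up by the surrounding script.
def _demo_plot_input_weights():
    rearranged = get_2d_input_weights()
    plt.imshow(rearranged, interpolation='nearest', cmap='hot_r')
    plt.colorbar()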
def structured_connectivity(N_excitatory, Jpos, sigma):
    """Normalized variant of ``structured_connectivity``: weights sum to 360.

    Note: if both variants live in the same module, this definition shadows
    the earlier one.
    """
    tmp = np.sqrt(2 * np.pi) * sigma
    Jneg = (360 - tmp * Jpos) / (360 - tmp)
    neurons = np.arange(N_excitatory)
    delta_theta = 360 * np.minimum(neurons, N_excitatory - neurons) / N_excitatory
    presyn_weight = (
        Jneg + (Jpos - Jneg) * np.exp(-delta_theta**2 / (2 * sigma**2)))
    return presyn_weight / np.sum(presyn_weight) * 360
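# A minimal usage sketch of the normalized variant (illustrative parameter
# values): the profile sums to 360 by construction.
def _demo_normalized_connectivity():
    w = structured_connectivity(N_excitatory=512, Jpos=1.6, sigma=14.4)
    assert np.isclose(np.sum(w), 360)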
def sub_square(n, i, j):
    """Return subsquare index of cell (i, j) of n x n table."""
    # Check if data is valid.
    if n < 4:
        raise ValueError('Matrix size has to be at least 4-by-4!')
    if not np.sqrt(n).is_integer():
        raise ValueError('Matrix length is not a square number!')
    if (i < 0) | (j < 0):
        raise ValueError('Some coordinate is less than 0!')
    if (i >= n) | (j >= n):
        raise ValueError('Some coordinate does not fit into matrix!')
    ns = int(np.sqrt(n))  # subsquare side length
    si = i // ns
    sj = j // ns
    return si, sj
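# A minimal usage sketch: locate the block containing a cell on standard
# 9x9 and 4x4 boards.
def _demo_sub_square():
    assert sub_square(9, 4, 7) == (1, 2)  # middle band, right stack
    assert sub_square(4, 3, 0) == (1, 0)  # bottom-left block of a 4x4 board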
def structured_connectivity_2d(N_excitatory, Jpos, sigma):
    """2D analogue of ``structured_connectivity`` on an N x N grid of neurons."""
    tmp = np.sqrt(2 * np.pi) * sigma
    Jneg = (360 - tmp * Jpos) / (360 - tmp)
    # Euclidean distance of each grid position (i, j) from the origin.
    neurons = list(product(range(N_excitatory), repeat=2))
    neurons = np.linalg.norm(neurons, axis=1).reshape(N_excitatory, -1)
    dist = np.linalg.norm([N_excitatory, N_excitatory])
    delta_theta = 360 * np.minimum(neurons, dist - neurons) / dist
    presyn_weight = (
        Jneg + (Jpos - Jneg) * np.exp(-delta_theta**2 / (2 * sigma**2)))
    return presyn_weight
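# A minimal usage sketch (illustrative parameter values): the 2D variant
# returns an N x N weight map, peaking at the origin as in the 1D version.
def _demo_structured_connectivity_2d():
    w2d = structured_connectivity_2d(N_excitatory=32, Jpos=1.6, sigma=14.4)
    assert w2d.shape == (32, 32)
    assert w2d[0, 0] == w2d.max()  # zero distance carries the peak weight Jpos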
def test_correct_solution_2D(M):
    """
    Function to test if solution is correct, returning detailed results.

    M is a 2D matrix, i.e. cell-wise competition is assumed to have a winner.
    """
    # Check if matrix is valid size.
    M = np.array(M)
    nrow, ncol = M.shape
    if nrow != ncol:
        raise ValueError('Matrix is not square size!')
    if not np.sqrt(nrow).is_integer():
        raise ValueError('Matrix length is not a square number!')
    unique_vals = np.arange(nrow) + 1
    # Test each row.
    row_correct = np.array(
        [np.array_equal(np.unique(M[i, :]), unique_vals) for i in range(nrow)])
    # Test each column.
    col_correct = np.array(
        [np.array_equal(np.unique(M[:, j]), unique_vals) for j in range(ncol)])
    # Test each sub-square.
    nsrow, nscol = int(np.sqrt(nrow)), int(np.sqrt(ncol))
    sub_correct = [[
        np.array_equal(
            np.unique(M[(nsrow * i):(nsrow * (i + 1)),
                        (nscol * j):(nscol * (j + 1))]), unique_vals)
        for j in range(nscol)
    ] for i in range(nsrow)]
    sub_correct = np.array(sub_correct)
    # Collect test results.
    test_res = {'rows': row_correct, 'cols': col_correct, 'subs': sub_correct}
    return test_res
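# A minimal usage sketch: validate a correct 4x4 Sudoku solution.
def _demo_test_correct_solution_2D():
    M = [[1, 2, 3, 4],
         [3, 4, 1, 2],
         [2, 1, 4, 3],
         [4, 3, 2, 1]]
    res = test_correct_solution_2D(M)
    assert res['rows'].all() and res['cols'].all() and res['subs'].all()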
def plot_sudoku(M, pM=None, cM=None, add_errors=None, remove_lbls=True,
                title=None, fname=None):
    """
    Plot Sudoku matrix, optionally adding errors.

    M: a complete or partial solution.
    pM: a partial solution; if provided, its numbers are colored differently.
    cM: confidence matrix to scale size of numbers with.
    """
    # Init.
    M = np.array(M)
    if pM is not None:
        pM = np.array(pM)
    if cM is not None:
        cM = np.array(cM)
    nrow, ncol = M.shape
    nsrow, nscol = int(np.sqrt(nrow)), int(np.sqrt(ncol))
    if add_errors is None:  # Add errors if matrix is complete.
        add_errors = not np.any(np.isnan(M))

    # Init figure.
    base_cell_size = 1
    ndigits_fac = 1 if nrow < 10 else 1.1
    size = ndigits_fac * nrow * base_cell_size
    fig = plt.figure(figsize=(size, size))
    ax = plt.axes()

    # Plot matrix.
    sns.heatmap(M, vmin=0, vmax=0, cmap='OrRd', cbar=False, square=True,
                linecolor='k', linewidth=1, annot=False, ax=ax)

    # Add cell numbers.
    for i, j in sudoku_util.all_ij_pairs(nrow):
        lbl = int(M[i, j]) if not np.isnan(M[i, j]) else ''
        # Color: is cell present in partial solution?
        c = 'k' if pM is None else 'g' if not np.isnan(pM[i, j]) else 'b'
        # Size: confidence level of cell.
        s = 30 if cM is None else 10 + 20 * cM[i, j]
        # Plot cell label.
        ax.text(j + 0.5, nrow - i - 0.5, lbl, va='center', ha='center',
                weight='bold', fontsize=s, color=c)

    # Remove tick labels.
    if remove_lbls:
        ax.tick_params(labelbottom=False)
        ax.tick_params(labelleft=False)

    # Embolden border lines.
    kws = {'linewidth': 6, 'color': 'k'}
    for i in range(nsrow + 1):
        ax.plot([0, ncol], [i * nsrow, i * nsrow], **kws)
    for j in range(nscol + 1):
        ax.plot([j * nscol, j * nscol], [0, ncol], **kws)

    # Highlight errors.
    if add_errors:
        col, alpha = 'r', 1. / 3
        test_res = sudoku_util.test_correct_solution_2D(M)
        # Rows.
        for i in np.where(np.logical_not(test_res['rows']))[0]:
            irow = nrow - i - 1
            rect = mpl.patches.Rectangle((0, irow), ncol, 1, alpha=alpha, fc=col)
            ax.add_patch(rect)
        # Columns.
        for j in np.where(np.logical_not(test_res['cols']))[0]:
            rect = mpl.patches.Rectangle((j, 0), 1, nrow, alpha=alpha, fc=col)
            ax.add_patch(rect)
        # Sub-squares.
        for i, j in np.argwhere(np.logical_not(test_res['subs'])):
            isrow = nsrow - i - 1
            rect = mpl.patches.Rectangle((j * nscol, isrow * nsrow), nscol,
                                         nsrow, alpha=alpha, fc=col)
            ax.add_patch(rect)

    # Add title.
    if title is not None:
        ax.set_title(title, fontsize='xx-large')

    # Save figure.
    if fname is not None:
        fig.savefig(fname, dpi=300, bbox_inches='tight')

    return ax
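# A minimal usage sketch, assuming `sudoku_util` is importable; the 4x4
# board is the same demo solution validated above.
def _demo_plot_sudoku():
    M = [[1, 2, 3, 4],
         [3, 4, 1, 2],
         [2, 1, 4, 3],
         [4, 3, 2, 1]]
    plot_sudoku(M, title='4x4 demo board')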
def plot_synapses(S, n, elev_azim_list, fig_dir, nspl=50):
    """
    Visualize Sudoku connectivity S as 3D matrix from different angles.

    TODO: make it weighted!
    """
    # Init params.
    nv = np.arange(n)
    xspl, yspl = calc_base_spline(nspl)

    # Init figure.
    fig = plt.figure(figsize=(10, 10))
    ax = plt.axes(projection='3d')
    # Note: equal aspect on 3D axes is not supported by every matplotlib
    # version; set_aspect(1) may raise on some releases.
    ax.set_aspect(1)
    node_cols = get_node_colors(n)

    # Plot table at the bottom.
    kws = {'color': 'k'}
    lims = [-0.5, n - 0.5]
    zlvl = [-0.5, -0.5]
    for iv in range(n + 1):
        lvl = iv - 0.5
        lw = 4 if not iv % np.sqrt(n) else 2
        ax.plot(lims, [lvl, lvl], zlvl, lw=lw, **kws)
        ax.plot([lvl, lvl], lims, zlvl, lw=lw, **kws)

    # Plot nodes.
    x, y, z = zip(*sudoku_util.all_ijk_triples(n))
    all_node_cols = [node_cols[zi] for zi in z]
    ax.scatter(x, y, z, marker='o', c=all_node_cols, s=200)

    # Plot each connection.
    for idx1, idx2 in zip(S.i, S.j):
        i1, j1, k1 = sudoku_util.mat_idx(idx1, n)
        i2, j2, k2 = sudoku_util.mat_idx(idx2, n)
        # Get 3D curve of connection.
        v1, v2 = [i1, j1, k1], [i2, j2, k2]
        x, y, z = calc_3D_spline_coords(v1, v2, xspl, yspl)
        # Plot connection curve.
        ax.plot(x, y, z, ls='-', color=node_cols[k1], alpha=0.5, lw=0.5)
        # TODO: add arrow head to show direction?

    # Format plot.
    ax.set_xlabel('Row')
    ax.set_ylabel('Column')
    ax.set_zlabel('Neuron')
    for f_tp, f_tl in [(ax.set_xticks, ax.set_xticklabels),
                       (ax.set_yticks, ax.set_yticklabels),
                       (ax.set_zticks, ax.set_zticklabels)]:
        f_tp(nv)
        f_tl(nv + 1)

    # Set limits.
    lim = [-0.5, n - 0.5]
    ax.set_xlim(lim)
    ax.set_ylim(lim)
    ax.set_zlim(lim)

    # Set background color.
    ax.set_facecolor((1.0, 1.0, 1.0))

    # Save it from different viewpoints.
    for elev, azim in elev_azim_list:
        ax.view_init(elev=elev, azim=azim)
        ffig = fig_dir + 'elev_{}_azim_{}.png'.format(elev, azim)
        fig.savefig(ffig, dpi=300, bbox_inches='tight')

    return ax
def time_calculations_with_units(self):
    # Timing benchmark (the `time_*` naming suggests an asv-style benchmark
    # method): measure the cost of an RMS computation on a unit-carrying
    # array. The result is intentionally discarded; only the runtime matters.
    rmse = np.sqrt(np.mean(self.ar_with_units**2))
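# A sketch of the pattern benchmarked above, assuming astropy.units as the
# units library (the original array's units library is not shown here); any
# ndarray subclass carrying units behaves analogously under numpy ufuncs.
def _demo_units_rmse():
    import astropy.units as u

    ar_with_units = np.arange(1000.0) * u.m
    rmse = np.sqrt(np.mean(ar_with_units**2))
    assert rmse.unit == u.m  # ufuncs propagate the unit through the RMS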
# NOTE: `evolve` below is a method of a Brian2-backed spiking-layer class;
# `second` and `amp` are Brian2 units, and `TSEvent`, `TSContinuous` and
# `TAShift` come from the surrounding library and are assumed to be in scope.
def evolve(
    self,
    ts_input: Optional[TSEvent] = None,
    duration: Optional[float] = None,
    num_timesteps: Optional[int] = None,
    verbose: bool = False,
) -> TSContinuous:
    """
    Function to evolve the states of this layer given an input

    :param Optional[TSEvent] ts_input:  Input spike train
    :param Optional[float] duration:    Simulation/Evolution time
    :param Optional[int] num_timesteps: Number of evolution time steps
    :param bool verbose:                Currently no effect, just for conformity

    :return TSContinuous: output time series of synaptic currents
    """
    # - Prepare time base
    time_base, __, num_timesteps = self._prepare_input(
        ts_input, duration, num_timesteps
    )

    # - Set spikes for spike generator
    if ts_input is not None:
        event_times, event_channels, _ = ts_input(
            t_start=time_base[0], t_stop=time_base[-1] + self.dt
        )
        self._input_generator.set_spikes(
            event_channels, event_times * second, sorted=False
        )
    else:
        self._input_generator.set_spikes([], [] * second)

    # - Generate a noise trace
    noise_step = (
        np.random.randn(np.size(time_base), self.size)
        * self.noise_std
        * np.sqrt(2 * self.tau_syn / self.dt)
    )

    # - Specify noise input currents, construct TimedArray
    inp_noise = TAShift(
        np.asarray(noise_step) * amp,
        self.dt * second,
        tOffset=self.t * second,
        name="noise_input",
    )

    # - Perform simulation
    self._net.run(
        num_timesteps * self.dt * second, namespace={"I_inp": inp_noise}, level=0
    )
    self._timestep += num_timesteps

    # - Build response TimeSeries
    time_base_out = self._state_monitor.t_
    use_time = self._state_monitor.t_ >= time_base[0]
    time_base_out = time_base_out[use_time]
    a = self._state_monitor.I_syn_.T
    a = a[use_time, :]

    # - Return the current state as final time point
    if time_base_out[-1] != self.t:
        time_base_out = np.concatenate((time_base_out, [self.t]))
        a = np.concatenate((a, np.reshape(self.state, (1, self.size))))

    return TSContinuous(time_base_out, a, name="Receiver current")