def generate_connection_i(self, N_i):
    c = utils.Bunch(use_sparse=False,
                    lamb=np.inf,  # cannot be 0
                    avoid_self_connections=False)
    tmpsyn = synapses.create_matrix((N_i, self.N_u), c)
    # No input drive to the inhibitory population: zero out all synapses
    tmpsyn.set_synapses(tmpsyn.get_synapses() * 0)
    return tmpsyn
def generate_connection_e(self, N_e):
    W = zeros((N_e, self.N_a))  # excitatory neurons x input symbols
    available = set(range(N_e))  # indices 0 .. N_e-1 of the excitatory neurons
    for a in range(self.N_a):
        # e.g. connections from the first input symbol to N_u_e sampled
        # excitatory neurons (say, 10 out of 200)
        temp = random.sample(available, self.N_u_e)
        # Set the weight of the sampled connections to 1
        W[temp, a] = 1
        if self.avoid:  # avoid overlapping input pools between symbols
            available = available.difference(temp)
    # The underscore has the special property that it doesn't activate
    # anything: if '_' is part of the letter sequence, its weights are
    # set to zero (no input)
    if '_' in self.lookup:
        W[:, self.lookup['_']] = 0
    # Instantiate a synapses object and attach the connections to it
    c = utils.Bunch(use_sparse=False, lamb=np.inf,
                    avoid_self_connections=False)
    ans = synapses.create_matrix((N_e, self.N_a), c)
    ans.W = W
    return ans
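# --- Illustrative sketch (not part of the original module) ---
# A self-contained, plain-NumPy version of the sampling scheme in
# generate_connection_e above, with assumed example sizes: each of the
# N_a input symbols drives N_u_e excitatory neurons with weight 1, and
# with avoid=True no excitatory neuron is shared between two symbols.
import random
import numpy as np

N_e, N_a, N_u_e = 200, 4, 10              # assumed example sizes
W = np.zeros((N_e, N_a))
available = set(range(N_e))
for a in range(N_a):
    targets = random.sample(sorted(available), N_u_e)
    W[targets, a] = 1
    available -= set(targets)             # avoid=True: disjoint input pools
assert (W.sum(axis=1) <= 1).all()         # at most one symbol per neuron
assert (W.sum(axis=0) == N_u_e).all()     # exactly N_u_e targets per symbol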
def generate_connection(self, N):
    W = np.zeros((N, self.X, self.Y))
    available = set(range(N))
    # Give each grid position (a, b) its own disjoint set of sampled neurons
    for a in range(self.X):
        for b in range(self.Y):
            temp = random.sample(available, self.symbol)
            W[temp, a, b] = 1
            available = available.difference(temp)
    # Flatten the X x Y grid into a single input dimension
    W.shape = (N, self.X * self.Y)
    c = utils.Bunch(use_sparse=False, lamb=np.inf,
                    avoid_self_connections=False)
    ans = synapses.create_matrix((N, self.X * self.Y), c)
    ans.W = W
    return ans
def generate_connection(self, N_e):
    c = utils.Bunch(use_sparse=False,
                    lamb=self.density * N_e,
                    avoid_self_connections=False,  # CHANGE should this be different?
                    eta_stdp=self.eta_stdp)
    tmp = synapses.create_matrix((N_e, self.N), c)
    # get correct connection density
    noone = True
    while noone:
        tmp.set_synapses((rand(N_e, self.N) < self.density).astype(float))
        if sum(tmp.get_synapses()) > 0:
            noone = False
    return tmp
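# --- Illustrative sketch (not part of the original module) ---
# The rejection loop in generate_connection above, reduced to plain
# NumPy with assumed example sizes: draw a Bernoulli(density) mask and
# redraw until at least one synapse exists, so the connection matrix is
# never all-zero even at very low densities.
import numpy as np

N_e, N, density = 200, 10, 0.05           # assumed example sizes
W = np.zeros((N_e, N))
while W.sum() == 0:
    W = (np.random.rand(N_e, N) < density).astype(float)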
def generate_connection_i(self, N_i):
    c = utils.Bunch(use_sparse=False, lamb=np.inf,
                    avoid_self_connections=False)
    ans = synapses.create_matrix((N_i, self.N_a), c)
    W = zeros((N_i, self.N_a))
    if N_i > 0:
        available = set(range(N_i))
        for a in range(self.N_a):
            temp = random.sample(available, self.N_u_i)
            W[temp, a] = 1
            #~ if self.avoid: # N_i is smaller -> broad inhibition?
            #~     available = available.difference(temp)
        if '_' in self.lookup:
            W[:, self.lookup['_']] = 0
    ans.W = W
    return ans
def __init__(self, c, source):
    """
    Initializes the variables of SORN

    Parameters:
        c: bunch
            The bunch of parameters
        source: Source
            The input source
    """
    self.c = c
    self.source = source

    # Initialize weight matrices
    # W_to_from (W_ie = from excitatory to inhibitory)
    self.W_ie = create_matrix((c.N_i, c.N_e), c.W_ie)
    self.W_ei = create_matrix((c.N_e, c.N_i), c.W_ei)
    self.W_ee = create_matrix((c.N_e, c.N_e), c.W_ee)
    self.W_eu = self.source.generate_connection_e(c.N_e)
    self.W_iu = self.source.generate_connection_i(c.N_i)

    if self.c.double_synapses:
        # Second excitatory matrix: a copy of W_ee with its nonzero
        # weights permuted among the existing connections
        import copy
        self.W_ee_2 = copy.deepcopy(self.W_ee)
        tmp = np.array(self.W_ee_2.get_synapses())
        nonzero_syns = tmp[tmp != 0]
        shuffle(nonzero_syns)
        tmp[tmp != 0] = nonzero_syns
        self.W_ee_2.set_synapses(tmp)
        # Scaling: run one synaptic-scaling step with eta_ss = 1 so that
        # both matrices start out normalized, then restore the original
        # scaling rates
        wee_etass = self.W_ee.c.eta_ss
        wee2_etass = self.W_ee_2.c.eta_ss
        self.W_ee.c.eta_ss = 1
        self.W_ee_2.c.eta_ss = 1
        self.synaptic_scaling()
        self.W_ee.c.eta_ss = wee_etass
        self.W_ee_2.c.eta_ss = wee2_etass

    # Initialize the activation of neurons
    self.x = rand(c.N_e) < c.h_ip
    self.y = zeros(c.N_i)
    self.u = source.next()

    # Initialize the pre-threshold variables
    self.R_x = zeros(c.N_e)
    self.R_y = zeros(c.N_i)

    # Initialize thresholds
    if c.ordered_thresholds:
        # From Lazar2011: evenly spaced thresholds, shuffled across neurons
        self.T_i = (arange(c.N_i) + 0.5) * ((c.T_i_max - c.T_i_min)
                                            / (1.0 * c.N_i)) + c.T_i_min
        self.T_e = (arange(c.N_e) + 0.5) * ((c.T_e_max - c.T_e_min)
                                            / (1.0 * c.N_e)) + c.T_e_min
        shuffle(self.T_e)
    else:
        self.T_i = c.T_i_min + rand(c.N_i) * (c.T_i_max - c.T_i_min)
        self.T_e = c.T_e_min + rand(c.N_e) * (c.T_e_max - c.T_e_min)

    # Activate plasticity mechanisms
    self.update = True
    self.stats = None
    self.noise_spikes = 0
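# --- Illustrative sketch (not part of the original module) ---
# The ordered-threshold initialization above (Lazar2011), with assumed
# example parameters: N_e thresholds evenly spaced on [T_e_min, T_e_max],
# then shuffled so the assignment to neurons is random while the
# population still covers the whole range uniformly.
import numpy as np

N_e, T_e_min, T_e_max = 200, 0.5, 1.0     # assumed example parameters
T_e = (np.arange(N_e) + 0.5) * (T_e_max - T_e_min) / N_e + T_e_min
np.random.shuffle(T_e)
assert T_e.min() > T_e_min and T_e.max() < T_e_max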