def set_measured_activity(self):
    """
    Set the full measured activity, from nonlinear response.

    Sets self.Yy (measured activity with noise), self.Yy0 (learned
    background activity), and self.dYy (response above background).
    Optionally applies a temporal kernel and divisive normalization.
    """
    # Learned background firing only utilizes average background signal
    self.Yy0 = receptor_activity(self.Ss0, self.Kk1, self.Kk2, self.eps,
                                 self.binding_competitive,
                                 self.num_binding_sites)
    self.Yy = receptor_activity(self.Ss, self.Kk1, self.Kk2, self.eps,
                                self.binding_competitive,
                                self.num_binding_sites)
    # Additive Gaussian measurement noise on the foreground response only
    self.Yy += sp.random.normal(0, self.meas_noise, self.Mm)

    # Apply temporal kernel, carrying filter memory between calls
    if self.temporal_run:
        kernel_params = [self.kernel_T, self.kernel_dt, self.kernel_tau_1,
                         self.kernel_tau_2, self.kernel_shape_1,
                         self.kernel_shape_2, self.kernel_alpha,
                         self.kernel_scale]
        self.Yy0, self.memory_Yy0 = temporal_kernel(
            self.Yy0, self.memory_Yy0, self.signal_trace_Tt, kernel_params)
        self.Yy, self.memory_Yy = temporal_kernel(
            self.Yy, self.memory_Yy, self.signal_trace_Tt, kernel_params)

    # Nonlinearities: threshold-gate and scale, then saturate.
    # FIX: gate Yy0 on its own threshold crossing (was gated on
    # self.Yy > NL_threshold, inconsistent with set_mean_response_array).
    self.Yy0 *= self.NL_scale * (self.Yy0 > self.NL_threshold)
    self.Yy *= self.NL_scale * (self.Yy > self.NL_threshold)
    self.Yy0 = sp.minimum(self.Yy0, self.firing_max)
    self.Yy = sp.minimum(self.Yy, self.firing_max)

    # Measured response above background
    self.dYy = self.Yy - self.Yy0

    # Add effects of divisive normalization if called; dYy is then
    # recomputed from the normalized activities.
    if self.divisive_normalization == True:
        self.Yy0 = inhibitory_normalization(self.Yy0, self.inh_C,
                                            self.inh_D, self.inh_eta,
                                            self.inh_R)
        self.Yy = inhibitory_normalization(self.Yy, self.inh_C,
                                           self.inh_D, self.inh_eta,
                                           self.inh_R)
        self.dYy = self.Yy - self.Yy0
def manual_Kk_replace(self):
    """
    Manually add high responders to Kk2 matrix.

    Receptors whose mean tuning-curve activity falls below
    manual_Kk_replace_min_act receive one randomly placed strong
    (specialist) binding constant in Kk2.
    """
    # Epsilon is needed to compute the activities that identify
    # weak responders.
    self.set_normal_free_energy()

    # Build tuning curves: response of every receptor to each odorant
    # presented alone at concentration manual_Kk_replace_Ss.
    tuning_curves = sp.zeros((self.Mm, self.Nn))
    for odor_idx in range(self.Nn):
        probe_Ss = sp.zeros(self.Nn)
        probe_Ss[odor_idx] = self.manual_Kk_replace_Ss
        tuning_curves[:, odor_idx] = receptor_activity(
            probe_Ss, self.Kk1, self.Kk2, self.eps,
            self.binding_competitive, self.num_binding_sites)

    # For each weakly responding receptor, insert a single specialist
    # response at a random odorant column.
    weak_mask = sp.mean(tuning_curves, axis=1) < \
        self.manual_Kk_replace_min_act
    for receptor_idx in sp.where(weak_mask)[0]:
        target_col = sp.random.randint(self.Nn)
        self.Kk2[receptor_idx, target_col] = 10.**sp.random.uniform(
            self.manual_Kk_replace_lo, self.manual_Kk_replace_hi)
def set_mean_response_array(self):
    """
    Set the mean receptor responses for the signal array.

    Applies a threshold-gated scaling nonlinearity to the raw receptor
    activity, then saturates at firing_max; result stored in self.Yy.
    """
    raw_response = receptor_activity(self.Ss, self.Kk1, self.Kk2, self.eps)
    # Responses below NL_threshold are zeroed; the rest are scaled
    gated = raw_response * self.NL_scale * (raw_response > self.NL_threshold)
    # Saturate at the maximum firing rate
    self.Yy = sp.minimum(gated, self.firing_max)
def set_measured_activity(self):
    """
    Set measured activity as the true response above learned background.
    """
    # True receptor activity for the current signal
    true_response = receptor_activity(self.Ss, self.Kk1, self.Kk2, self.eps)
    # Learned background activity only utilizes average background signal
    background = bkgrnd_activity(self.Ss0, self.Kk1, self.Kk2, self.eps)
    self.Yy = true_response
    self.Yy0 = background
    # Measured response above background
    self.dYy = true_response - background
def set_temporal_adapted_epsilon(self):
    """
    Set adapted epsilon based on current value and adaptation rate.

    The adapted value is set by the linear decay rate equation,
    d(eps)/dt = beta*(a_0 - a), where the target activity a_0 is drawn
    from (adapted_activity_mu, adapted_activity_sigma) and then warped
    by the weber-law-breaking scaling below. Updates self.eps in place.
    """
    # Perfectly adapted activity level is set manually
    activity_stats = [self.adapted_activity_mu, self.adapted_activity_sigma]
    perfect_adapt_Yy = random_matrix([self.Mm], params=activity_stats,
        seed=self.seed_adapted_activity)
    # Per-receptor exponents for the WL-breaking warp of the target
    beta_scale_factors = sp.random.uniform(self.adaptive_beta_scaling_min,
        self.adaptive_beta_scaling_max, self.Mm)

    # This is a kind of hacky way to incorporate WL breaking.
    # Requires Kk1 to be small
    # NOTE(review): Kk2_sum is sum_j Ss_j / Kk2_ij; the warp below pulls
    # the target activity toward 1 as (1/Kk2_sum)**beta -> 0.
    Kk2_sum = sp.dot(self.Kk2**-1.0, self.Ss).T
    den = perfect_adapt_Yy.T*(1 - (1/Kk2_sum)**beta_scale_factors)\
        + (1/Kk2_sum)**beta_scale_factors
    perfect_adapt_Yy = (perfect_adapt_Yy.T / den)

    # Make adaptation rate into a vector if it has not yet been set.
    try:
        self.temporal_adaptation_rate_vector
    except AttributeError:
        assert self.temporal_adaptation_rate_sigma == 0, "Before "\
            "setting new epsilon with set_temporal_adapted_epsilon, "\
            "you must call set_ordered_temporal_adaptation_rate, since "\
            "temporal_adaptation_rate_sigma is nonzero"
        self.temporal_adaptation_rate_vector = sp.ones(self.Mm)*\
            self.temporal_adaptation_rate

    # Receptor activity used for adaptation is not firing rate; just
    # get the Or/Orco activity, w/o LN functions at backend.
    current_Yy = receptor_activity(self.Ss, self.Kk1, self.Kk2, self.eps,
        self.binding_competitive, self.num_binding_sites)
    if self.temporal_adaptation_type == 'imperfect':
        # Forward-Euler step of d(eps)/dt = beta*(a - a_0), using the
        # signal-trace sampling interval as the timestep
        d_eps_dt = (self.temporal_adaptation_rate_vector*\
            (current_Yy.T - perfect_adapt_Yy)).T
        delta_t = self.signal_trace_Tt[1] - self.signal_trace_Tt[0]
        self.eps += delta_t * d_eps_dt
    elif self.temporal_adaptation_type == 'perfect':
        # Jump directly to the free energy that yields the target activity
        self.eps = free_energy(self.Ss, self.Kk1, self.Kk2,
            perfect_adapt_Yy)

    # Enforce epsilon limits
    self.eps = sp.maximum(self.eps.T, self.min_eps).T
    self.eps = sp.minimum(self.eps.T, self.max_eps).T
def set_measured_activity(self):
    """
    Set the full measured activity, from nonlinear response.
    """
    # True receptor activity for the current signal
    self.Yy = receptor_activity(self.Ss, self.Kk1, self.Kk2, self.eps)
    # Learned background activity only utilizes average background signal
    self.Yy0 = receptor_activity(self.Ss0, self.Kk1, self.Kk2, self.eps)
    # Measured response above background
    self.dYy = self.Yy - self.Yy0

    # Add effects of divisive normalization if called; dYy is then
    # recomputed from the normalized activities.
    if self.divisive_normalization == True:
        self.Yy0, self.Yy = (
            inhibitory_normalization(activity, self.inh_C, self.inh_D,
                                     self.inh_eta, self.inh_R)
            for activity in (self.Yy0, self.Yy))
        self.dYy = self.Yy - self.Yy0
def set_temporal_adapted_epsilon(self): """ Set adapted epsilon based on current value and adaptation rate. The adapted value is set by linear decay rate equation, d(eps)/dt = beta*(a_0 - a). a_0 is set by passing the manually chosen variables temporal_adaptation_mu_eps and temporal_adaptation_mu_Ss0 to the activity. """ # Perfectly adapted activity level is based on the variables: # temporal_adaptation_mu_eps, temporal_adaptation_sigma_eps, # temporal_adaptation_mu_Ss0. These functions take the activity # level set by these variables at that signal intensity, to # adapt epsilon to the current Ss perfect_adapt_eps_base = sp.ones(self.Mm)*\ self.temporal_adaptation_mu_eps + random_matrix(self.Mm, params=[0, self.temporal_adaptation_sigma_eps], seed=self.seed_eps) perfect_adapt_Ss = sp.zeros(self.Nn) perfect_adapt_Ss[self.Ss0 != 0] = self.temporal_adaptation_mu_Ss0 perfect_adapt_Yy = receptor_activity(perfect_adapt_Ss, self.Kk1, self.Kk2, perfect_adapt_eps_base) # Make adaptation rate into a vector if it has not yet been set. try: self.temporal_adaptation_rate_vector except AttributeError: assert self.temporal_adaptation_rate_sigma == 0, "Before "\ "setting new epsilon with set_temporal_adapted_epsilon, "\ "you must call set_ordered_temporal_adaptation_rate, since "\ "temporal_adaptation_rate_sigma is nonzero" self.temporal_adaptation_rate_vector = sp.ones(self.Mm)*\ self.temporal_adaptation_rate if self.temporal_adaptation_type == 'imperfect': d_eps_dt = self.temporal_adaptation_rate_vector*\ (self.Yy - perfect_adapt_Yy) delta_t = self.signal_trace_Tt[1] - self.signal_trace_Tt[0] self.eps += delta_t * d_eps_dt elif self.temporal_adaptation_type == 'perfect': self.eps = free_energy(self.Ss, self.Kk1, self.Kk2, perfect_adapt_Yy)
def decode_nonlinear_CS(
        obj, opt_type="L1_strong", precision='None', init_params=None):
    """
    Run CS decoding with L1 norm, using full activity with no linearization.

    Args:
        obj: Encoder-like object exposing Kk1, Kk2, eps, Yy, Ss, Nn.

    Optional args:
        opt_type: String for type of L1 minimization "L1_strong" or "L1_weak".
        precision: Float, for L1_weak, the multiplier of the squared error.
        init_params: List; initialization point for the optimization.
            Default None draws a fresh random pair per call. (The old
            default [sp.random.rand()*-0.1, sp.random.rand()*0.1] was a
            mutable default evaluated ONCE at import, so the "random"
            initialization was frozen for the life of the process.)

    Returns:
        numpy array; the decoded signal estimate.

    Raises:
        ValueError: if opt_type is not a recognized optimization type
            (previously this called quit(), killing the whole process).
    """
    if init_params is None:
        # Fresh draw each call, matching the apparent original intent
        init_params = [sp.random.rand() * -0.1, sp.random.rand() * 0.1]

    def L1_strong(x):
        # Plain L1 objective: sum of absolute values of the estimate
        return sp.sum(abs(x))

    from kinetics import receptor_activity

    if opt_type == "L1_strong":
        # Equality constraint: decoded signal must reproduce the
        # measured (nonlinear) receptor activity exactly
        constraints = ({
            'type': 'eq',
            'fun': lambda x: receptor_activity(
                x, obj.Kk1, obj.Kk2, obj.eps) - obj.Yy})
        res = minimize(
            L1_strong,
            obj.Ss * sp.random.normal(1, 0.1, obj.Nn),
            method='SLSQP',
            constraints=constraints)
    else:
        raise ValueError('Unknown optimization type: %s' % opt_type)
    return res.x