def est_VA(data_flag, init_seed):
    """
    Estimate states and parameters of a single-cell FRET model by
    variational annealing, for one initial seed.
    """

    # Load specifications from file; pass to single_cell_FRET object
    list_dict = read_specs_file(data_flag)
    vars_to_pass = compile_all_run_vars(list_dict)
    scF = single_cell_FRET(**vars_to_pass)

    # If stim and meas were not imported, then data was saved as data_flag
    if scF.stim_file is None:
        scF.stim_file = data_flag
    if scF.meas_file is None:
        scF.meas_file = data_flag
    scF.set_stim()
    scF.set_meas_data()

    # Initialize estimation; set the estimation and prediction windows
    scF.init_seed = init_seed
    scF.set_init_est()
    scF.set_est_pred_windows()

    # Initialize annealer class
    annealer = va_ode.Annealer()
    annealer.set_model(scF.df_estimation, scF.nD)
    annealer.set_data(scF.meas_data[scF.est_wind_idxs, :],
                      stim=scF.stim[scF.est_wind_idxs],
                      t=scF.Tt[scF.est_wind_idxs])

    # Set Rm as inverse covariance; all parameters measured for now
    Rm = 1.0 / sp.asarray(scF.meas_noise)**2.0
    P_idxs = sp.arange(scF.nP)

    # Estimate
    BFGS_options = {'gtol': 1.0e-8, 'ftol': 1.0e-8,
                    'maxfun': 1000000, 'maxiter': 1000000}
    tstart = time.time()
    annealer.anneal(scF.x_init[scF.est_wind_idxs], scF.p_init,
                    scF.alpha, scF.beta_array, Rm, scF.Rf0,
                    scF.L_idxs, P_idxs, dt_model=None, init_to_data=True,
                    bounds=scF.bounds, disc='trapezoid',
                    method='L-BFGS-B', opt_args=BFGS_options,
                    adolcID=init_seed)
    print("\nADOL-C annealing completed in %f s." % (time.time() - tstart))

    save_estimates(scF, annealer, data_flag)
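# A minimal driver sketch for est_VA, not part of the original script: each
# annealing run is independent, so distinct initial seeds can be launched as
# separate processes. Passing init_seed as adolcID above is what keeps the
# ADOL-C tapes of concurrent runs from colliding. The specs file name
# 'FRET_twin_example', the pool size, and the seed range are assumptions.
if __name__ == '__main__':
    from multiprocessing import Pool
    with Pool(processes=4) as pool:
        pool.starmap(est_VA, [('FRET_twin_example', seed)
                              for seed in range(10)])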
def nn_run(data_flag, iter_var_idxs):
    """
    Run a supervised learning classification for a single specs file.

    Data is read from a specifications file in the data_dir/specs/ folder,
    with proper formatting given in read_specs_file.py. The specs file
    indicates the full range of the iterated variable; this script only
    produces output from one of those indices, so multiple runs can be
    performed in parallel.
    """

    # Aggregate all run specifications from the specs file; instantiate model
    list_dict = read_specs_file(data_flag)
    vars_to_pass = compile_all_run_vars(list_dict, iter_var_idxs)
    obj = nn(**vars_to_pass)

    # Need this to save TensorFlow objects on iterations
    obj.data_flag = data_flag

    # Set the signals and free energy, depending on whether adaptive or not
    if 'run_type' in list_dict['run_specs']:
        val = list_dict['run_specs']['run_type']
        if val[0] == 'nn':
            obj.init_nn_frontend()
        elif val[0] == 'nn_adapted':
            obj.init_nn_frontend_adapted()
        else:
            print('`%s` run type not accepted for '
                  'supervised learning calculation' % val[0])
            quit()
    else:
        print('No learning calculation run type specified; proceeding with '
              'unadapted learning calculation')
        obj.init_nn_frontend()

    # Set the network variables, learning algorithm
    obj.set_AL_MB_connectome()
    obj.set_ORN_response_array()
    obj.set_PN_response_array()
    obj.init_tf()

    # Train and test performance
    obj.set_tf_class_labels()
    obj.train_and_test_tf()

    # Delete TensorFlow variables to allow saving
    obj.del_tf_vars()
    dump_objects(obj, iter_var_idxs, data_flag)

    return obj
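# Hypothetical command-line entry point matching the docstring's
# one-index-per-job usage, so a scheduler can launch each iterated-variable
# index as its own job. The argument layout is an assumption, and sys is
# assumed imported at the top of the script.
if __name__ == '__main__':
    data_flag = str(sys.argv[1])
    iter_var_idxs = list(map(int, sys.argv[2:]))
    nn_run(data_flag, iter_var_idxs)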
def gen_twin_data(data_flag):
    """
    Generate synthetic ("twin") stimulus, true states, and measurement
    data from a specifications file alone.
    """

    # Load specifications from file; to be passed to single_cell_FRET object
    list_dict = read_specs_file(data_flag)
    vars_to_pass = compile_all_run_vars(list_dict)
    scF = single_cell_FRET(**vars_to_pass)

    assert scF.meas_file is None, "For generating twin data manually, cannot " \
        "import a measurement file; remove meas_file var in specs file"

    scF.set_stim()
    scF.gen_true_states()
    scF.set_meas_data()

    # Save the newly generated data; don't save stimulus if imported
    if scF.stim_file is None:
        save_stim(scF, data_flag)
    save_true_states(scF, data_flag)
    save_meas_data(scF, data_flag)
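# Illustrative twin-data workflow (specs file name and seed range are
# assumptions): generate the synthetic stimulus, states, and measurements
# once, then run independent annealing estimates from several initial seeds
# against the same data_flag, which est_VA picks up because the generated
# data was saved under that flag.
if __name__ == '__main__':
    data_flag = 'FRET_twin_example'
    gen_twin_data(data_flag)
    for seed in range(5):
        est_VA(data_flag, seed)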
def CS_run(data_flag, iter_var_idxs):
    """
    Run a CS decoding run for one given index of a set of iterated variables.

    Data is read from a specifications file in the data_dir/specs/ folder,
    with proper formatting given in read_specs_file.py. The specs file
    indicates the full range of the iterated variable; this script only
    produces output from one of those indices, so multiple runs can be
    performed in parallel.
    """

    # Aggregate all run specifications from the specs file; instantiate model
    list_dict = read_specs_file(data_flag)
    vars_to_pass = compile_all_run_vars(list_dict, iter_var_idxs)
    obj = four_state_receptor_CS(**vars_to_pass)

    # Encode and decode
    obj = single_encode_CS(obj, list_dict['run_specs'])
    obj.decode()

    dump_objects(obj, iter_var_idxs, data_flag)
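# Illustrative serial sweep over a single iterated variable; on a cluster
# one would instead submit one index per job, as the docstring notes. The
# specs file name, the index range, and the single-element index list are
# assumptions.
if __name__ == '__main__':
    for idx in range(10):
        CS_run('CS_example', [idx])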
def pred_plot(data_flag):
    """
    Plot the optimal estimated and predicted trajectories against the
    measured data, and save the underlying traces to text files.
    """

    # Load specs file data and object
    list_dict = read_specs_file(data_flag)
    vars_to_pass = compile_all_run_vars(list_dict)
    scF = single_cell_FRET(**vars_to_pass)

    # If stim and meas were not imported, then data was saved as data_flag
    if scF.stim_file is None:
        scF.stim_file = data_flag
    if scF.meas_file is None:
        scF.meas_file = data_flag
    scF.set_stim()
    scF.set_meas_data()

    # Initialize estimation; set the estimation and prediction windows
    scF.set_est_pred_windows()

    # Load all of the prediction data and estimation object and dicts
    pred_dict = load_pred_data(data_flag)
    opt_IC = sp.nanargmin(pred_dict['errors'])
    opt_pred_path = pred_dict['pred_path'][:, :, opt_IC]
    est_path = pred_dict['est_path'][:, :, opt_IC]
    est_params = pred_dict['params'][:, opt_IC]
    est_range = scF.est_wind_idxs
    pred_range = scF.pred_wind_idxs
    full_range = sp.arange(scF.est_wind_idxs[0], scF.pred_wind_idxs[-1])
    est_Tt = scF.Tt[est_range]
    pred_Tt = scF.Tt[pred_range]
    full_Tt = scF.Tt[full_range]

    # Load true data if using synthetic data
    true_states = None
    try:
        true_states = load_true_file(data_flag)[:, 1:]
    except Exception:
        pass

    num_plots = scF.nD + 1

    # Plot the stimulus
    plt.subplot(num_plots, 1, 1)
    plt.plot(full_Tt, scF.stim[full_range], color='r', lw=2)
    plt.xlim(full_Tt[0], full_Tt[-1])
    plt.ylim(80, 160)

    # Plot the estimates
    iL_idx = 0
    for iD in range(scF.nD):
        plt.subplot(num_plots, 1, iD + 2)
        plt.xlim(full_Tt[0], full_Tt[-1])

        if iD in scF.L_idxs:
            # Plot measured data
            plt.plot(est_Tt, scF.meas_data[scF.est_wind_idxs, iL_idx],
                     color='g')
            plt.plot(pred_Tt, scF.meas_data[scF.pred_wind_idxs, iL_idx],
                     color='g')

            # Plot estimation and prediction
            plt.plot(est_Tt, est_path[:, iD], color='r', lw=3)
            plt.plot(pred_Tt, opt_pred_path[:, iD], color='r', lw=3)

            # Plot true states if this uses fake data
            if true_states is not None:
                plt.plot(scF.Tt, true_states[:, iD], color='k')
            iL_idx += 1
        else:
            plt.plot(est_Tt, est_path[:, iD], color='r', lw=3)
            plt.plot(pred_Tt, opt_pred_path[:, iD], color='r', lw=3)
            if true_states is not None:
                plt.plot(scF.Tt, true_states[:, iD], color='k')

    save_opt_pred_plots(data_flag)
    plt.show()

    # Save all the optimal predictions, measurements and stimuli to txt files
    stim_to_save = sp.vstack((full_Tt.T, scF.stim[full_range].T)).T
    meas_to_save = sp.vstack((full_Tt.T, scF.meas_data[full_range].T)).T
    est_to_save = sp.vstack((est_Tt.T, est_path.T)).T
    pred_to_save = sp.vstack((pred_Tt.T, opt_pred_path.T)).T
    params_to_save = sp.vstack((scF.model.param_names, est_params)).T
    save_opt_pred_data(data_flag, stim_to_save, meas_to_save, est_to_save,
                       pred_to_save, params_to_save)
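# Sketch for running pred_plot on a machine with no display, an assumed use
# case rather than part of the original script: switching matplotlib to the
# non-interactive Agg backend lets save_opt_pred_plots still write figures,
# while plt.show() becomes a harmless no-op. The specs file name is
# illustrative.
if __name__ == '__main__':
    import matplotlib
    matplotlib.use('Agg')
    pred_plot('FRET_twin_example')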
def temporal_entropy_run(data_flag, iter_var_idxs, mu_dSs_offset=0,
                         mu_dSs_multiplier=1./3., sigma_dSs_offset=0,
                         sigma_dSs_multiplier=1./9., signal_window=None,
                         save_data=True):
    """
    Calculate the response entropy at each timepoint of a temporal signal
    trace, for one given index of a set of iterated variables.
    """

    assert mu_dSs_offset >= 0, "mu_dSs_offset kwarg must be >= 0"
    assert sigma_dSs_offset >= 0, "sigma_dSs_offset kwarg must be >= 0"

    # Aggregate all run specifications from the specs file; instantiate model
    list_dict = read_specs_file(data_flag)
    if 'run_type' in list_dict['run_specs']:
        print('!!\n\nrun_spec %s passed in specs file. run_specs are not '
              'accepted for temporal entropy calculations at this time. '
              'Ignoring...\n\n!!\n' % list_dict['run_specs']['run_type'])
    vars_to_pass = compile_all_run_vars(list_dict, iter_var_idxs)
    obj = response_entropy(**vars_to_pass)
    obj.encode_power_Kk()

    # Set the temporal signal array from file; truncate to signal window
    obj.set_signal_trace()
    assert sp.sum(obj.signal_trace <= 0) == 0, \
        "Signal contains negative values; increase signal_trace_offset"
    if signal_window is not None:
        obj.signal_trace_Tt = obj.signal_trace_Tt[signal_window[0]:
                                                  signal_window[1]]
        obj.signal_trace = obj.signal_trace[signal_window[0]:signal_window[1]]

    # Load dual odor dSs from file (this is considered non-adapted fluctuation
    # and should have a shorter timescale than the first odor). Can also use
    # Kk_1 and Kk_2 for separate complexities of odor 1 and 2, respectively.
    if (obj.Kk_1 is not None) and (obj.Kk_2 is not None):
        obj.Kk = obj.Kk_1 + obj.Kk_2
        obj.Kk_split = obj.Kk_2
    if (obj.Kk_split is not None) and (obj.Kk_split != 0):
        try:
            obj.signal_trace_2
        except AttributeError:
            print('Need to assign signal_trace_2 if setting Kk_split or '
                  'Kk_1 and Kk_2 nonzero')
            quit()
        assert sp.sum(obj.signal_trace_2 <= 0) == 0, \
            "Signal_2 contains neg values; increase signal_trace_offset_2"
        if signal_window is not None:
            obj.signal_trace_2 = obj.signal_trace_2[signal_window[0]:
                                                    signal_window[1]]

    obj_list = []
    for iT, dt in enumerate(obj.signal_trace_Tt):
        print('%s/%s' % (iT + 1, len(obj.signal_trace)), end=' ')
        sys.stdout.flush()

        # Set mu_Ss0 from signal trace, if desired
        if obj.set_mu_Ss0_temporal_signal == True:
            obj.mu_Ss0 = obj.signal_trace[iT]

        # Set estimation dSs values from signal trace and kwargs
        signal = obj.signal_trace[iT]
        obj.mu_dSs = mu_dSs_offset + signal*mu_dSs_multiplier
        obj.sigma_dSs = sigma_dSs_offset + signal*sigma_dSs_multiplier

        # Set estimation dSs values for dual odor if needed
        if (obj.Kk_split is not None) and (obj.Kk_split != 0):
            signal_2 = obj.signal_trace_2[iT]
            obj.mu_dSs_2 = mu_dSs_offset + signal_2*mu_dSs_multiplier
            obj.sigma_dSs_2 = sigma_dSs_offset + signal_2*sigma_dSs_multiplier

        # Set the full signal array from the above signal parameters
        obj.set_ordered_dual_signal_array()

        # At first step, set energy; from then on it is dynamical
        if iT == 0:
            obj.set_normal_free_energy()
            # Spread adaptation rates over the system
            if obj.temporal_adaptation_rate_sigma != 0:
                obj.set_ordered_temporal_adaptation_rate()
        else:
            obj.set_temporal_adapted_epsilon()

        # Calculate MI
        obj.set_mean_response_array()
        obj.set_ordered_dual_response_pdf()
        obj.calc_MI_fore_only()
        print(sp.mean(obj.entropy))

        # Deep copy to take all aspects of the object but not update it
        obj_list.append(copy.deepcopy(obj))

    if save_data == True:
        dump_objects(obj_list, iter_var_idxs, data_flag)

    return obj_list
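# Illustrative post-processing of temporal_entropy_run's return value: each
# element of obj_list is a deep-copied snapshot at one timepoint, so the
# timecourse of the mean response entropy can be pulled out directly (the
# same quantity printed in the loop above). The specs file name and signal
# window are assumptions.
if __name__ == '__main__':
    obj_list = temporal_entropy_run('temporal_entropy_example', [0],
                                    signal_window=(0, 500), save_data=False)
    entropy_trace = [sp.mean(obj.entropy) for obj in obj_list]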
def temporal_CS_run(data_flag, iter_var_idxs, mu_dSs_offset=0,
                    mu_dSs_multiplier=1./3., sigma_dSs_offset=0,
                    sigma_dSs_multiplier=1./9., signal_window=None,
                    save_data=True, decode=True):
    """
    Run a CS decoding run for a full temporal signal trace.

    Data is read from a specifications file in the data_dir/specs/ folder,
    with proper formatting given in read_specs_file.py. The specs file
    indicates the full range of the iterated variable; this script only
    produces output from one of those indices, so multiple runs can be
    performed in parallel.
    """

    assert mu_dSs_offset >= 0, "mu_dSs_offset kwarg must be >= 0"
    assert sigma_dSs_offset >= 0, "sigma_dSs_offset kwarg must be >= 0"

    # Aggregate all run specifications from the specs file; instantiate model
    list_dict = read_specs_file(data_flag)
    vars_to_pass = compile_all_run_vars(list_dict, iter_var_idxs)
    obj = four_state_receptor_CS(**vars_to_pass)

    # Set the temporal signal array from file; truncate to signal window
    obj.set_signal_trace()
    assert sp.sum(obj.signal_trace <= 0) == 0, \
        "Signal contains negative values; increase signal_trace_offset"
    if signal_window is not None:
        obj.signal_trace_Tt = obj.signal_trace_Tt[signal_window[0]:
                                                  signal_window[1]]
        obj.signal_trace = obj.signal_trace[signal_window[0]:signal_window[1]]

    # Load dual odor dSs from file (this is considered non-adapted fluctuation
    # and should have a shorter timescale than the first odor). Can also use
    # Kk_1 and Kk_2 for separate complexities of odor 1 and 2, respectively.
    if (obj.Kk_1 is not None) and (obj.Kk_2 is not None):
        obj.Kk = obj.Kk_1 + obj.Kk_2
        obj.Kk_split = obj.Kk_2
    if (obj.Kk_split is not None) and (obj.Kk_split != 0):
        try:
            obj.signal_trace_2
        except AttributeError:
            print('Need to assign signal_trace_2 if setting Kk_split or '
                  'Kk_1 and Kk_2 nonzero')
            quit()
        assert sp.sum(obj.signal_trace_2 <= 0) == 0, \
            "Signal_2 contains neg values; increase signal_trace_offset_2"
        if signal_window is not None:
            obj.signal_trace_2 = obj.signal_trace_2[signal_window[0]:
                                                    signal_window[1]]

    obj_list = []
    for iT, dt in enumerate(obj.signal_trace_Tt):
        print('%s/%s' % (iT + 1, len(obj.signal_trace)), end=' ')
        sys.stdout.flush()

        # Set mu_Ss0 from signal trace, if desired
        if obj.set_mu_Ss0_temporal_signal == True:
            obj.mu_Ss0 = obj.signal_trace[iT]

        # Set estimation dSs values from signal trace and kwargs
        signal = obj.signal_trace[iT]
        obj.mu_dSs = mu_dSs_offset + signal*mu_dSs_multiplier
        obj.sigma_dSs = sigma_dSs_offset + signal*sigma_dSs_multiplier

        # Set estimation dSs values for dual odor if needed
        if (obj.Kk_split is not None) and (obj.Kk_split != 0):
            signal_2 = obj.signal_trace_2[iT]
            obj.mu_dSs_2 = mu_dSs_offset + signal_2*mu_dSs_multiplier
            obj.sigma_dSs_2 = sigma_dSs_offset + signal_2*sigma_dSs_multiplier

        # Encode / decode fully first time; then just update eps and responses
        if iT == 0:
            obj = single_encode_CS(obj, list_dict['run_specs'])
            # Spread adaptation rates over the system
            if obj.temporal_adaptation_rate_sigma != 0:
                obj.set_ordered_temporal_adaptation_rate()
        else:
            obj.set_sparse_signals()
            obj.set_temporal_adapted_epsilon()
            obj.set_measured_activity()
            obj.set_linearized_response()

        # Estimate signal at point iT
        if decode == True:
            obj.decode()

        # Deep copy to take all aspects of the object but not update it
        obj_list.append(copy.deepcopy(obj))

    if save_data == True:
        dump_objects(obj_list, iter_var_idxs, data_flag)

    return obj_list
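# Quick encoding-only sketch: with decode=False the decoding step is skipped
# at each timepoint, which is useful for checking the adaptation dynamics
# alone before committing to a full (and typically slower) decoding sweep.
# The specs file name and window bounds are assumptions.
if __name__ == '__main__':
    obj_list = temporal_CS_run('temporal_CS_example', [0],
                               signal_window=(100, 600),
                               save_data=False, decode=False)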