def main():
    add_script_execution(SCRIPT_ID, session=session, notes=SCRIPT_NOTES)

    for expt in EXPERIMENTS:
        for odor_state in ODOR_STATES:
            sim_id = SIMULATION_ID.format(expt, odor_state)
            sim = session.query(models.Simulation).get(sim_id)
            print(sim_id)

            pos_idxs_start = []
            for trial in sim.trials:
                tp_id_start = trial.start_timepoint_id
                tp = session.query(models.Timepoint).get(tp_id_start)
                pos_idxs_start += [(tp.xidx, tp.yidx, tp.zidx)]

            pos_start = [sim.env.pos_from_idx(idx) for idx in pos_idxs_start]

            # build the histogram
            bins = (sim.env.xbins, sim.env.ybins, sim.env.zbins)
            hist, _ = np.histogramdd(np.array(pos_start), bins=bins)

            # create the data model and store it
            hist_data_model = models.SimulationAnalysisTakeOffPositionHistogram()
            hist_data_model.simulation = sim
            hist_data_model.store_data(session, hist.astype(int))

            session.add(hist_data_model)
            session.commit()
def main(traj_limit=None):
    # add script execution to database
    add_script_execution(SCRIPTID, session=session, multi_use=False, notes=SCRIPTNOTES)

    for sim_id_template in SIMULATION_IDS:
        for expt in EXPERIMENTS:
            for odor_state in ODOR_STATES:
                sim_id = sim_id_template.format(expt, odor_state)
                print(sim_id)

                sim = session.query(models.Simulation).get(sim_id)

                # get the position indexes for all time points for all trials
                pos_idxs = []
                for trial in sim.trials[:traj_limit]:
                    tps = trial.get_timepoints(session)
                    pos_idxs += [np.array([(tp.xidx, tp.yidx, tp.zidx) for tp in tps])]

                pos_idxs = np.concatenate(pos_idxs, axis=0)
                pos = np.array([sim.env.pos_from_idx(pos_idx) for pos_idx in pos_idxs])

                # build the histogram
                bins = (sim.env.xbins, sim.env.ybins, sim.env.zbins)
                pos_histogram, _ = np.histogramdd(pos, bins=bins)

                # create the data model and store it
                pos_hist_data_model = models.SimulationAnalysisPositionHistogram()
                pos_hist_data_model.simulation = sim
                pos_hist_data_model.store_data(session, pos_histogram.astype(int))

                session.add(pos_hist_data_model)
                session.commit()
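# A minimal, standalone sketch (synthetic data, plain numpy only) of the histogramming
# step used above: np.histogramdd bins an (N, 3) array of positions into a 3D occupancy
# grid whose total count equals N. The grid size below is assumed purely for illustration
# and does not come from the project's ENV definition.
import numpy as np

rng = np.random.RandomState(0)
pos = rng.uniform(low=[-0.3, -0.15, -0.15], high=[1.0, 0.15, 0.15], size=(500, 3))

# bin edges analogous to sim.env.xbins / ybins / zbins (66 x 16 x 16 boxes assumed)
xbins = np.linspace(-0.3, 1.0, 67)
ybins = np.linspace(-0.15, 0.15, 17)
zbins = np.linspace(-0.15, 0.15, 17)

hist, _ = np.histogramdd(pos, bins=(xbins, ybins, zbins))
assert hist.shape == (66, 16, 16)
assert int(hist.sum()) == 500  # every sample falls in exactly one box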
def test_correct_number_of_histograms_made(self):
    for sim_id in [SIM_ID_0, SIM_ID_1]:
        sim = session.query(models.Simulation).get(sim_id)
        self.assertEqual(
            N_N_TIMESTEPS,
            len(sim.analysis_displacement_after_n_timesteps_histograms))
def main(traj_limit=None):
    # add script execution to database
    add_script_execution(SCRIPTID, session=session, multi_use=False, notes=SCRIPTNOTES)

    for sim_id_template in SIMULATION_IDS:
        for expt in EXPERIMENTS:
            for odor_state in ODOR_STATES:
                sim_id = sim_id_template.format(expt, odor_state)
                print(sim_id)

                sim = session.query(models.Simulation).get(sim_id)

                for n_timesteps in N_TIMESTEPSS:
                    # get the displacements for all trials
                    displacements = []
                    for trial in sim.trials[:traj_limit]:
                        tps = trial.get_timepoints(session).all()
                        pos_idx_start = np.array((tps[0].xidx, tps[0].yidx, tps[0].zidx))

                        if n_timesteps > len(tps) - 1:
                            # skip if the trajectory has ended by n_timesteps
                            continue

                        pos_idx_end = np.array((tps[n_timesteps].xidx,
                                                tps[n_timesteps].yidx,
                                                tps[n_timesteps].zidx))
                        displacements += [(pos_idx_end - pos_idx_start).astype(int)]

                    displacements = np.array(displacements)

                    # build the histogram
                    x_ub = min(n_timesteps + 1, sim.env.nx)
                    x_lb = -x_ub
                    y_ub = min(n_timesteps + 1, sim.env.ny)
                    y_lb = -y_ub
                    z_ub = min(n_timesteps + 1, sim.env.nz)
                    z_lb = -z_ub

                    x_bins = np.arange(x_lb, x_ub) + 0.5
                    y_bins = np.arange(y_lb, y_ub) + 0.5
                    z_bins = np.arange(z_lb, z_ub) + 0.5

                    displacement_histogram, _ = np.histogramdd(
                        displacements, bins=(x_bins, y_bins, z_bins))

                    # create the data model and store it
                    displacement_hist_data_model = \
                        models.SimulationAnalysisDisplacementAfterNTimestepsHistogram()
                    displacement_hist_data_model.n_timesteps = n_timesteps
                    displacement_hist_data_model.simulation = sim
                    displacement_hist_data_model.shape = displacement_histogram.shape
                    displacement_hist_data_model.store_data(
                        session, displacement_histogram.astype(int))

                    session.add(displacement_hist_data_model)
                    session.commit()
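# A small standalone check (numpy only, illustrative values) of the binning convention
# above: edges at half-integers, e.g. np.arange(-3, 3) + 0.5 = [-2.5, -1.5, ..., 2.5],
# give exactly one bin per integer displacement, so a displacement of 0 lands in the
# central bin. The environment is assumed large enough that n_timesteps + 1 sets the bound.
import numpy as np

n_timesteps = 2
ub = n_timesteps + 1                      # mirrors x_ub / y_ub / z_ub above
edges = np.arange(-ub, ub) + 0.5          # [-2.5, -1.5, -0.5, 0.5, 1.5, 2.5]
counts, _ = np.histogram([-2, 0, 0, 1], bins=edges)
assert len(edges) == 2 * n_timesteps + 2  # i.e. 2 * n_timesteps + 1 bins
assert counts.tolist() == [1, 0, 2, 1, 0]
assert counts[n_timesteps] == 2           # central bin holds zero displacement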
def test_starting_position_idxs_are_correct(self):
    sims = session.query(models.Simulation).\
        filter(models.Simulation.id.like(self.sim_id_pattern)).all()
    for sim in sims:
        for trial in sim.trials:
            first_tp = trial.get_timepoints(session).first()
            trial_start_idx = (first_tp.xidx, first_tp.yidx, first_tp.zidx)
            self.assertEqual(trial_start_idx, trial.geom_config.start_idx)
def test_number_of_points_in_histogram_is_number_of_trials(self):
    for sim_id in [SIM_ID_0, SIM_ID_1]:
        sim = session.query(models.Simulation).get(sim_id)
        sim.analysis_displacement_total_histogram.fetch_data(session)

        n_trials = len(sim.trials)

        n_points = sim.analysis_displacement_total_histogram.xy.sum()
        self.assertEqual(n_points, n_trials)
        n_points = sim.analysis_displacement_total_histogram.xz.sum()
        self.assertEqual(n_points, n_trials)
        n_points = sim.analysis_displacement_total_histogram.yz.sum()
        self.assertEqual(n_points, n_trials)
def test_correct_number_of_simulations_trials_and_timepoints(self):
    sims = session.query(models.Simulation).\
        filter(models.Simulation.id.like(self.sim_id_pattern)).all()
    self.assertEqual(len(sims), 12)

    for sim in sims:
        self.assertEqual(len(sim.trials), TRAJ_LIMIT)
        for trial in sim.trials:
            self.assertEqual(len(trial.get_timepoints(session).all()),
                             trial.trial_info.duration)
            self.assertEqual(len(trial.get_timepoints(session).all()),
                             trial.geom_config.duration)
def test_correct_number_of_geom_configs_added(self):
    gcgs = session.query(models.GeomConfigGroup).\
        filter(models.GeomConfigGroup.id.like('wind_tunnel_matched_discretized%'))
    self.assertEqual(len(gcgs.all()), 12)

    for gcg in gcgs:
        self.assertEqual(len(gcg.geom_configs), TRAJ_LIMIT)

        # make sure all geom_configs have geom_config_extension with all fields filled out
        for gc in gcg.geom_configs:
            self.assertGreater(gc.extension_real_trajectory.avg_dt, 0)
            self.assertGreater(len(gc.extension_real_trajectory.real_trajectory_id), 0)
def test_windspeeds_are_correct(self):
    sims = session.query(models.Simulation).\
        filter(models.Simulation.id.like(self.sim_id_pattern)).all()
    for sim in sims:
        insect_params = {ip.name: ip.value for ip in sim.insect.insect_params}
        w = insect_params['w']
        if '0.3mps' in sim.id:
            self.assertAlmostEqual(w, 0.3, delta=.0001)
        elif '0.4mps' in sim.id:
            self.assertAlmostEqual(w, 0.4, delta=.0001)
        elif '0.6mps' in sim.id:
            self.assertAlmostEqual(w, 0.6, delta=.0001)
def test_plot_heatmaps(self):
    for sim_id in [SIM_ID_0, SIM_ID_1]:
        sim = session.query(models.Simulation).get(sim_id)
        sim.analysis_position_histogram.fetch_data(session)

        heatmap_xy = sim.analysis_position_histogram.xy
        heatmap_xz = sim.analysis_position_histogram.xz
        heatmap_yz = sim.analysis_position_histogram.yz

        fig, axs = plt.subplots(1, 3)
        axs[0].matshow(heatmap_xy.T, origin='lower', extent=sim.env.extentxy)
        axs[1].matshow(heatmap_xz.T, origin='lower', extent=sim.env.extentxz)
        axs[2].matshow(heatmap_yz.T, origin='lower', extent=sim.env.extentyz)

        plt.show()
def test_histogram_dimensions_correct(self):
    for sim_id in [SIM_ID_0, SIM_ID_1]:
        sim = session.query(models.Simulation).get(sim_id)
        sim.analysis_position_histogram.fetch_data(session)

        heatmap_xy = sim.analysis_position_histogram.xy
        self.assertEqual(heatmap_xy.shape[0], sim.env.nx)
        self.assertEqual(heatmap_xy.shape[1], sim.env.ny)

        heatmap_xz = sim.analysis_position_histogram.xz
        self.assertEqual(heatmap_xz.shape[0], sim.env.nx)
        self.assertEqual(heatmap_xz.shape[1], sim.env.nz)

        heatmap_yz = sim.analysis_position_histogram.yz
        self.assertEqual(heatmap_yz.shape[0], sim.env.ny)
        self.assertEqual(heatmap_yz.shape[1], sim.env.nz)
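# The xy / xz / yz attributes read back above are presumably 2D marginals of the stored
# 3D histogram. A standalone numpy sketch of the equivalent projections (hist3d is a
# stand-in for the fetched _data array; the shape is chosen only for illustration):
import numpy as np

hist3d = np.random.RandomState(1).poisson(1.0, size=(66, 16, 16))
proj_xy = hist3d.sum(axis=2)   # collapse z -> shape (nx, ny)
proj_xz = hist3d.sum(axis=1)   # collapse y -> shape (nx, nz)
proj_yz = hist3d.sum(axis=0)   # collapse x -> shape (ny, nz)
assert proj_xy.shape == (66, 16) and proj_xz.shape == (66, 16) and proj_yz.shape == (16, 16)
assert proj_xy.sum() == proj_xz.sum() == proj_yz.sum() == hist3d.sum()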
def test_plotting_of_histograms(self):
    for sim_id in [SIM_ID_0, SIM_ID_1]:
        sim = session.query(models.Simulation).get(sim_id)
        hists = sim.analysis_displacement_after_n_timesteps_histograms
        [hist.fetch_data(session) for hist in hists]

        fig, axs = plt.subplots(3, 2)
        axs[0, 0].matshow(hists[2].xy.T, origin='lower')
        axs[1, 0].matshow(hists[2].xz.T, origin='lower')
        axs[2, 0].matshow(hists[2].yz.T, origin='lower')
        axs[0, 1].matshow(hists[4].xy.T, origin='lower')
        axs[1, 1].matshow(hists[4].xz.T, origin='lower')
        axs[2, 1].matshow(hists[4].yz.T, origin='lower')

        plt.show(block=True)
def test_correct_number_of_histograms_and_correct_size_and_count(self):
    hists = session.query(models.SimulationAnalysisTakeOffPositionHistogram).\
        filter(models.SimulationAnalysisTakeOffPositionHistogram.simulation_id.like(
            SIM_ID_START + '%'))
    self.assertEqual(len(hists.all()), 9)

    for hist in hists:
        hist.fetch_data(session)
        sim = hist.simulation
        shape = (sim.env.nx, sim.env.ny, sim.env.nz)
        self.assertEqual(shape, hist._data.shape)
        self.assertEqual(len(sim.trials), hist._data.sum())
        print(hist._data.sum())
def test_correct_number_of_simulations_trials_and_timepoints(self):
    sims = session.query(models.Simulation).\
        filter(models.Simulation.id.like(self.sim_id_pattern))
    self.assertEqual(len(sims.all()), 12)

    for sim in sims:
        self.assertEqual(len(list(sim.trials)), TRAJ_LIMIT)
        for trial in sim.trials:
            # check to make sure trial info duration matches geom_config duration
            self.assertEqual(trial.trial_info.duration, trial.geom_config.duration)
            # check to make sure there are actually as many timepoints connected
            # to the trial as there should be
            self.assertEqual(trial.trial_info.duration,
                             len(trial.get_timepoints(session).all()))
def test_plot_heatmaps(self):
    for sim_id in [SIM_ID_0, SIM_ID_1]:
        sim = session.query(models.Simulation).get(sim_id)
        sim.analysis_displacement_total_histogram.fetch_data(session)

        heatmap_xy = sim.analysis_displacement_total_histogram.xy
        heatmap_xz = sim.analysis_displacement_total_histogram.xz
        heatmap_yz = sim.analysis_displacement_total_histogram.yz

        extent_xy = sim.analysis_displacement_total_histogram.extent_xy
        extent_xz = sim.analysis_displacement_total_histogram.extent_xz
        extent_yz = sim.analysis_displacement_total_histogram.extent_yz

        fig, axs = plt.subplots(1, 3)
        axs[0].matshow(heatmap_xy.T, origin='lower', extent=extent_xy)
        axs[1].matshow(heatmap_xz.T, origin='lower', extent=extent_xz)
        axs[2].matshow(heatmap_yz.T, origin='lower', extent=extent_yz)

        plt.show()
def test_histograms_are_correct_size_and_have_correct_number_of_points(self):
    for sim_id in [SIM_ID_0, SIM_ID_1]:
        sim = session.query(models.Simulation).get(sim_id)
        hists = sim.analysis_displacement_after_n_timesteps_histograms
        [hist.fetch_data(session) for hist in hists]

        for h_ctr, hist in enumerate(hists):
            n_timesteps = hist.n_timesteps

            nx = min(2 * sim.env.nx - 1, 2 * n_timesteps + 1)
            ny = min(2 * sim.env.ny - 1, 2 * n_timesteps + 1)
            nz = min(2 * sim.env.nz - 1, 2 * n_timesteps + 1)

            self.assertEqual(hist._data.shape, (nx, ny, nz))

            if n_timesteps == 1:
                self.assertEqual(hist._data[1, 1, 1], 0)

            print(hist._data.sum())

            if h_ctr < len(hists) - 1:
                self.assertGreaterEqual(hist._data.sum(),
                                        hists[h_ctr + 1]._data.sum())
def main(traj_limit=None):
    # add script execution to database
    add_script_execution(SCRIPTID, session=session, multi_use=True, notes=SCRIPTNOTES)

    for expt in EXPERIMENTS:
        if '0.3mps' in expt:
            w = 0.3
        elif '0.4mps' in expt:
            w = 0.4
        elif '0.6mps' in expt:
            w = 0.6

        insect_params = INSECT_PARAMS.copy()
        insect_params['w'] = w

        for odor_state in ODOR_STATES:
            print('Running simulation for expt "{}" with odor "{}"...'.format(
                expt, odor_state))

            # get geom_config_group for this experiment and odor state
            geom_config_group_id = GEOM_CONFIG_GROUP_ID.format(expt, odor_state)
            geom_config_group = session.query(models.GeomConfigGroup).get(geom_config_group_id)

            # get wind tunnel copy simulation so we can match plume and insect
            # note we select the first simulation that is of this type and corresponds to the
            # right geom_config_group, since we only use the plume from it, which is independent
            # of what insect parameters were used
            #
            # for instance, the plume bound to a simulation in which the insect had D = 0.6 and
            # that bound to a simulation where D = 0.4 will be the same, since it is only the
            # insect's internal model that has changed
            wt_copy_sims = session.query(models.Simulation).\
                filter(models.Simulation.geom_config_group == geom_config_group).\
                filter(models.Simulation.id.like(WIND_TUNNEL_DISCRETIZED_SIMULATION_ID_PATTERN))

            # get plume from corresponding discretized real wind tunnel trajectory
            if 'fruitfly' in expt:
                pl = CollimatedPlume(env=ENV, dt=-1, orm=wt_copy_sims.first().plume)
            elif 'mosquito' in expt:
                pl = SpreadingGaussianPlume(env=ENV, dt=-1, orm=wt_copy_sims.first().plume)

            # create insect
            # note: we will actually make a new insect for each trial, since the dt's vary;
            # here we just set dt=-1, since this doesn't get stored in the db anyhow
            ins = Insect(env=ENV, dt=-1)
            ins.set_params(**insect_params)
            ins.generate_orm(models)

            # create simulation
            sim_id = SIMULATION_ID.format(
                insect_params['r'], insect_params['d'], expt, odor_state)
            sim_desc = SIMULATION_DESCRIPTION.format(expt, odor_state)
            sim = models.Simulation(id=sim_id, description=sim_desc)
            sim.env = ENV
            sim.dt = -1
            sim.total_trials = len(geom_config_group.geom_configs)
            sim.heading_smoothing = 0
            sim.geom_config_group = geom_config_group
            sim.plume = pl.orm
            sim.insect = ins.orm
            session.add(sim)

            # create ongoing run
            ongoing_run = models.OngoingRun(trials_completed=0, simulations=[sim])
            session.add(ongoing_run)
            session.commit()

            # generate trials
            for gctr, geom_config in enumerate(geom_config_group.geom_configs):
                if gctr == traj_limit:
                    break

                # make new insect with proper dt
                ins = Insect(env=ENV, dt=geom_config.extension_real_trajectory.avg_dt)
                ins.set_params(**insect_params)
                ins.loglike_function = LOGLIKE

                # set insect starting position
                ins.set_pos(geom_config.start_idx, is_idx=True)

                # initialize plume and insect and create trial
                pl.initialize()
                ins.initialize()
                trial = Trial(pl=pl, ins=ins, nsteps=geom_config.duration)

                # run trial
                for step in xrange(geom_config.duration - 1):
                    trial.step()

                # save trial
                trial.add_timepoints(models, session=session,
                                     heading_smoothing=sim.heading_smoothing)
                trial.generate_orm(models)
                trial.orm.geom_config = geom_config
                trial.orm.simulation = sim
                session.add(trial.orm)

                # update ongoing_run
                ongoing_run.trials_completed = gctr + 1
                session.add(ongoing_run)
                session.commit()
def main(traj_limit=None):
    # add script execution to infotaxis database
    add_script_execution(script_id=SCRIPT_ID, session=session, multi_use=True,
                         notes=SCRIPT_NOTES)
    session.commit()

    # get wind tunnel connection and models
    wt_session = imp.load_source('connect', os.path.join(WT_REPO, 'db_api', 'connect.py')).session
    wt_models = imp.load_source('models', os.path.join(WT_REPO, 'db_api', 'models.py'))

    for experiment_id in EXPERIMENT_IDS:
        for odor_state in ODOR_STATES:
            # make geom_config_group
            geom_config_group_id = '{}_{}_odor_{}'.format(
                GEOM_CONFIG_GROUP_ID, experiment_id, odor_state)
            geom_config_group = session.query(models.GeomConfigGroup).get(geom_config_group_id)

            # make simulation
            r = INSECT_PARAMS_DICT[experiment_id]['r']
            d = INSECT_PARAMS_DICT[experiment_id]['d']
            sim_id = SIMULATION_ID.format(r, d, experiment_id, odor_state)
            sim_description = SIMULATION_DESCRIPTION.format(experiment_id, odor_state)
            sim = models.Simulation(id=sim_id, description=sim_description)
            sim.env, sim.dt = ENV, DT
            sim.heading_smoothing = 0
            sim.geom_config_group = geom_config_group

            # make plume
            if 'fruitfly' in experiment_id:
                pl = CollimatedPlume(env=ENV, dt=DT)
            elif 'mosquito' in experiment_id:
                pl = SpreadingGaussianPlume(env=ENV, dt=DT)
            pl.set_params(**PLUME_PARAMS_DICT[experiment_id])
            if odor_state in ('none', 'afterodor'):
                pl.set_params(threshold=-1)
            pl.initialize()
            pl.generate_orm(models, sim=sim)

            # make insect
            ins = Insect(env=ENV, dt=DT)
            ins.set_params(**INSECT_PARAMS_DICT[experiment_id])
            ins.loglike_function = LOGLIKE
            ins.initialize()
            ins.generate_orm(models, sim=sim)

            # add simulation and ongoing run
            sim.ongoing_run = models.OngoingRun(trials_completed=0)
            session.add(sim)
            session.commit()

            # loop through all geom_configs in group, look up corresponding trajectory,
            # and discretize it
            for gctr, geom_config in enumerate(geom_config_group.geom_configs):

                # get trajectory id from geom_config and trajectory from wind tunnel database
                traj_id = geom_config.extension_real_trajectory.real_trajectory_id
                traj = wt_session.query(wt_models.Trajectory).get(traj_id)

                # get positions from traj
                positions = traj.positions(wt_session)

                # create discretized version of trajectory
                trial = TrialFromPositionSequence(positions, pl, ins)

                # add timepoints to trial and generate data model
                trial.add_timepoints(models, session=session,
                                     heading_smoothing=sim.heading_smoothing)
                trial.generate_orm(models)

                # bind simulation, geom_config
                trial.orm.simulation = sim
                trial.orm.geom_config = geom_config

                # update ongoing run
                sim.ongoing_run.trials_completed += 1
                session.add(sim)
                session.commit()

                if traj_limit and (gctr == traj_limit - 1):
                    break

            # update total number of trials
            sim.total_trials = gctr + 1
            session.add(sim)
            session.commit()
def infotaxis_analysis(
        WIND_TUNNEL_CG_IDS, INFOTAXIS_WIND_SPEED_CG_IDS, MAX_CROSSINGS,
        INFOTAXIS_HISTORY_DEPENDENCE_CG_IDS, MAX_CROSSINGS_EARLY,
        X_0_MIN, X_0_MAX, H_0_MIN, H_0_MAX,
        X_0_MIN_SIM, X_0_MAX_SIM, X_0_MIN_SIM_HISTORY, X_0_MAX_SIM_HISTORY,
        T_BEFORE_EXPT, T_AFTER_EXPT, TS_BEFORE_SIM, TS_AFTER_SIM,
        HEADING_SMOOTHING_SIM,
        HEAT_MAP_EXPT_ID, HEAT_MAP_SIM_ID, N_HEAT_MAP_TRAJS, X_BINS, Y_BINS,
        FIG_SIZE, FONT_SIZE, EXPT_LABELS, EXPT_COLORS, SIM_LABELS):
    """
    Show infotaxis-generated trajectories alongside empirical trajectories.
    Show wind-speed dependence and history dependence.
    """
    from db_api.infotaxis import models as models_infotaxis
    from db_api.infotaxis.connect import session as session_infotaxis

    ts_before_expt = int(round(T_BEFORE_EXPT / DT))
    ts_after_expt = int(round(T_AFTER_EXPT / DT))

    headings = {}

    # get headings for wind tunnel plume crossings
    headings['wind_tunnel'] = {}

    for cg_id in WIND_TUNNEL_CG_IDS:
        crossings_all = session.query(models.Crossing).filter_by(
            crossing_group_id=cg_id).all()
        headings['wind_tunnel'][cg_id] = []

        cr_ctr = 0
        for crossing in crossings_all:
            if cr_ctr >= MAX_CROSSINGS:
                break

            # skip this crossing if it doesn't meet our inclusion criteria
            x_0 = crossing.feature_set_basic.position_x_peak
            h_0 = crossing.feature_set_basic.heading_xyz_peak
            if not (X_0_MIN <= x_0 <= X_0_MAX):
                continue
            if not (H_0_MIN <= h_0 <= H_0_MAX):
                continue

            # store crossing heading
            temp = crossing.timepoint_field(
                session, 'heading_xyz', -ts_before_expt, ts_after_expt - 1,
                'peak', 'peak', nan_pad=True)

            # subtract initial heading
            temp -= temp[ts_before_expt]
            headings['wind_tunnel'][cg_id].append(temp)
            cr_ctr += 1

        headings['wind_tunnel'][cg_id] = np.array(headings['wind_tunnel'][cg_id])

    # get headings from infotaxis plume crossings
    headings['infotaxis'] = {}

    for cg_id in INFOTAXIS_WIND_SPEED_CG_IDS:
        crossings_all = list(session_infotaxis.query(models_infotaxis.Crossing).filter_by(
            crossing_group_id=cg_id).all())
        print('{} crossings for infotaxis crossing group: "{}"'.format(
            len(crossings_all), cg_id))
        headings['infotaxis'][cg_id] = []

        cr_ctr = 0
        for crossing in crossings_all:
            if cr_ctr >= MAX_CROSSINGS:
                break

            # skip this crossing if it doesn't meet our inclusion criteria
            x_0 = crossing.feature_set_basic.position_x_peak
            h_0 = crossing.feature_set_basic.heading_xyz_peak
            if not (X_0_MIN_SIM <= x_0 <= X_0_MAX_SIM):
                continue
            if not (H_0_MIN <= h_0 <= H_0_MAX):
                continue

            # store crossing heading
            temp = crossing.timepoint_field(
                session_infotaxis, 'hxyz', -TS_BEFORE_SIM, TS_AFTER_SIM - 1,
                'peak', 'peak', nan_pad=True)
            temp[~np.isnan(temp)] = gaussian_filter1d(
                temp[~np.isnan(temp)], HEADING_SMOOTHING_SIM)

            # subtract initial heading and store result
            temp -= temp[TS_BEFORE_SIM]
            headings['infotaxis'][cg_id].append(temp)
            cr_ctr += 1

        headings['infotaxis'][cg_id] = np.array(headings['infotaxis'][cg_id])

    # get history dependences for infotaxis simulations
    headings['it_hist_dependence'] = {}

    for cg_id in INFOTAXIS_HISTORY_DEPENDENCE_CG_IDS:
        crossings_all = list(session_infotaxis.query(models_infotaxis.Crossing).filter_by(
            crossing_group_id=cg_id).all())
        headings['it_hist_dependence'][cg_id] = {'early': [], 'late': []}

        cr_ctr = 0
        for crossing in crossings_all:
            if cr_ctr >= MAX_CROSSINGS:
                break

            # skip this crossing if it doesn't meet our inclusion criteria
            x_0 = crossing.feature_set_basic.position_x_peak
            h_0 = crossing.feature_set_basic.heading_xyz_peak
            if not (X_0_MIN_SIM_HISTORY <= x_0 <= X_0_MAX_SIM_HISTORY):
                continue
            if not (H_0_MIN <= h_0 <= H_0_MAX):
                continue

            # store crossing heading
            temp = crossing.timepoint_field(
                session_infotaxis, 'hxyz', -TS_BEFORE_SIM, TS_AFTER_SIM - 1,
                'peak', 'peak', nan_pad=True)
            temp[~np.isnan(temp)] = gaussian_filter1d(
                temp[~np.isnan(temp)], HEADING_SMOOTHING_SIM)

            # subtract initial heading
            temp -= temp[TS_BEFORE_SIM]

            # store according to its crossing number
            if crossing.crossing_number <= MAX_CROSSINGS_EARLY:
                headings['it_hist_dependence'][cg_id]['early'].append(temp)
            elif crossing.crossing_number > MAX_CROSSINGS_EARLY:
                headings['it_hist_dependence'][cg_id]['late'].append(temp)
            else:
                raise Exception('crossing number is not early or late for crossing {}'.format(
                    crossing.id))

            cr_ctr += 1

        headings['it_hist_dependence'][cg_id]['early'] = np.array(
            headings['it_hist_dependence'][cg_id]['early'])
        headings['it_hist_dependence'][cg_id]['late'] = np.array(
            headings['it_hist_dependence'][cg_id]['late'])

    # get heatmaps
    if N_HEAT_MAP_TRAJS:
        trajs_expt = session.query(models.Trajectory).\
            filter_by(experiment_id=HEAT_MAP_EXPT_ID, odor_state='on').\
            limit(N_HEAT_MAP_TRAJS)
        trials_sim = session_infotaxis.query(models_infotaxis.Trial).\
            filter_by(simulation_id=HEAT_MAP_SIM_ID).limit(N_HEAT_MAP_TRAJS)
    else:
        trajs_expt = session.query(models.Trajectory).\
            filter_by(experiment_id=HEAT_MAP_EXPT_ID, odor_state='on')
        trials_sim = session_infotaxis.query(models_infotaxis.Trial).\
            filter_by(simulation_id=HEAT_MAP_SIM_ID)

    expt_xs = []
    expt_ys = []
    sim_xs = []
    sim_ys = []

    for traj in trajs_expt:
        expt_xs.append(traj.timepoint_field(session, 'position_x'))
        expt_ys.append(traj.timepoint_field(session, 'position_y'))

    for trial in trials_sim:
        sim_xs.append(trial.timepoint_field(session_infotaxis, 'xidx'))
        sim_ys.append(trial.timepoint_field(session_infotaxis, 'yidx'))

    expt_xs = np.concatenate(expt_xs)
    expt_ys = np.concatenate(expt_ys)

    sim_xs = np.concatenate(sim_xs) * 0.02 - 0.3
    sim_ys = np.concatenate(sim_ys) * 0.02 - 0.15

    ## MAKE PLOTS

    fig, axs = plt.figure(figsize=FIG_SIZE, tight_layout=True), []
    axs.append(fig.add_subplot(4, 3, 1))
    axs.append(fig.add_subplot(4, 3, 2, sharey=axs[0]))

    # plot wind-speed dependence of wind tunnel trajectories
    t = np.arange(-ts_before_expt, ts_after_expt) * DT
    handles = []

    for cg_id in WIND_TUNNEL_CG_IDS:
        label = EXPT_LABELS[cg_id]
        color = EXPT_COLORS[cg_id]

        headings_mean = np.nanmean(headings['wind_tunnel'][cg_id], axis=0)
        headings_sem = stats.nansem(headings['wind_tunnel'][cg_id], axis=0)

        # plot mean and sem
        handles.append(
            axs[0].plot(t, headings_mean, lw=3, color=color, zorder=1, label=label)[0])
        axs[0].fill_between(
            t, headings_mean - headings_sem, headings_mean + headings_sem,
            color=color, alpha=0.2)

    axs[0].set_xlabel('time since odor peak (s)')
    axs[0].set_ylabel('$\Delta$ heading (degrees)')
    axs[0].set_title('experimental data\n(wind speed comparison)')
    axs[0].legend(handles=handles, loc='best')

    t = np.arange(-TS_BEFORE_SIM, TS_AFTER_SIM)

    for cg_id, wt_cg_id in zip(INFOTAXIS_WIND_SPEED_CG_IDS, WIND_TUNNEL_CG_IDS):
        label = EXPT_LABELS[wt_cg_id]
        color = EXPT_COLORS[wt_cg_id]

        headings_mean = np.nanmean(headings['infotaxis'][cg_id], axis=0)
        headings_sem = stats.nansem(headings['infotaxis'][cg_id], axis=0)

        # plot mean and sem
        axs[1].plot(t, headings_mean, lw=3, color=color, zorder=1, label=label)
        axs[1].fill_between(
            t, headings_mean - headings_sem, headings_mean + headings_sem,
            color=color, alpha=0.2)

    axs[1].set_xlabel('time steps since odor peak (s)')
    axs[1].set_title('infotaxis simulations\n(wind speed comparison)')

    # add axes for infotaxis history dependence and make plots
    [axs.append(fig.add_subplot(4, 3, 3 + ctr)) for ctr in range(4)]

    for (ax, cg_id) in zip(axs[-4:], INFOTAXIS_HISTORY_DEPENDENCE_CG_IDS):
        mean_early = np.nanmean(headings['it_hist_dependence'][cg_id]['early'], axis=0)
        sem_early = stats.nansem(headings['it_hist_dependence'][cg_id]['early'], axis=0)

        mean_late = np.nanmean(headings['it_hist_dependence'][cg_id]['late'], axis=0)
        sem_late = stats.nansem(headings['it_hist_dependence'][cg_id]['late'], axis=0)

        # plot means and stds
        try:
            handle_early = ax.plot(t, mean_early, lw=3, color='b', zorder=0, label='early')[0]
            ax.fill_between(
                t, mean_early - sem_early, mean_early + sem_early, color='b', alpha=0.2)
        except:
            pass

        try:
            handle_late = ax.plot(t, mean_late, lw=3, color='g', zorder=0, label='late')[0]
            ax.fill_between(
                t, mean_late - sem_late, mean_late + sem_late, color='g', alpha=0.2)
        except:
            pass

        ax.set_xlabel('time steps since odor peak (s)')
        ax.set_title(SIM_LABELS[cg_id])

        try:
            ax.legend(handles=[handle_early, handle_late])
        except:
            pass

    axs[3].set_ylabel('$\Delta$ heading (degrees)')

    # plot heat maps
    axs.append(fig.add_subplot(4, 1, 3))
    axs.append(fig.add_subplot(4, 1, 4))

    axs[6].hist2d(expt_xs, expt_ys, bins=(X_BINS, Y_BINS))
    axs[7].hist2d(sim_xs, sim_ys, bins=(X_BINS, Y_BINS))

    axs[6].set_ylabel('y (m)')
    axs[7].set_ylabel('y (m)')
    axs[7].set_xlabel('x (m)')

    axs[6].set_title('experimental data (fly 0.4 m/s)')
    axs[7].set_title('infotaxis simulation')

    for ax in axs:
        set_fontsize(ax, FONT_SIZE)

    return fig
def main(SIM_PREFIX=None, sim_ids=None, thresholds=None, trial_limit=None):
    if thresholds is None:
        thresholds = THRESHOLDS

    SCRIPTNOTES = ('Identify plume crossings for simulations with prefix "{}" '
                   'using heading smoothing "{}" and thresholds "{}"'.format(
                       SIM_PREFIX, HEADING_SMOOTHING, thresholds))

    if sim_ids is None:
        SIM_SUFFIXES = [
            'fruitfly_0.3mps_checkerboard_floor_odor_on',
            'fruitfly_0.3mps_checkerboard_floor_odor_none',
            'fruitfly_0.3mps_checkerboard_floor_odor_afterodor',
            'fruitfly_0.4mps_checkerboard_floor_odor_on',
            'fruitfly_0.4mps_checkerboard_floor_odor_none',
            'fruitfly_0.4mps_checkerboard_floor_odor_afterodor',
            'fruitfly_0.6mps_checkerboard_floor_odor_on',
            'fruitfly_0.6mps_checkerboard_floor_odor_none',
            'fruitfly_0.6mps_checkerboard_floor_odor_afterodor',
            'mosquito_0.4mps_checkerboard_floor_odor_on',
            'mosquito_0.4mps_checkerboard_floor_odor_none',
            'mosquito_0.4mps_checkerboard_floor_odor_afterodor',
        ]
        sim_ids = ['{}_{}'.format(SIM_PREFIX, sim_suffix) for sim_suffix in SIM_SUFFIXES]

    # add script execution to database
    add_script_execution(SCRIPTID, session=session, multi_use=True, notes=SCRIPTNOTES)

    for sim_id in sim_ids:
        print('Identifying crossings from simulation: "{}"'.format(sim_id))

        # get simulation
        sim = session.query(models.Simulation).filter_by(id=sim_id).first()

        # get all trials from this simulation
        trials = session.query(models.Trial).filter_by(simulation=sim).all()

        # make crossing group
        if 'fly' in sim_id:
            threshold = thresholds['fly']
        elif 'mosq' in sim_id:
            threshold = thresholds['mosq']

        cg_id = '{}_th_{}_hsmoothing_{}'.format(sim_id, threshold, HEADING_SMOOTHING)
        print('Storing in crossing group:')
        print(cg_id)

        cg = models.CrossingGroup(
            id=cg_id, simulation=sim, threshold=threshold,
            heading_smoothing=HEADING_SMOOTHING)
        session.add(cg)

        # loop through trials and identify crossings
        trial_ctr = 0
        for trial in trials:
            if trial_limit and trial_ctr >= trial_limit:
                break

            # get relevant time-series
            odors = trial.timepoint_field(session, 'odor')
            xs = trial.timepoint_field(session, 'xidx')
            ys = trial.timepoint_field(session, 'yidx')
            zs = trial.timepoint_field(session, 'zidx')

            # get smoothed headings
            hs = smooth(trial.timepoint_field(session, 'hxyz'), HEADING_SMOOTHING)

            # identify crossings
            crossing_lists, peaks = time_series.segment_by_threshold(odors, threshold)
            tr_start = trial.start_timepoint_id

            # add crossings
            for c_ctr, (crossing_list, peak) in enumerate(zip(crossing_lists, peaks)):
                crossing = models.Crossing(
                    trial=trial,
                    crossing_number=c_ctr + 1,
                    crossing_group=cg,
                    start_timepoint_id=crossing_list[0] + tr_start,
                    entry_timepoint_id=crossing_list[1] + tr_start,
                    peak_timepoint_id=crossing_list[2] + tr_start,
                    exit_timepoint_id=crossing_list[3] + tr_start - 1,
                    end_timepoint_id=crossing_list[4] + tr_start - 1,
                    max_odor=peak,
                )
                session.add(crossing)

                # create this crossing's basic feature set
                crossing.feature_set_basic = models.CrossingFeatureSetBasic(
                    position_x_entry=xs[crossing_list[1]],
                    position_y_entry=ys[crossing_list[1]],
                    position_z_entry=zs[crossing_list[1]],
                    heading_xyz_entry=hs[crossing_list[1]],
                    position_x_peak=xs[crossing_list[2]],
                    position_y_peak=ys[crossing_list[2]],
                    position_z_peak=zs[crossing_list[2]],
                    heading_xyz_peak=hs[crossing_list[2]],
                    position_x_exit=xs[crossing_list[3] - 1],
                    position_y_exit=ys[crossing_list[3] - 1],
                    position_z_exit=zs[crossing_list[3] - 1],
                    heading_xyz_exit=hs[crossing_list[3] - 1],
                )
                session.add(crossing)

            trial_ctr += 1

        # commit after all crossings from all trials from a simulation have been added
        session.commit()
def test_correct_simulations_analyzed(self):
    for sim_id in [SIM_ID_0, SIM_ID_1]:
        sim = session.query(models.Simulation).get(sim_id)
        self.assertEqual(sim.analysis_position_histogram.simulation, sim)
def main(INSECT_PARAMS, SCRIPTNOTES, threshold=None, sim_ids=None, sim_descs=None,
         expts=None, odor_states=None, traj_limit=None):
    # add script execution to database
    add_script_execution(SCRIPTID, session=session, multi_use=True, notes=SCRIPTNOTES)

    if expts is None:
        expts = EXPERIMENTS
    if odor_states is None:
        odor_states = ODOR_STATES

    for expt in expts:
        if '0.3mps' in expt:
            w = 0.3
        elif '0.4mps' in expt:
            w = 0.4
        elif '0.6mps' in expt:
            w = 0.6

        insect_params = INSECT_PARAMS.copy()
        insect_params['w'] = w

        for odor_state in odor_states:
            print('Running simulation for expt "{}" with odor "{}"...'.format(
                expt, odor_state))

            # get geom_config_group for this experiment and odor state
            geom_config_group_id = GEOM_CONFIG_GROUP_ID.format(expt, odor_state)
            geom_config_group = session.query(models.GeomConfigGroup).get(geom_config_group_id)

            # get wind tunnel copy simulation so we can match plume and insect
            # note we select the first simulation that is of this type and
            # corresponds to the right geom_config_group, since we only use the
            # plume from it, which is independent of insect parameters used
            #
            # for instance, the plume bound to a simulation in which the insect
            # had D = 0.6 and that bound to a simulation where D = 0.4 will be
            # the same, since it is only the insect's internal model that has changed
            wt_copy_sims = session.query(models.Simulation).\
                filter(models.Simulation.geom_config_group == geom_config_group).\
                filter(models.Simulation.id.like(
                    WIND_TUNNEL_DISCRETIZED_SIMULATION_ID_PATTERN))

            # get plume from corresponding discretized real wind tunnel trajectory
            if 'fruitfly' in expt:
                pl = CollimatedPlume(env=ENV, dt=-1, orm=wt_copy_sims.first().plume)
            elif 'mosquito' in expt:
                pl = SpreadingGaussianPlume(env=ENV, dt=-1, orm=wt_copy_sims.first().plume)
            if threshold is not None:
                print('Setting plume detectability threshold to {}'.format(threshold))
                pl.set_params(threshold=threshold)

            # create insect
            # note: we will actually make a new insect for each trial, since the dt's vary;
            # here we set dt=-1, since this doesn't get stored in the db anyhow
            ins = Insect(env=ENV, dt=-1)
            ins.set_params(**insect_params)
            ins.generate_orm(models)

            # create simulation
            if sim_ids is None:
                sim_id = SIMULATION_ID.format(
                    insect_params['r'], insect_params['d'], expt, odor_state)
            else:
                sim_id = sim_ids[(expt, odor_state)]
            if sim_descs is None:
                sim_desc = SIMULATION_DESCRIPTION.format(expt, odor_state)
            else:
                sim_desc = sim_descs[(expt, odor_state)]

            sim = models.Simulation(id=sim_id, description=sim_desc)
            sim.env = ENV
            sim.dt = -1
            sim.total_trials = len(geom_config_group.geom_configs)
            sim.heading_smoothing = 0
            sim.geom_config_group = geom_config_group
            sim.plume = pl.orm
            sim.insect = ins.orm
            session.add(sim)

            # create ongoing run
            ongoing_run = models.OngoingRun(trials_completed=0, simulations=[sim])
            session.add(ongoing_run)
            session.commit()

            # generate trials
            for gctr, geom_config in enumerate(geom_config_group.geom_configs):
                if gctr == traj_limit:
                    break

                # make new insect with proper dt
                ins = Insect(env=ENV, dt=geom_config.extension_real_trajectory.avg_dt)
                ins.set_params(**insect_params)
                ins.loglike_function = LOGLIKE

                # set insect starting position
                ins.set_pos(geom_config.start_idx, is_idx=True)

                # initialize plume and insect and create trial
                pl.initialize()
                ins.initialize()
                trial = Trial(pl=pl, ins=ins, nsteps=geom_config.duration)

                # run trial
                for step in xrange(geom_config.duration - 1):
                    trial.step()

                # save trial
                trial.add_timepoints(models, session=session,
                                     heading_smoothing=sim.heading_smoothing)
                trial.generate_orm(models)
                trial.orm.geom_config = geom_config
                trial.orm.simulation = sim
                session.add(trial.orm)

                # update ongoing_run
                ongoing_run.trials_completed = gctr + 1
                session.add(ongoing_run)
                session.commit()
def main(trial_limit=None):
    # add script execution to database
    add_script_execution(SCRIPTID, session=session, multi_use=True, notes=SCRIPTNOTES)

    for sim_id in SIM_IDS:
        print('Identifying crossings from simulation: "{}"'.format(sim_id))

        # get simulation
        sim = session.query(models.Simulation).filter_by(id=sim_id).first()

        # get all trials from this simulation
        trials = session.query(models.Trial).filter_by(simulation=sim).all()

        # make crossing group
        if 'fly' in sim_id:
            threshold = THRESHOLDS['fly']
        elif 'mosq' in sim_id:
            threshold = THRESHOLDS['mosq']

        cg_id = '{}_th_{}_hsmoothing_{}'.format(sim_id, threshold, HEADING_SMOOTHING)
        cg = models.CrossingGroup(
            id=cg_id, simulation=sim, threshold=threshold,
            heading_smoothing=HEADING_SMOOTHING)
        session.add(cg)

        # loop through trials and identify crossings
        trial_ctr = 0
        for trial in trials:
            if trial_limit and trial_ctr >= trial_limit:
                break

            # get relevant time-series
            odors = trial.timepoint_field(session, 'odor')
            xs = trial.timepoint_field(session, 'xidx')
            ys = trial.timepoint_field(session, 'yidx')
            zs = trial.timepoint_field(session, 'zidx')

            # get smoothed headings
            hs = smooth(trial.timepoint_field(session, 'hxyz'), HEADING_SMOOTHING)

            # identify crossings
            crossing_lists, peaks = time_series.segment_by_threshold(odors, threshold)
            tr_start = trial.start_timepoint_id

            # add crossings
            for c_ctr, (crossing_list, peak) in enumerate(zip(crossing_lists, peaks)):
                crossing = models.Crossing(
                    trial=trial,
                    crossing_number=c_ctr + 1,
                    crossing_group=cg,
                    start_timepoint_id=crossing_list[0] + tr_start,
                    entry_timepoint_id=crossing_list[1] + tr_start,
                    peak_timepoint_id=crossing_list[2] + tr_start,
                    exit_timepoint_id=crossing_list[3] + tr_start - 1,
                    end_timepoint_id=crossing_list[4] + tr_start - 1,
                    max_odor=peak,
                )
                session.add(crossing)

                # create this crossing's basic feature set
                crossing.feature_set_basic = models.CrossingFeatureSetBasic(
                    position_x_entry=xs[crossing_list[1]],
                    position_y_entry=ys[crossing_list[1]],
                    position_z_entry=zs[crossing_list[1]],
                    heading_xyz_entry=hs[crossing_list[1]],
                    position_x_peak=xs[crossing_list[2]],
                    position_y_peak=ys[crossing_list[2]],
                    position_z_peak=zs[crossing_list[2]],
                    heading_xyz_peak=hs[crossing_list[2]],
                    position_x_exit=xs[crossing_list[3] - 1],
                    position_y_exit=ys[crossing_list[3] - 1],
                    position_z_exit=zs[crossing_list[3] - 1],
                    heading_xyz_exit=hs[crossing_list[3] - 1],
                )
                session.add(crossing)

            trial_ctr += 1

        # commit after all crossings from all trials from a simulation have been added
        session.commit()
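# Illustrative sketch only: the exact semantics of time_series.segment_by_threshold are
# defined in this repo's time_series module; the crossing-identification code above treats
# each returned crossing_list as (start, entry, peak, exit, end) indices relative to the
# trial, plus the crossing's peak odor value. A rough numpy analogue of finding
# suprathreshold segments and their peaks (illustrative values, not the project function):
import numpy as np

odors = np.array([0., 1., 5., 9., 4., 0., 0., 2., 7., 3., 0.])
threshold = 3.0
above = odors > threshold
# entries: indices where the signal rises above threshold; exits: where it falls back below
entries = np.where(above & ~np.roll(above, 1))[0]
exits = np.where(~above & np.roll(above, 1))[0]
for entry, exit_ in zip(entries, exits):
    peak_idx = entry + np.argmax(odors[entry:exit_])
    print(entry, peak_idx, exit_, odors[peak_idx])
# prints: "2 3 5 9.0" then "8 8 9 7.0" (signal assumed to start and end below threshold)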