def set_contour(self, Ncontour=22):
    # Walk down the left side of all body segments and back up the right side
    # to obtain a closed outline, then subsample it to at most Ncontour points.
    vertices = [np.array(seg.vertices[0]) for seg in self.segs]
    l_side = fun.flatten_list([v[:int(len(v) / 2)] for v in vertices])
    r_side = fun.flatten_list([np.flip(v[int(len(v) / 2):], axis=0) for v in vertices])
    r_side.reverse()
    total_contour = l_side + r_side
    if len(total_contour) > Ncontour:
        # Fixed seed for reproducibility; sorting the sampled indices preserves point order.
        seed(1)
        contour = [total_contour[i] for i in sorted(sample(range(len(total_contour)), Ncontour))]
    else:
        contour = total_contour
    # self.contour = contour[ConvexHull(contour).vertices].tolist()
    return contour
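# Illustrative sketch (standalone, hypothetical data, not part of the class): the ordered
# subsampling used in set_contour(), shown in isolation. A fixed seed makes the selection
# reproducible and sorting the sampled indices preserves the walking order of the contour.
def _contour_subsample_example():
    from random import seed, sample
    total_contour = [(float(i), float(i)) for i in range(40)]  # hypothetical 40-point contour
    Ncontour = 22
    seed(1)
    idx = sorted(sample(range(len(total_contour)), Ncontour))
    return [total_contour[i] for i in idx]  # 22 points, original order preserved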
def xy(points, flat=False):
    if isinstance(points, str):
        return [f'{points}_x', f'{points}_y']
    elif isinstance(points, list):
        t = [[f'{p}_x', f'{p}_y'] for p in points]
        if flat:
            return fun.flatten_list(t)
        else:
            return t
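# Illustrative usage sketch; the helper below is hypothetical and not part of the module
# API. It shows the two accepted input types of xy() and the column names they produce,
# following the '<point>_x' / '<point>_y' naming convention used throughout.
def _xy_usage_example():
    single = xy('centroid')                 # ['centroid_x', 'centroid_y']
    nested = xy(['head', 'tail'])           # [['head_x', 'head_y'], ['tail_x', 'tail_y']]
    flat = xy(['head', 'tail'], flat=True)  # ['head_x', 'head_y', 'tail_x', 'tail_y']
    return single, nested, flat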
def generate_gain_space(pars, ranges, Ngrid, values=None):
    if len(pars) != 1 or len(Ngrid) != 1 or len(ranges) != 1:
        raise ValueError('There must be a single parameter, range and space step')
    r, s = ranges[0], Ngrid[0]
    if values is None:
        values = np.linspace(r[0], r[1], s)
    values = [flatten_list([[[a, b] for a in values] for b in values])]
    values_dict = dict(zip(pars, values))
    space = cartesian_product(values_dict)
    return space
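# Illustrative usage sketch; 'olfactor.gain' is a hypothetical parameter name and the
# structure of the returned space depends on the module's cartesian_product() helper.
# A single parameter is explored over a 3x3 grid of (x, y) gain pairs in [-100, 100].
def _gain_space_example():
    return generate_gain_space(pars=['olfactor.gain'],
                               ranges=[(-100.0, 100.0)],
                               Ngrid=[3])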
def data_collection_config(dataset, sim_params):
    d = dataset
    effectors = [e for e in sim_params['collect_effectors']]
    # effectors = [e for e in sim_params['collect_effectors'] if component_params[e]]
    step_pars = list(set(fun.flatten_list([effector_collection[e]['step'] for e in effectors])))
    end_pars = list(set(fun.flatten_list([effector_collection[e]['endpoint'] for e in effectors])))
    if sim_params['collect_midline']:
        step_pars += fun.flatten_list(d.points_xy)
    if sim_params['collect_contour']:
        step_pars += fun.flatten_list(d.contour_xy)
    collected_pars = {
        'step': sim_params['step_pars'] + step_pars,
        'endpoint': sim_params['end_pars'] + end_pars
    }
    return collected_pars
def collection_conf(dataset, collections):
    d = dataset
    step_pars = []
    end_pars = []
    for c in collections:
        if c == 'midline':
            step_pars += list(midline_xy_pars(N=d.Nsegs).keys())
            # step_pars += fun.flatten_list(d.points_xy)
        elif c == 'contour':
            step_pars += fun.flatten_list(d.contour_xy)
        else:
            step_pars += effector_collection[c]['step']
            end_pars += effector_collection[c]['endpoint']
    collected_pars = {'step': fun.unique_list(step_pars),
                      'endpoint': fun.unique_list(end_pars)}
    return collected_pars
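# Illustrative usage sketch; 'olfactor' is a hypothetical key assumed to exist in
# effector_collection, while 'midline' and 'contour' are handled explicitly above.
def _collection_conf_example(dataset):
    return collection_conf(dataset, collections=['midline', 'contour', 'olfactor'])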
def deb_analysis(traj):
    data_dir = traj.config.dataset_path
    parent_dir = traj.config.dir_path
    df = save_results_df(traj)
    runs_idx, runs, par_names, par_full_names, par_values, res_names, res_values = get_results(
        traj, res_names=None)
    if len(par_names) == 2:
        # z0s = [1.0, 0.5, 1.0]
        for i in range(len(res_names)):
            r = res_names[i]
            labels = par_names + [r]
            plot_3pars(df, labels, save_to=traj.config.dir_path, pref=r)
            # plot_3pars(df, labels, z0=z0s[i], save_to=traj.config.dir_path, pref=r)
    dirs = [f'{data_dir}/{dir}' for dir in os.listdir(data_dir)]
    dirs.sort()
    ds = [LarvaDataset(dir) for dir in dirs]
    if len(ds) == 1:
        new_ids = [None]
    else:
        if len(par_names) == 1:
            new_ids = [f'{par_names[0]} : {v}' for v in par_values[0]]
        else:
            new_ids = [d.id for d in ds]
        plot_endpoint_params(ds, new_ids, mode='deb', save_to=parent_dir)
    # print(new_ids, [d.id for d in ds])
    # raise
    deb_dicts = flatten_list([[deb_dict(d, id, new_id=new_id) for id in d.agent_ids]
                              for d, new_id in zip(ds, new_ids)])
    plot_debs(deb_dicts=deb_dicts, save_to=parent_dir, save_as='deb_f.pdf', mode='f')
    plot_debs(deb_dicts=deb_dicts, save_to=parent_dir, save_as='deb.pdf')
    plot_debs(deb_dicts=deb_dicts, save_to=parent_dir, save_as='deb_minimal.pdf', mode='minimal')
    return df
def get_space_from_file(space_filepath=None, params=None, space_pd=None, returned_params=None,
                        flag=None, flag_range=[0, +np.inf], ranges=None, par4ranges=None,
                        additional_params=None, additional_values=None):
    if space_pd is None:
        space_pd = pd.read_csv(space_filepath, index_col=0)
    if params is None:
        params = space_pd.columns.values.tolist()
    if returned_params is None:
        returned_params = params
    if (ranges is not None) and (par4ranges is not None):
        for p, r in zip(par4ranges, ranges):
            space_pd = space_pd[(space_pd[p] >= r[0]) & (space_pd[p] <= r[1])].copy(deep=True)
        print('Ranges found. Selecting combinations within range')
    if flag:
        r0, r1 = flag_range
        space_pd = space_pd[space_pd[flag].dropna() > r0].copy(deep=True)
        space_pd = space_pd[space_pd[flag].dropna() < r1].copy(deep=True)
        print(f'Using {flag} to select suitable parameter combinations')
    values = [space_pd[p].values.tolist() for p in params]
    values = [[float(b) for b in a] for a in values]
    if additional_params is not None and additional_values is not None:
        for p, vs in zip(additional_params, additional_values):
            Nspace = len(values[0])
            Nv = len(vs)
            # Tile the existing columns Nv times and append one new column holding each
            # additional value repeated across the original grid.
            values = [a * Nv for a in values] + [flatten_list([[v] * Nspace for v in vs])]
            returned_params += [p]
    space = dict(zip(returned_params, values))
    return space
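# Illustrative usage sketch; the file path and the column names 'base_activation',
# 'activation_range' and 'fit' are hypothetical. Rows are kept only if 'base_activation'
# lies in [10, 30] and the 'fit' value is positive.
def _space_from_file_example():
    return get_space_from_file(space_filepath='data/space.csv',
                               params=['base_activation', 'activation_range'],
                               ranges=[(10.0, 30.0)],
                               par4ranges=['base_activation'],
                               flag='fit',
                               flag_range=[0, +np.inf])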
def __init__(self, unique_id, model, schedule, length=5, data=None):
    Larva.__init__(self, unique_id=unique_id, model=model)
    self.schedule = schedule
    self.data = data
    self.pars = self.data.columns.values
    self.Nticks = len(self.data.index.unique().values)
    self.t0 = self.data.index.unique().values[0]
    d = self.model.dataset
    # Midline, contour and centroid xy-columns that are actually present in the recorded data.
    self.spinepoint_xy_pars = [p for p in fun.flatten_list(d.points_xy) if p in self.pars]
    self.Npoints = int(len(self.spinepoint_xy_pars) / 2)
    self.contour_xy_pars = [p for p in fun.flatten_list(d.contour_xy) if p in self.pars]
    self.Ncontour = int(len(self.contour_xy_pars) / 2)
    self.centroid_xy_pars = [p for p in d.cent_xy if p in self.pars]
    Nsegs = self.model.draw_Nsegs
    if Nsegs is not None:
        if Nsegs == self.Npoints - 1:
            self.orientation_pars = [p for p in nam.orient(d.segs) if p in self.pars]
            self.Nors = len(self.orientation_pars)
            self.Nangles = 0
            if self.Nors != Nsegs:
                raise ValueError(
                    f'Orientation values are not present for all body segments: {self.Nors} of {Nsegs}')
        elif Nsegs == 2:
            self.orientation_pars = [p for p in ['front_orientation'] if p in self.pars]
            self.Nors = len(self.orientation_pars)
            self.angle_pars = [p for p in ['bend'] if p in self.pars]
            self.Nangles = len(self.angle_pars)
            if self.Nors != 1 or self.Nangles != 1:
                raise ValueError(
                    f'{self.Nors} orientation and {self.Nangles} angle values are present; exactly 1 of each is needed.')
        else:
            raise ValueError(
                f'Defined number of segments {Nsegs} must be either 2 or {self.Npoints - 1}')
    else:
        self.Nors, self.Nangles = 0, 0
        # self.angle_pars = [p for p in d.angles + ['bend'] if p in self.pars]
        # self.Nangles = len(self.angle_pars)
        #
        # self.orientation_pars = [p for p in nam.orient(d.segments) + ['front_orientation', 'rear_orientation'] if p in self.pars]
        # self.Nors = len(self.orientation_pars)
    self.chunk_ids = None
    self.trajectory = []
    self.color = deepcopy(self.default_color)
    self.sim_length = length
    self.radius = self.sim_length / 2
    # Preload per-tick arrays; columns that are missing from the data become NaN placeholders.
    if self.Npoints > 0:
        self.spinepoint_positions_ar = self.data[self.spinepoint_xy_pars].values
        self.spinepoint_positions_ar = self.spinepoint_positions_ar.reshape(
            [self.Nticks, self.Npoints, 2])
    else:
        self.spinepoint_positions_ar = np.ones([self.Nticks, self.Npoints, 2]) * np.nan
    if self.Ncontour > 0:
        self.contourpoint_positions_ar = self.data[self.contour_xy_pars].values
        self.contourpoint_positions_ar = self.contourpoint_positions_ar.reshape(
            [self.Nticks, self.Ncontour, 2])
    else:
        self.contourpoint_positions_ar = np.ones([self.Nticks, self.Ncontour, 2]) * np.nan
    if len(self.centroid_xy_pars) == 2:
        self.centroid_position_ar = self.data[self.centroid_xy_pars].values
    else:
        self.centroid_position_ar = np.ones([self.Nticks, 2]) * np.nan
    if len(self.model.pos_xy_pars) == 2:
        self.position_ar = self.data[self.model.pos_xy_pars].values
    else:
        self.position_ar = np.ones([self.Nticks, 2]) * np.nan
    if self.Nangles > 0:
        self.spineangles_ar = self.data[self.angle_pars].values
    else:
        self.spineangles_ar = np.ones([self.Nticks, self.Nangles]) * np.nan
    if self.Nors > 0:
        self.orientations_ar = self.data[self.orientation_pars].values
    else:
        self.orientations_ar = np.ones([self.Nticks, self.Nors]) * np.nan
    # Boolean behavioral-annotation flags per tick; a non-NaN entry marks the behavior as active.
    vp_behavior = [p for p in self.behavior_pars if p in self.pars]
    self.behavior_ar = np.zeros([self.Nticks, len(self.behavior_pars)], dtype=bool)
    for i, p in enumerate(self.behavior_pars):
        if p in vp_behavior:
            self.behavior_ar[:, i] = np.array(
                [not v for v in np.isnan(self.data[p].values).tolist()])
    if self.model.draw_Nsegs is not None:
        LarvaBody.__init__(self, model, pos=self.position_ar[0],
                           orientation=self.orientations_ar[0][0],
                           initial_length=self.sim_length / 1000, length_std=0,
                           Nsegs=self.model.draw_Nsegs, interval=0)
    self.pos = self.position_ar[0]
    self.id_box = self.init_id_box()
def sim_analysis(d, experiment):
    if d is None:
        return
    s, e = d.step_data, d.endpoint_data
    if experiment in ['feed_patchy', 'feed_scatter', 'feed_grid']:
        # am = e['amount_eaten'].values
        # print(am)
        # cr, pr, fr = e['stride_dur_ratio'].values, e['pause_dur_ratio'].values, e['feed_dur_ratio'].values
        # print(cr + pr + fr)
        # cN, pN, fN = e['num_strides'].values, e['num_pauses'].values, e['num_feeds'].values
        # print(cN, pN, fN)
        # cum_sd, f_success = e['cum_scaled_dst'].values, e['feed_success_rate'].values
        # print(cum_sd, f_success)
        plot_endpoint_scatter(datasets=[d], labels=[d.id],
                              par_shorts=['cum_sd', 'f_am', 'str_tr', 'fee_tr'])
        plot_endpoint_scatter(datasets=[d], labels=[d.id], par_shorts=['cum_sd', 'f_am'])
    elif experiment in ['growth', 'growth_2x']:
        starvation_hours = d.config['starvation_hours']
        f = d.config['deb_base_f']
        deb_model = deb_default(starvation_hours=starvation_hours, base_f=f)
        if experiment == 'growth_2x':
            roversVSsitters = True
            datasets = d.split_dataset(larva_id_prefixes=['Sitter', 'Rover'])
            labels = ['Sitters', 'Rovers']
        else:
            roversVSsitters = False
            datasets = [d]
            labels = [d.id]
        cc = {'datasets': datasets, 'labels': labels, 'save_to': d.plot_dir}
        plot_gut(**cc)
        plot_food_amount(**cc)
        plot_food_amount(filt_amount=True, **cc)
        # raise
        plot_pathlength(scaled=False, **cc)
        plot_endpoint_params(mode='deb', **cc)
        try:
            barplot(par_shorts=['f_am'], **cc)
        except Exception:
            pass
        deb_dicts = [deb_dict(d, id, starvation_hours=starvation_hours)
                     for id in d.agent_ids] + [deb_model]
        c = {'save_to': d.plot_dir, 'roversVSsitters': roversVSsitters}
        plot_debs(deb_dicts=deb_dicts[:-1], save_as='deb_hunger_sim_start.pdf', mode='hunger',
                  sim_only=True, start_at_sim_start=True, **c)
        plot_debs(deb_dicts=deb_dicts[:-1], save_as='deb_f_sec.pdf', mode='f', sim_only=True,
                  time_unit='seconds', start_at_sim_start=True, **c)
        plot_debs(deb_dicts=deb_dicts[:-1], save_as='deb_f.pdf', mode='f', sim_only=True, **c)
        plot_debs(deb_dicts=deb_dicts[:-1], save_as='deb_hunger.pdf', mode='hunger', sim_only=True, **c)
        plot_debs(deb_dicts=deb_dicts, save_as='comparative_deb_complete.pdf', mode='complete', **c)
        # raise
        plot_debs(deb_dicts=deb_dicts, save_as='comparative_deb.pdf', **c)
        plot_debs(deb_dicts=deb_dicts, save_as='comparative_deb_minimal.pdf', mode='minimal', **c)
        plot_debs(deb_dicts=deb_dicts[:-1], save_as='deb.pdf', sim_only=True, **c)
        plot_debs(deb_dicts=deb_dicts[:-1], save_as='deb_minimal.pdf', mode='minimal', sim_only=True, **c)
        plot_debs(deb_dicts=deb_dicts[:-1], save_as='deb_complete.pdf', mode='complete', sim_only=True, **c)
        plot_debs(deb_dicts=[deb_dicts[-1]], save_as='default_deb.pdf', **c)
    elif experiment == 'dispersion':
        target_dataset = load_reference_dataset()
        datasets = [d, target_dataset]
        labels = ['simulated', 'empirical']
        comparative_analysis(datasets=datasets, labels=labels, simVSexp=True, save_to=None)
        plot_marked_strides(dataset=d, agent_ids=d.agent_ids[:3], title=' ',
                            slices=[[10, 50], [60, 100]])
        plot_marked_turns(dataset=d, agent_ids=d.agent_ids[:3], min_turn_angle=20)
    elif experiment in ['chemorbit', 'chemotax']:
        plot_timeplot('c_odor1', datasets=[d])
        plot_timeplot('dc_odor1', datasets=[d])
        plot_timeplot('A_olf', datasets=[d])
        plot_timeplot('A_tur', datasets=[d])
        plot_timeplot('Act_tur', datasets=[d])
        plot_distance_to_source(dataset=d, experiment=experiment)
        d.visualize(agent_ids=[d.agent_ids[0]], mode='image', image_mode='final',
                    contours=False, centroid=False, spinepoints=False, random_colors=True,
                    trajectories=True, trajectory_dt=0, save_as='single_trajectory')
    elif experiment == 'odor_pref':
        ind = d.compute_preference_index(arena_diameter_in_mm=100)
        print(ind)
        return ind
    elif experiment == 'imitation':
        d.save_agent(pars=fun.flatten_list(d.points_xy) + fun.flatten_list(d.contour_xy),
                     header=True)