import numpy as np
import matplotlib.pyplot as plt


def pathangles(theta, X, Y, queue_length=1.0, jump=1, color='k', figsize=(16, 6)):
    """Plots ants' body orientation (takes one value every 'jump' points).

    Examples: if jump = 1, all points are considered
              if jump = 2, every other point is considered
    """
    fig, ax = plt.subplots(figsize=figsize)
    # Draw a tail of length 'queue_length' behind each point,
    # pointing opposite to the heading (np.pi + theta)
    x_theta, y_theta = pol2cart(queue_length, np.pi + theta)
    x_dir = X + x_theta
    y_dir = Y + y_theta
    # Subsample the trajectory, keeping one point every 'jump' steps
    X = X[::jump]
    X_dir = x_dir[::jump]
    Y = Y[::jump]
    Y_dir = y_dir[::jump]
    xarray = np.array([X, X_dir])
    yarray = np.array([Y, Y_dir])
    ax.scatter(X, Y, marker='.', color=color)
    ax.plot(xarray, yarray, color=color)  # each column is one orientation segment
    plt.axis('equal')
    return fig, ax
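# pathangles() (and the scripts below) rely on a pol2cart() helper defined
# elsewhere in this repo. A minimal sketch, assuming the standard
# polar-to-Cartesian convention with angles in radians, would be:
def pol2cart(rho, phi):
    """Convert polar coordinates (rho, phi in radians) to Cartesian (x, y)."""
    return rho * np.cos(phi), rho * np.sin(phi)


# Illustrative call on a toy circular trajectory (synthetic data, not from the
# original experiments); headings are tangent to the circle:
t = np.linspace(0, 2 * np.pi, 50)
fig, ax = pathangles(t + np.pi / 2, np.cos(t), np.sin(t), queue_length=0.2, jump=5)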
# For the unidirectional sampling
fixed_direction = 90  # in degrees

# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------

# Import 3D world for generating snapshots
world_mesh, simulation_engine = initialize_simulation(virtual_world)

# Define base coordinates for the Learning Walk
lw_positional_thetas = np.deg2rad(LW_first_theta) + np.linspace(
    0.0, LW_spiral_rotations * 2 * np.pi, LW_nb_views, endpoint=False)
radii = np.linspace(
    0.1, LW_max_radius, LW_nb_views)  # Learning Walk is a spiral, so radii are in a linspace
lw_x, lw_y = pol2cart(radii, lw_positional_thetas)
lw_thetas = lw_positional_thetas - np.pi  # The orientation of the views (towards the nest)

# Add optional angular noise
lw_ang_noise = np.random.uniform(-LW_max_noise_angl, LW_max_noise_angl, LW_nb_views)
lw_thetas += np.deg2rad(lw_ang_noise)

# Define base coordinates for the Training Route (a straight line along the x-axis)
tr_x = np.arange(TR_start_dist, -LW_max_radius, TR_steps_length)
tr_y = np.zeros_like(tr_x)
tr_thetas = np.zeros_like(tr_x)

# Put all coordinates together and recenter to another location
all_x = np.hstack((tr_x, lw_x)) + setup_loc[0]
all_y = np.hstack((tr_y, lw_y)) + setup_loc[1]
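# Quick visual sanity check of the Learning Walk layout, using pathangles()
# from above. The values here are placeholders chosen for illustration, not
# the LW_* parameters of the original experiments:
_thetas = np.linspace(0.0, 2 * 2 * np.pi, 25, endpoint=False)  # 2 spiral turns, 25 views
_radii = np.linspace(0.1, 1.0, 25)
_x, _y = pol2cart(_radii, _thetas)
pathangles(_thetas - np.pi, _x, _y, queue_length=0.1)  # views face the nest at the centre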
lw_positional_thetas = np.deg2rad(LW_first_theta) + np.linspace(
    0.0, LW_spiral_rotations * 2 * np.pi, LW_nb_views, endpoint=False)
r_thetas = np.deg2rad(REL_first_theta) + np.linspace(
    0.0, 2 * np.pi, REL_nb_rep, endpoint=False)

# ----------------------------------
# Launch parameter exploration
# ----------------------------------
for lw, curlw_dist in enumerate(lw_dist_list):

    # ------------------- (Re)generate LW and TR coords (for current LW size) --------------
    radii = np.linspace(0.1, curlw_dist, LW_nb_views)
    lw_x, lw_y = pol2cart(radii, lw_positional_thetas)
    lw_thetas = lw_positional_thetas - np.pi
    tr_x = np.arange(TR_start_dist, -curlw_dist, TR_steps_length)
    tr_y = np.zeros_like(tr_x)
    tr_thetas = np.zeros_like(tr_x)
    all_x = np.hstack((tr_x, lw_x)) + setup_loc[0]
    all_y = np.hstack((tr_y, lw_y)) + setup_loc[1]
    all_z = np.ones_like(all_x) * ground_distance
    all_thetas = np.hstack((tr_thetas, lw_thetas))
    mem_coords = np.stack((all_x, all_y), axis=1)

    # ------------------- (Re)generate memory views (for current LW size) --------------
    print("Learning attractive views...")
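    # The learning code itself is not shown in this excerpt. A minimal sketch,
    # assuming the same get_views_resized() helper used by oscillator() below
    # and an assumed azimuthal resolution of 360 px (all_thetas are in radians,
    # while get_views_resized() takes degrees, hence the conversion):
    mem_viewbank_attr = np.stack([
        get_views_resized(xi, yi, ground_distance, np.rad2deg(ti) % 360,
                          simulation_engine, resolution=360)
        for xi, yi, ti in zip(all_x, all_y, all_thetas)])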
from itertools import cycle


def oscillator(start_coords, mem_viewbank_attr, mem_viewbank_rep, steps, baseline,
               oscillator_gain, norm_minmax, step_length, mu, sigma, simulation_engine):
    """Oscillating (zig-zag) moving agent steered by view familiarity."""
    # Get the resolution (nb of azimuthal px) of the memorised views,
    # automatically get current views at this resolution
    resolution = mem_viewbank_attr.shape[-1]

    randomness = np.random.normal(mu, sigma, steps)

    theta_osc = np.zeros(steps)
    x = np.zeros(steps)
    y = np.zeros(steps)
    turn_effective = np.zeros(steps)
    raw_overall_drive = np.zeros(steps)

    theta_osc[0] = np.random.randint(0, 360)
    x[0], y[0], z = start_coords

    osc = cycle([1, -1])
    for i in range(steps):
        # Alternate between oscillator states (left-right-left-right-...)
        oscil_state = next(osc)

        view = get_views_resized(x[i], y[i], z, theta_osc[i] % 360,
                                 simulation_engine, resolution=resolution)

        unfam_attr = image_difference(view, mem_viewbank_attr)
        # Neuron between 0 (very familiar) and 1 (very unfamiliar)
        unfam_attr_norm = normalize_unfamiliarity(unfam_attr, *norm_minmax, mean=0.5)

        if mem_viewbank_rep is not None:
            unfam_rep = image_difference(view, mem_viewbank_rep)
            # Neuron between 0 (very familiar) and 1 (very unfamiliar)
            unfam_rep_norm = normalize_unfamiliarity(unfam_rep, *norm_minmax, mean=0.5)
        else:
            unfam_rep_norm = 0.5

        raw_overall_drive[i] = unfam_attr_norm - unfam_rep_norm

        turn_intensity = baseline + (raw_overall_drive[i] * oscillator_gain)
        turn_intensity = np.clip(turn_intensity, 0, 180)
        turn_effective[i] = turn_intensity * oscil_state + randomness[i]

        if i < (steps - 1):  # Just to prevent updating past the last step
            # Rotate according to oscillator state, then step forward
            newtheta = (theta_osc[i] + turn_effective[i]) % 360
            x_displacement, y_displacement = pol2cart(step_length, np.deg2rad(newtheta))
            theta_osc[i + 1] = newtheta
            x[i + 1] = x[i] + x_displacement
            y[i + 1] = y[i] + y_displacement

    return x, y, theta_osc, turn_effective, raw_overall_drive
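# A sketch of how oscillator() might be called after learning; the parameter
# values below are assumptions for illustration, not taken from the original
# experiments:
x, y, theta_osc, turn_effective, raw_overall_drive = oscillator(
    start_coords=(all_x[0], all_y[0], ground_distance),
    mem_viewbank_attr=mem_viewbank_attr,
    mem_viewbank_rep=None,             # repulsive memory bank is optional
    steps=500,
    baseline=90,                       # cf. turn_baseline below
    oscillator_gain=90,
    norm_minmax=(0.0, 1.0),            # assumed normalisation bounds
    step_length=0.01,
    mu=0.0, sigma=10.0,                # Gaussian turn noise (degrees)
    simulation_engine=simulation_engine)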
turn_baseline = 90

# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------

# Import 3D world for generating snapshots
world_mesh, simulation_engine = initialize_simulation(virtual_world)

# Define base coordinates for the Learning Walk
lw_positional_thetas = np.deg2rad(LW_first_theta) + np.linspace(
    0.0, LW_spiral_rotations * 2 * np.pi, LW_nb_views, endpoint=False)
radii = np.linspace(
    0.1, LW_max_radius, LW_nb_views)  # Learning Walk is a spiral, so radii are in a linspace
lw_x, lw_y = pol2cart(radii, lw_positional_thetas)
lw_thetas = lw_positional_thetas - np.pi  # The orientation of the views (towards the nest)

# Add optional angular noise
lw_ang_noise = np.random.uniform(-LW_max_noise_angl, LW_max_noise_angl, LW_nb_views)
lw_thetas += np.deg2rad(lw_ang_noise)

# Define base coordinates for the Training Route (a straight line along the x-axis)
tr_x = np.arange(TR_start_dist, -LW_max_radius, TR_steps_length)
tr_y = np.zeros_like(tr_x)
tr_thetas = np.zeros_like(tr_x)

# Put all coordinates together and recenter to another location
all_x = np.hstack((tr_x, lw_x)) + setup_loc[0]
all_y = np.hstack((tr_y, lw_y)) + setup_loc[1]
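# Worked check of the turn dynamics in oscillator(): the effective turn
# amplitude is clip(baseline + drive * gain, 0, 180). With turn_baseline = 90
# and an assumed oscillator_gain of 90, the drive (unfam_attr_norm -
# unfam_rep_norm, roughly in [-0.5, 0.5] when no repulsive bank is used) maps
# familiar views to shallow 45 deg zig-zags and unfamiliar views to sharp
# 135 deg turns:
for drive in (-0.5, 0.0, 0.5):
    print(drive, np.clip(turn_baseline + drive * 90, 0, 180))  # -> 45.0, 90.0, 135.0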