Example 1
    def compute_LC_activity(self, width=27.5):
        """Compute landmark cell activity.

        The activity of each landmark cell is a Gaussian of the angular
        distance between the landmark and the cell's preferred direction,
        following Dollé et al. (2010). The amplitude of the Gaussian is
        inversely proportional to the Euclidean distance between the
        landmark and the agent.

        :param width: Width of the receptive fields, in degrees.
        :return: Array of landmark cell activations, normalised to unit norm.
        """
        self.previous_LC_activity = self.LC_activations
        activity = np.zeros(self.LC_activations.shape)
        for landmark in self.env.landmark_locations:
            # First compute the direction to the landmark from the agent
            landmark_direction = self.angle_to_landmark(landmark)
            # Angular distance to each cell's preferred direction
            angular_distances = abs(
                get_relative_angle(landmark_direction,
                                   self.landmark_cell_centres))
            euclidean_distance = np.linalg.norm(
                landmark - [self.env.curr_x, self.env.curr_y])
            # Gaussian tuning, with amplitude decaying with distance
            activity += (1 / (euclidean_distance + .5)) * np.exp(
                -angular_distances**2 / (2 * width**2))
        # Normalise the population activity across all landmarks
        activity /= np.linalg.norm(activity)
        return activity
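
All five examples depend on a get_relative_angle helper that is not included in this listing. As a minimal sketch only, assuming angles in degrees and a signed result wrapped to [-180, 180), an implementation consistent with its usage here could be:

    import numpy as np

    def get_relative_angle(angle_a, angle_b):
        # Signed difference angle_a - angle_b, wrapped to [-180, 180).
        # Broadcasts over NumPy arrays, as required by Examples 1 and 5.
        return (np.asarray(angle_a) - angle_b + 180) % 360 - 180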
Example 2
    def get_available_actions(self):
        """Return the allocentric directions whose absolute angle to the
        current orientation does not exceed the maximum turning angle.
        """
        available_actions = [
            d for d in self.allocentric_directions
            if abs(get_relative_angle(self.env.curr_orientation, d)) <=
            self.max_turning_angle
        ]
        return available_actions
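
For intuition, here is a quick usage demonstration of this filtering with the get_relative_angle sketch above; the eight compass directions and the 90-degree turning limit are made-up values, not taken from the original code:

    allocentric_directions = list(range(0, 360, 45))  # hypothetical directions
    curr_orientation = 90.0                           # hypothetical heading
    max_turning_angle = 90.0                          # hypothetical limit

    available = [d for d in allocentric_directions
                 if abs(get_relative_angle(curr_orientation, d)) <= max_turning_angle]
    print(available)  # [0, 45, 90, 135, 180]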
Example 3
    def choose_action(self):
        available_actions = self.get_available_actions()

        # Evaluate the value of the position each available action leads to
        action_goodness = []
        for act in available_actions:
            next_x, next_y = self.env.compute_new_position(act)
            action_goodness.append(self.get_value(next_x, next_y))

        # Break ties between equally good actions at random
        action_idx = utils.random_argmax(action_goodness)

        allocentric_action = available_actions[action_idx]
        # Back to egocentric reference frame
        egocentric_action = get_relative_angle(allocentric_action,
                                               self.env.curr_orientation)
        return egocentric_action, action_goodness[action_idx]
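
Examples 3 and 4 break ties with utils.random_argmax, whose source is not shown here. A common NumPy idiom that matches its usage, offered as an assumed sketch rather than the original implementation:

    import numpy as np

    def random_argmax(values):
        # Like np.argmax, but picks uniformly at random among ties,
        # so the agent does not always favour the first best action.
        values = np.asarray(values)
        return np.random.choice(np.flatnonzero(values == values.max()))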
Example 4
    def choose_action(self):
        available_actions = self.get_available_actions()

        # Score each action by the change in goal cell firing rate it
        # would produce
        action_goodness = []
        for act in available_actions:
            next_x, next_y = self.env.compute_new_position(act)
            goal_cell_rate_diff = self.get_goal_cell_rate(
                next_x, next_y) - self.goal_cell_rate
            action_goodness.append(goal_cell_rate_diff)

        # if np.all(np.array(action_goodness) < 0):  # top reached but not goal
        #     self.weights *= 0

        action_idx = utils.random_argmax(action_goodness)

        allocentric_action = available_actions[action_idx]
        # Back to egocentric reference frame
        egocentric_action = get_relative_angle(allocentric_action,
                                               self.env.curr_orientation)
        return egocentric_action, action_goodness[action_idx]
Example 5
    def compute_gen_phase(self):
        # Activity falls off exponentially with the absolute angular
        # distance between each action and the chosen direction
        chosen_direction = self.env.actions[self.env.curr_action_idx]
        normaliser = 2 * self.generalisation_phase_var
        self.generalisation_phase_activity = \
            np.exp(-abs(get_relative_angle(self.env.actions, chosen_direction)) / normaliser)
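
In equation form, Example 5 computes g_i = exp(-|Δθ_i| / (2 * generalisation_phase_var)), where Δθ_i is the relative angle between action i and the chosen direction. A small self-contained demo with made-up values (the action set and the spread parameter are assumptions), reusing the get_relative_angle sketch above:

    import numpy as np

    actions = np.arange(0, 360, 45)   # hypothetical action directions, degrees
    chosen_direction = 90             # the chosen action's direction
    generalisation_phase_var = 30.0   # hypothetical spread parameter

    gen_activity = np.exp(
        -abs(get_relative_angle(actions, chosen_direction))
        / (2 * generalisation_phase_var))
    print(gen_activity.round(3))  # peaks at 1.0 for the chosen direction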