def ssp_to_loc_v_low_mem(sps, heatmap_vectors, xs, ys):
    """
    low memory version of vectorized version of ssp_to_loc
    Convert an encoding to the approximate location that it represents.
    Uses the heatmap vectors as a lookup table
    :param sps: array of encoded vectors of interest
    :param heatmap_vectors: encoding for every point in the space defined by xs and ys
    :param xs: linspace in x
    :param ys: linspace in y
    :return: array of the 2D coordinates that the encoding most closely represents
    """

    assert (len(sps.shape) == 2)
    assert (len(heatmap_vectors.shape) == 3)
    assert (sps.shape[1] == heatmap_vectors.shape[2])

    res_x = heatmap_vectors.shape[0]
    res_y = heatmap_vectors.shape[1]
    n_samples = sps.shape[0]

    # fast but memory intensive version
    # # Compute the dot product of every semantic pointer with every element in the heatmap
    # # vs will be of shape (n_samples, res_x, res_y)
    # vs = np.tensordot(sps, heatmap_vectors, axes=([-1], [2]))
    #
    # # Find the x and y indices for every sample. xys is a list of two elements.
    # # Each element in a numpy array of shape (n_samples,)
    # xys = np.unravel_index(vs.reshape((n_samples, res_x * res_y)).argmax(axis=1), (res_x, res_y))
    #
    # # Transform into an array containing coordinates
    # # locs will be of shape (n_samples, 2)
    # locs = np.vstack([xs[xys[0]], ys[xys[1]]]).T

    # slow, low-memory version: look up one sample at a time
    locs = np.zeros((n_samples, 2))
    for n in range(n_samples):
        locs[n] = ssp_to_loc(sps[n, :], heatmap_vectors, xs, ys)

    assert (locs.shape[0] == n_samples)
    assert (locs.shape[1] == 2)

    return locs
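
# A minimal usage sketch with hypothetical grid resolution and random stand-in
# encodings (it assumes ssp_to_loc from the same module is available): the query
# vectors are taken straight from heatmap_vectors, so the recovered coordinates
# should land on the grid points they were sampled from.
import numpy as np

xs = np.linspace(-5, 5, 64)
ys = np.linspace(-5, 5, 64)
dim = 256  # hypothetical encoding dimensionality
heatmap_vectors = np.random.randn(len(xs), len(ys), dim)  # stand-in for real SSP encodings

queries = np.stack([heatmap_vectors[10, 20, :], heatmap_vectors[40, 5, :]])
locs = ssp_to_loc_v_low_mem(queries, heatmap_vectors, xs, ys)
# locs[0] should be approximately (xs[10], ys[20]); locs[1] approximately (xs[40], ys[5])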
            #         # localization ghost
            #         agent_ssp = agent.localization_network(
            #             inputs=(torch.cat([velocity, distances, map_id], dim=1),),
            #             initial_ssp=agent.agent_ssp
            #         ).squeeze(0)
            #     elif args.ghost == 'snapshot_loc':
            #         # snapshot localization ghost
            #         agent_ssp = agent.snapshot_localization_network(torch.cat([distances, map_id], dim=1))
            #     agent_loc = ssp_to_loc(agent_ssp.squeeze(0).detach().numpy(), heatmap_vectors, xs, ys)
            #     # Scale to env coordinates, from (-5,5) to (0,13)
            #     agent_loc = ((agent_loc - xs[0]) / limit_range) * coarse_size
            #     env.render_ghost(x=agent_loc[0], y=agent_loc[1])

            if args.ghost:
                agent_loc = ssp_to_loc(
                    agent.clean_agent_ssp.squeeze(0).detach().numpy(),
                    heatmap_vectors, xs, ys)
                # Scale to env coordinates, from (-5,5) to (0,13)
                agent_loc = ((agent_loc - xs[0]) / limit_range) * coarse_size
                env.render_ghost(x=agent_loc[0], y=agent_loc[1])

            if args.normalize_action:
                mag = np.linalg.norm(action)
                if mag > 0.001:
                    action = action / mag

            # Add small amount of noise to the action
            action += np.random.normal(size=2) * args.noise

            obs, reward, done, info = env.step(action)
            # print(obs)
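
# Sanity check of the coordinate rescaling used for render_ghost above, with
# hypothetical values consistent with the comment "from (-5,5) to (0,13)":
# limit_range is the span of xs and coarse_size is the coarse map width.
xs_lo, xs_hi = -5.0, 5.0
limit_range = xs_hi - xs_lo   # 10
coarse_size = 13
for ssp_coord in (-5.0, 0.0, 5.0):
    env_coord = ((ssp_coord - xs_lo) / limit_range) * coarse_size
    print(ssp_coord, "->", env_coord)   # -5 -> 0.0, 0 -> 6.5, 5 -> 13.0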
Example #3
    def __init__(
        self,
        current_loc_sp,
        goal_loc_sp,
        closest_landmark_id,
        allo_connections_sp,
        landmark_map_sp,
        landmark_vectors,
        x_axis_sp,
        y_axis_sp,
        xs,
        ys,
        heatmap_vectors,
        diameter_increment=1,
        expanded_list=None,
        threshold=0.08,
        # threshold=0.68,
        normalize=True,
    ):

        self.current_loc_sp = current_loc_sp
        self.goal_loc_sp = goal_loc_sp
        self.closest_landmark_id = closest_landmark_id
        self.allo_connections_sp = allo_connections_sp
        self.landmark_map_sp = landmark_map_sp
        self.landmark_vectors = landmark_vectors

        # List of the indices of already expanded nodes (as they appear in 'landmark_vectors').
        # Default to a fresh list per instance to avoid the shared-mutable-default bug.
        self.expanded_list = expanded_list if expanded_list is not None else []

        # Similarity threshold for finding a match with the elliptic region
        # TODO: threshold should decrease with region size
        self.threshold = threshold

        # Whether or not to normalize the ellipse region SP
        self.normalize = normalize

        self.x_axis_sp = x_axis_sp
        self.y_axis_sp = y_axis_sp
        self.xs = xs
        self.ys = ys
        self.heatmap_vectors = heatmap_vectors

        # Amount to increase the diameter by on each step
        self.diameter_increment = diameter_increment

        self.current_loc = ssp_to_loc(
            self.current_loc_sp,
            heatmap_vectors=self.heatmap_vectors,
            xs=self.xs,
            ys=self.ys,
        )

        # TODO: this can just be calculated once rather than in every expanding node
        self.goal_loc = ssp_to_loc(
            self.goal_loc_sp,
            heatmap_vectors=self.heatmap_vectors,
            xs=self.xs,
            ys=self.ys,
        )

        # current diameter of the major axis of the ellipse
        # start it as the distance between the current node and the goal, forming a line
        self.diameter = np.linalg.norm(self.current_loc - self.goal_loc)

        # print("starting diameter", self.diameter)
        # print("current_loc", self.current_loc)
        # print("goal_loc", self.goal_loc)

        # add some diameter to give it a width:
        self.diameter += self.diameter_increment

        self.ellipse_sp = generate_elliptic_region_vector(
            xs=self.xs,
            ys=self.ys,
            x_axis_sp=self.x_axis_sp,
            y_axis_sp=self.y_axis_sp,
            f1=self.current_loc,
            f2=self.goal_loc,
            diameter=self.diameter,
            normalize=self.normalize,
        )
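
# The elliptic region above is defined by its two foci (the current and goal
# locations) and the major-axis diameter: a point lies inside when the sum of its
# distances to the foci is at most the diameter. A minimal NumPy sketch of that
# membership test (not the SSP region construction itself):
import numpy as np

def in_ellipse(p, f1, f2, diameter):
    return np.linalg.norm(p - f1) + np.linalg.norm(p - f2) <= diameter

f1 = np.array([0.0, 0.0])                   # e.g. current_loc
f2 = np.array([3.0, 0.0])                   # e.g. goal_loc
diameter = np.linalg.norm(f1 - f2) + 1.0    # starting diameter plus one increment
print(in_ellipse(np.array([1.5, 0.4]), f1, f2, diameter))   # True: near the focal segment
print(in_ellipse(np.array([1.5, 3.0]), f1, f2, diameter))   # False: well outside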
Example #4
    def __init__(
            self,
            start_landmark_id,
            end_landmark_id,
            landmark_map_sp,
            con_ego_sp,
            con_allo_sp,
            landmark_vectors,
            x_axis_sp,
            y_axis_sp,
            xs,
            ys,
            heatmap_vectors,
            # params for debugging
            true_allo_con_sps,
            connectivity_list,
            con_calculation='true_allo',
            normalize=True,
            debug_mode=False,
            # ellipse params
            diameter_increment=1,
            **unused_params):

        self.debug_mode = debug_mode

        # Various methods for calculating the connectivity of a particular node. Used for debugging
        assert con_calculation in ['ego', 'allo', 'true_allo']
        self.con_calculation = con_calculation

        self.start_landmark_id = start_landmark_id
        self.end_landmark_id = end_landmark_id
        self.landmark_map_sp = landmark_map_sp
        self.con_ego_sp = con_ego_sp
        self.con_allo_sp = con_allo_sp
        self.landmark_vectors = landmark_vectors

        self.x_axis_sp = x_axis_sp
        self.y_axis_sp = y_axis_sp
        self.xs = xs
        self.ys = ys
        self.heatmap_vectors = heatmap_vectors

        self.true_allo_con_sps = true_allo_con_sps
        self.connectivity_list = connectivity_list
        # Whether or not to normalize the ellipse region SP
        self.normalize = normalize

        self.diameter_increment = diameter_increment

        start_landmark_sp = spa.SemanticPointer(
            self.landmark_vectors[self.start_landmark_id])
        end_landmark_sp = spa.SemanticPointer(
            self.landmark_vectors[self.end_landmark_id])

        current_loc_sp = self.landmark_map_sp * ~start_landmark_sp
        self.goal_loc_sp = self.landmark_map_sp * ~end_landmark_sp

        self.goal_loc = ssp_to_loc(self.goal_loc_sp,
                                   heatmap_vectors=self.heatmap_vectors,
                                   xs=self.xs,
                                   ys=self.ys)

        # egocentric displacements to nearby landmarks
        ego_connections_sp = con_ego_sp * ~start_landmark_sp

        # allocentric coordinates of nearby landmarks
        if self.con_calculation == 'ego':
            # calculating from ego
            allo_connections_sp = current_loc_sp * ego_connections_sp
        elif self.con_calculation == 'allo':
            # getting true value from allo
            allo_connections_sp = self.con_allo_sp * ~start_landmark_sp
        elif self.con_calculation == 'true_allo':
            # get a clean value from allo
            allo_connections_sp = self.true_allo_con_sps[
                self.start_landmark_id]
        else:
            raise NotImplementedError

        # dictionary of nodes currently being expanded
        self.expanding_nodes = {
            self.start_landmark_id:
            ExpandingNode(
                current_loc_sp=current_loc_sp,
                goal_loc_sp=self.goal_loc_sp,
                closest_landmark_id=self.start_landmark_id,
                allo_connections_sp=allo_connections_sp,
                landmark_map_sp=self.landmark_map_sp,
                landmark_vectors=self.landmark_vectors,
                x_axis_sp=self.x_axis_sp,
                y_axis_sp=self.y_axis_sp,
                xs=self.xs,
                ys=self.ys,
                heatmap_vectors=self.heatmap_vectors,
                normalize=self.normalize,
            )
        }
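
# The pattern landmark_map_sp * ~start_landmark_sp above unbinds a landmark from
# the map to recover its (noisy) location pointer. A stand-alone sketch of the same
# idea, using plain NumPy circular convolution in place of the spa library's operators:
import numpy as np

def cconv(a, b):
    # circular convolution: the binding operation for these semantic pointers
    return np.real(np.fft.ifft(np.fft.fft(a) * np.fft.fft(b)))

def approx_inverse(a):
    # stand-in for the ~ operator: reverse every element except the first
    return np.concatenate([a[:1], a[1:][::-1]])

dim = 256
landmark = np.random.randn(dim) / np.sqrt(dim)
location = np.random.randn(dim) / np.sqrt(dim)

landmark_map = cconv(landmark, location)                    # MAP = LANDMARK (*) LOCATION
recovered = cconv(landmark_map, approx_inverse(landmark))   # unbind with ~LANDMARK
print(np.dot(recovered, location))                              # high similarity (close to 1, up to noise)
print(np.dot(recovered, np.random.randn(dim) / np.sqrt(dim)))   # near zero for an unrelated vector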
Example #5
def surface_to_env(x):
    xy = ssp_to_loc(sp=x, heatmap_vectors=heatmap_vectors, xs=xs, ys=ys)
    # print(xy)
    return xy / (10 / 2) - 1
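
# Quick numeric check of the rescaling above (assuming the workspace spans (0, 10),
# which is what the formula implies): xy / (10 / 2) - 1 maps 0 -> -1 and 10 -> +1.
for xy in (0.0, 5.0, 10.0):
    print(xy, "->", xy / (10 / 2) - 1)   # -1.0, 0.0, 1.0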
Example #6
def policy_gt(map_id, agent_ssp, goal_ssp, env, coarse_planning=True):
    if coarse_planning:
        maze = coarse_mazes[np.argmax(map_id)]

        if True:  # ssp_to_loc path; the tensordot path in the else branch is an alternative kept for reference
            # Convert agent and goal SSP into 2D locations
            agent_loc = ssp_to_loc(
                agent_ssp.squeeze(0).detach().numpy(), heatmap_vectors, xs, ys)
            goal_loc = ssp_to_loc(
                goal_ssp.squeeze(0).detach().numpy(), heatmap_vectors, xs, ys)

            agent_loc_scaled = (
                (agent_loc - xs[0]) / limit_range) * coarse_size
            goal_loc_scaled = ((goal_loc - xs[0]) / limit_range) * coarse_size

            start_indices = np.round(agent_loc_scaled).astype(np.int32)
            goal_indices = np.round(goal_loc_scaled).astype(np.int32)
            # start_indices = np.ceil(agent_loc_scaled).astype(np.int32)
            # goal_indices = np.ceil(goal_loc_scaled).astype(np.int32)
        else:
            vs = np.tensordot(agent_ssp.squeeze(0).detach().numpy(),
                              coarse_heatmap_vectors,
                              axes=([0], [2]))
            start_indices = np.unravel_index(vs.argmax(), vs.shape)

            vs = np.tensordot(goal_ssp.squeeze(0).detach().numpy(),
                              coarse_heatmap_vectors,
                              axes=([0], [2]))
            goal_indices = np.unravel_index(vs.argmax(), vs.shape)

        # env.render_ghost(x=start_indices[0], y=start_indices[1])

        solved_maze = solve_maze(maze,
                                 start_indices=start_indices,
                                 goal_indices=goal_indices,
                                 full_solve=True,
                                 strict_cornering=True)

        # Return the action for the current location
        return solved_maze[start_indices[0], start_indices[1], :]
    else:
        maze = fine_mazes[np.argmax(map_id)]

        vs = np.tensordot(agent_ssp.squeeze(0).detach().numpy(),
                          heatmap_vectors,
                          axes=([0], [2]))
        start_indices = np.unravel_index(vs.argmax(), vs.shape)

        vs = np.tensordot(goal_ssp.squeeze(0).detach().numpy(),
                          heatmap_vectors,
                          axes=([0], [2]))
        goal_indices = np.unravel_index(vs.argmax(), vs.shape)

        solved_maze = solve_maze(maze,
                                 start_indices=start_indices,
                                 goal_indices=goal_indices,
                                 full_solve=False,
                                 strict_cornering=True)

        # Return the action for the current location
        return solved_maze[start_indices[0], start_indices[1], :]
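
# The tensordot/argmax branches above recover grid indices directly, while the
# ssp_to_loc branch recovers coordinates that are then rescaled to indices. A small
# sketch of the direct-index path, with hypothetical shapes and stand-in encodings:
import numpy as np

dim = 256
heatmap_vectors = np.random.randn(10, 10, dim)   # stand-in encodings on a 10x10 grid
agent_vec = heatmap_vectors[3, 7, :]             # query taken from the table itself

vs = np.tensordot(agent_vec, heatmap_vectors, axes=([0], [2]))
start_indices = np.unravel_index(vs.argmax(), vs.shape)
print(start_indices)   # (3, 7): the grid cell whose encoding best matches the query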