Example #1
def get_map_feats(feature_map,
                  batch_shape,
                  batch_size,
                  last_agent_positions_grid,
                  A,
                  phi=None):
    """Interpolate the positions into the feature map.

    :param feature_map: (B, H, W, F), feature map
    :param batch_shape: first several dimensions of positions. could be inferred but we require it for sanity checks.
    :param batch_size: size of batch. could be inferred but we require it for sanity checks.
    :param last_agent_positions_grid: batch_shape + (A, D)
    :param A: number of agents. could be inferred but we require it for sanity checks.
    :param phi: DEPRECATED, unused
    :returns: map features of shape (batch_size, A, F)
    :rtype: tf.Tensor

    """
    assert (tensoru.rank(feature_map) == 4)
    assert (tensoru.rank(last_agent_positions_grid) >= 3)
    F = tensoru.size(feature_map, -1)

    # (B, batch_shape[1:]*A, d)
    last_agent_positions_grid_r_xy = tf.reshape(last_agent_positions_grid,
                                                (batch_shape[0], -1, 2))
    last_agent_positions_grid_r_ij = last_agent_positions_grid_r_xy[..., ::-1]
    # (B, batch_shape[1:]*A, F)
    # N.B. the indexing order! Our points are stored in xy-format, with x corresponding to the W dimension of the feature grid.
    map_feat = tfu.interpolate_bilinear(feature_map,
                                        last_agent_positions_grid_r_ij,
                                        indexing='ij')
    # (BSize, A, F)
    map_feats = tf.reshape(map_feat, (batch_size, A, F))
    return map_feats
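
A minimal usage sketch (shapes are illustrative; it assumes the repo's tensoru and tfu helpers are importable):

import tensorflow as tf
B, H, W, F_dim, A = 2, 64, 64, 8, 3
feature_map = tf.random.uniform((B, H, W, F_dim), dtype=tf.float64)
# Grid xy-positions, one per agent: batch_shape + (A, 2) with batch_shape == (B,).
positions = tf.random.uniform((B, A, 2), maxval=float(H), dtype=tf.float64)
map_feats = get_map_feats(feature_map, batch_shape=(B,), batch_size=B,
                          last_agent_positions_grid=positions, A=A)
# map_feats.shape == (B, A, F_dim)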
Example #2
    def prepare(self, batch_shape):
        if len(batch_shape) == 2: self.batch_str = 'bk'
        elif len(batch_shape) == 1: self.batch_str = 'b'
        else: raise ValueError("Unhandled batch shape length")

        if len(batch_shape) == 1:
            self.current_local2world = self.original_local2world = self.world2local.invert()
            # Input spaces: Car frames. Output space: grid frame.
            self.current_local2grid = self.original_local2grid = self.world2grid * self.current_local2world
        else:
            R = tensoru.repeat_expand_dims(self.world2local.R,
                                           axis=1,
                                           n=len(batch_shape) - 1)
            R = tf.tile(R, (1, ) + batch_shape[1:] + (1, ) *
                        (tensoru.rank(self.world2local.R) - 1))
            t = tensoru.repeat_expand_dims(self.world2local.t,
                                           axis=1,
                                           n=len(batch_shape) - 1)
            t = tf.tile(t, (1, ) + batch_shape[1:] + (1, ) *
                        (tensoru.rank(self.world2local.t) - 1))
            # Use this object to track the car frames during the rollout.
            # Input space: world coordinates. Output spaces: car frame coordinates.
            world2local = similarityu.SimilarityTransform(
                R=R, t=t, scale=self.world2local.scale)
            # "current_local2<X>" frames track the frame as it rolls out. The "local" frames are fixed, however.
            self.current_local2world = self.original_local2world = world2local.invert()
            # Input spaces: Car frames. Output space: grid frame.
            self.current_local2grid = self.original_local2grid = self.world2grid * self.current_local2world
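
The repeat_expand_dims + tile pattern above broadcasts each per-batch transform across the K rollout samples. An equivalent pure-TF sketch of that broadcast (illustrative shapes only):

import tensorflow as tf
R = tf.eye(2, batch_shape=(4,))           # (B, 2, 2) per-batch rotations
R_bk = tf.tile(R[:, None], (1, 6, 1, 1))  # (B, K, 2, 2), repeated across K = 6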
Example #3
def signed_distance_transform(binary_image,
                              normalize=True,
                              clip_top=1,
                              clip_bottom=-10,
                              dtype=np.float64):
    """

    :param binary_image: np.ndarray with boolean type and two dimensions.
    :param normalize: whether to normalize the result to [0, 1]
    :param clip_top: if normalizing, positive distance at which to clip
    :param clip_bottom: if normalizing, negative distance at which to clip
    :param dtype: dtype of the returned array
    :returns: the signed distance transform of binary_image (positive inside the True region), optionally normalized to [0, 1]
    :rtype: np.ndarray

    """
    assert (binary_image.dtype == np.dtype(bool))
    assert (tensoru.rank(binary_image) == 2)

    dt = morph.distance_transform_edt
    sdt = (dt(binary_image) - dt(~binary_image)).astype(dtype)
    if not normalize:
        return sdt
    else:
        assert (clip_top > clip_bottom)
        assert (clip_top >= 1)
        sdt[sdt > clip_top] = clip_top
        sdt[sdt < clip_bottom] = clip_bottom
        return (sdt - clip_bottom) / (clip_top - clip_bottom)
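
A runnable usage sketch (assuming morph above is scipy.ndimage and that tensoru.rank returns ndim for numpy arrays):

import numpy as np
image = np.zeros((8, 8), dtype=bool)
image[2:6, 2:6] = True                  # a filled square
sdt = signed_distance_transform(image)  # normalized: deeper inside -> closer to 1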
Example #4
def one_hotify_light_strings(light_strings):
    """Featurize light strings in the tensorflow graph. By putting it in the graph, we don't have to remember how they're featurized
    when it comes to inference time.

    :param light_strings: rank-1 tensor of strings in {'NONE', 'GREEN', 'YELLOW', 'INTERSECTION', 'RED'} (case-insensitive)
    :returns: one-hot features, shape (N, 5)
    :rtype: tf.Tensor

    """
    light_strings = tf.strings.upper(light_strings)
    assert (tensoru.rank(light_strings) == 1)
    eye = tf.eye(5, dtype=tf.float64)[..., None]
    c = lambda x: tf.cast(x, tf.float64)
    none = c(tf.equal(light_strings, 'NONE'))[None]
    green = c(tf.equal(light_strings, 'GREEN'))[None]
    yellow = c(tf.equal(light_strings, 'YELLOW'))[None]
    intersection = c(tf.equal(light_strings, 'INTERSECTION'))[None]
    red = c(tf.equal(light_strings, 'RED'))[None]

    # Ensure there's exactly one categorization for each input.
    with tf.control_dependencies([
            tf.compat.v1.assert_equal(
                none + green + yellow + intersection + red, tf.ones_like(none))
    ]):
        # This puts them in separate bins.
        feats = tf.transpose(eye[0] * none + eye[1] * green +
                             eye[2] * intersection + eye[3] * yellow +
                             eye[4] * red)
        # This puts them all in the same bin.
        # feats = tf.transpose(eye[0] * none + eye[0] * green + eye[0] * intersection + eye[0] * yellow + eye[4] * red)
    return feats
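
Usage sketch (eager TF assumed; matching is case-insensitive thanks to the upper-casing above):

lights = tf.constant(['green', 'RED', 'none'])
feats = one_hotify_light_strings(lights)  # shape (3, 5), one one-hot row per input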
Example #5
    def _Rtheta(cls, theta, lib):
        Rs = lib.stack([(lib.cos(theta), -lib.sin(theta)),
                        (lib.sin(theta), lib.cos(theta))],
                       axis=0)
        # (x, y, ...) -> (..., x, y)
        if tensoru.rank(Rs) > 2:
            Rs = tensoru.rotate_left(tensoru.rotate_left(Rs, lib=lib), lib=lib)
        return Rs
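
An illustrative check with numpy as the backend (the owning class name is an assumption; _Rtheta appears to be a classmethod of the SimilarityTransform class shown in Example #6):

import numpy as np
R = SimilarityTransform._Rtheta(np.pi / 2, lib=np)
# R ~= [[0, -1],
#       [1,  0]], i.e. it rotates (1, 0) onto (0, 1).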
Example #6
    def __init__(self, R, t, scale, lib=tf):
        """
        Implements x' = f(x; R, t, scale) = scale * R * x + t for batched data.

        t represents the origin of the transformed coordinates.
        R represents the pre-translation rotation matrix.

        :param R: (B, A, 2, 2) or (B, 2, 2) or (2, 2)
        :param t: (B, A, 2) or (B, 2) or (2,)
        :param scale: scalar
        :param lib: numerical backend, np or tf

        """
        assert (lib in (np, tf))
        # Stash the transform parameters; `apply` and the tiling below read these.
        self.R, self.t, self.scale, self.lib = R, t, scale, lib
        self.rr = tensoru.rank(R)
        self.tr = tensoru.rank(t)
        # self.theta = lib.atan2(R[..., 1, 0], R[..., 0, 0])
        joint_rank = (self.rr, self.tr)
        assert (joint_rank in [(5, 4), (4, 3), (3, 2), (2, 1)])
        if self.rr == 5: self._ein = 'bkaij'
        elif self.rr == 4: self._ein = 'baij'
        elif self.rr == 3: self._ein = 'bij'
        elif self.rr == 2: self._ein = 'ij'
        else: raise ValueError("Unhandled Rotation matrix dimensionality!")

        # Maintain some tiled versions for broadcasted adding.
        self._ts = {}
        if self.tr == 3:
            self._ts['baj'] = self.t
            self._ts['batj'] = tensoru.repeat_expand_dims(self.t, 1, axis=-2)
            self._ts['bkaj'] = tensoru.swap_axes(self._ts['batj'], 1, 2)
            self._ts['bkatj'] = tensoru.repeat_expand_dims(self._ts['batj'],
                                                           1,
                                                           axis=1)
            self._ts['baNj'] = self.t[..., None, :]
        elif self.tr == 4:
            self._ts['bkaj'] = self.t
            self._ts['batj'] = tensoru.swap_axes(self._ts['bkaj'], 1, 2)
            self._ts['bkaNj'] = self.t[..., None, :]
            # (assumed) 'bkatj' tiling, mirroring the tr == 3 branch; `apply`'s
            # default points_ein for rank-5 input looks this key up.
            self._ts['bkatj'] = tensoru.repeat_expand_dims(self.t, 1, axis=-2)
        else:
            pass
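
A minimal construction sketch with the numpy backend (assumes tensoru.rank also handles numpy arrays); it is applied under Example #8 below:

import numpy as np
R = np.array([[0., -1.], [1., 0.]])  # 90-degree rotation; rank 2 -> ein 'ij'
t = np.array([1., 0.])
T = similarityu.SimilarityTransform(R=R, t=t, scale=2.0, lib=np)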
Example #7
def get_whisker_map_feats(feature_map,
                          batch_shape,
                          batch_size,
                          cars2grid,
                          template_cars,
                          A,
                          phi=None):
    """Interpolate the positions into the feature map.

    :param feature_map: (B, H, W, F), feature map
    :param batch_shape: first several dimensions of positions. could be inferred but we require it for sanity checks.
    :param batch_size: size of batch. could be inferred but we require it for sanity checks.
    :param cars2grid: SimilarityTransform from car frames to grid
    :param template_cars: (N, D) template of positions in the local (car) frame at which to interpolate
    :param A: number of agents. could be inferred but we require it for sanity checks.
    :param phi: DEPRECATED, unused
    :returns: (map_feats, whiskers_grid); map_feats has shape (batch_size, A, n_whiskers * F)
    :rtype: tuple

    """

    assert (tensoru.rank(feature_map) == 4)
    F = tensoru.size(feature_map, -1)

    if len(batch_shape) == 2:
        points_ein = 'bkaNj'
        template_cars = template_cars[None]
    elif len(batch_shape) == 1:
        points_ein = 'baNj'
    else:
        raise ValueError(
            "Unhandled batch shape length: {}".format(len(batch_shape)))

    n_whiskers = tensoru.size(template_cars, -2)
    whiskers_grid = cars2grid.apply(template_cars, points_ein=points_ein)
    whiskers_grid_r_xy = tf.reshape(whiskers_grid, (batch_shape[0], -1, 2))
    # (B, batch_shape[1:]*A, F)
    # N.B. the indexing order! Our points are stored in xy-format, with x corresponding to the W dimension of the feature grid.
    map_feat = tfu.interpolate_bilinear(feature_map,
                                        whiskers_grid_r_xy,
                                        indexing='xy')
    # (B, ..., A, n_whiskers, F)
    map_feat_r = tf.reshape(map_feat, batch_shape + (A, n_whiskers, F))
    # (B*..., A, n_whiskers*F)
    map_feats = tf.reshape(map_feat_r, (batch_size, A, n_whiskers * F))
    return map_feats, whiskers_grid
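
A toy whisker template in the car frame (purely illustrative; assumes D == 2 with units of grid cells):

import numpy as np
xs, ys = np.meshgrid(np.linspace(0., 4., 3), np.linspace(-2., 2., 3))
template_cars = np.stack([xs, ys], axis=-1).reshape(-1, 2)
# (9, 2): probe points ahead of and beside each agent; cars2grid maps them
# into the feature grid before interpolation.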
Example #8
    def apply(self,
              points,
              points_ein=None,
              dtype=None,
              name=None,
              translate=True):
        input_rank = tensoru.rank(points)
        if dtype is None: dtype = self.lib.float64
        assert (dtype in (self.lib.float64, self.lib.float32, self.lib.int32))

        if points_ein is None:
            if input_rank == 5:
                points_ein = 'bkatj'
                # N.B. rank-4 'bkaj' input can't be inferred from rank alone (the
                # rank-4 default is 'batj'); pass points_ein explicitly for 'bkaj'.
            elif input_rank == 4:
                points_ein = 'batj'
            elif input_rank == 3:
                points_ein = 'baj'
            elif input_rank == 2:
                points_ein = 'bj'
            elif input_rank == 1:
                points_ein = 'j'
            else:
                raise ValueError(
                    "Unhandled points dimensionality: {}".format(input_rank))

        # Ensure input only uses certain characters
        assert (len(set(points_ein) - set('bkatjN')) == 0)

        einstr = '{},{}->{}'.format(self._ein, points_ein,
                                    points_ein.replace('j', 'i'))
        points_R = self.scale * self.lib.einsum(einstr, self.R, points)

        # Add the translation.
        if translate:
            if self.tr == 1: points_Rt = points_R + self.t
            else: points_Rt = points_R + self._ts[points_ein]
        else:
            points_Rt = points_R

        if dtype == self.lib.int32:
            return self.lib.cast(self.lib.round(points_Rt), self.lib.int32)
        else:
            return points_Rt
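
Continuing the numpy sketch from Example #6, apply computes scale * R @ x + t:

p = np.array([1., 0.])
T.apply(p)  # 2 * R @ p + t = array([1., 2.])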
Example #9
def plot_joint_trajectory(joint_traj,
                          key='joint_trajectories',
                          fig=None,
                          ax=None,
                          render=True,
                          limit=100,
                          **kwargs):
    """

    :param joint_traj: K x A x T x d
    :param key: name forwarded to plot_trajectory
    :param fig: existing figure to draw into, or None to create one
    :param ax: existing axes to draw into, or None
    :param render: whether to render once the last agent is plotted
    :param limit: axis extent, plotted as [0, limit, limit, 0]
    :param kwargs: forwarded to plot_trajectory
    :returns: (fig, ax)
    :rtype: tuple

    """

    assert (tensoru.rank(joint_traj) == 4)
    A = tensoru.size(joint_traj, 1)
    for a in range(A):
        render_a = (a == A - 1) and render
        color = kwargs.get('color', COLORS[a])
        if isinstance(joint_traj, tf.Tensor):
            single_traj = joint_traj[:, a].numpy()
        else:
            single_traj = joint_traj[:, a]
        kwargs.pop('color', None)
        fig, ax = plot_trajectory(key,
                                  single_traj,
                                  color=color,
                                  render=render_a,
                                  fig=fig,
                                  ax=ax,
                                  axis=[0, limit, limit, 0],
                                  **kwargs)
        assert (fig is not None)
    return fig, ax
Example #10
def plot_joint_trajectory(joint_traj,
                          limit,
                          scale=1,
                          agents=None,
                          fig=None,
                          ax=None,
                          **kwargs):
    assert (tensoru.rank(joint_traj) == 4)

    if agents is None:
        agents = range(tensoru.size(joint_traj, 1))

    for a in agents:

        if "color" in kwargs:
            color = kwargs.pop("color")
        else:
            color = cm.get_cmap("tab10").colors[a]

        if isinstance(joint_traj, tf.Tensor):
            single_traj = joint_traj[:, a].numpy().copy()
        else:
            single_traj = joint_traj[:, a].copy()

        if scale > 0:
            single_traj *= scale
        # Flip the last (y) coordinate, presumably to match the image-frame
        # axis direction used for plotting.
        single_traj[..., -1] *= -1

        fig, ax = plot_trajectory(single_traj,
                                  color=color,
                                  fig=fig,
                                  ax=ax,
                                  axis=limit,
                                  **kwargs)
        assert (fig is not None)
    return fig, ax
Example #11
    def inverse(self, S, phi):
        """Implements the inverse component of the bijection. 

        :param S: output states. Must be in __CAR FRAMES__.
        :param phi: context
        :returns: rollout containing the recovered latents Z and per-step statistics
        :rtype: interface.ESPRollout

        """
        r = tensoru.rank(S)

        assert (r in (4, 5))
        if r == 4:
            B, A, T, D = tensoru.shape(S)
            S_0 = phi.S_past_car_frames[..., -1, :]
            S_m1 = phi.S_past_car_frames[..., -2, :]
            batch_shape = (B, )
        elif r == 5:
            B, K, A, T, D = tensoru.shape(S)
            S_0 = tensoru.expand_and_tile_axis(phi.S_past_car_frames[...,
                                                                     -1, :],
                                               N=K,
                                               axis=1)
            S_m1 = tensoru.expand_and_tile_axis(phi.S_past_car_frames[...,
                                                                      -2, :],
                                                N=K,
                                                axis=1)
            batch_shape = (B, K)
        else:
            raise ValueError("Unhandled rank of data to invert.")

        S_history = [S_m1, S_0]

        Z_history = []
        m_history = []
        mu_history = []
        sigel_history = []
        sigma_history = []
        metadata_history = []

        phi.prepare(batch_shape)
        self._prepare(batch_shape, phi)
        for t_idx in range(T):
            m_t, sigel_t, sigma_t = self.step_generate(S_history, phi)
            # Constant-velocity extrapolation from the last two states, plus the
            # learned offset m_t.
            mu_t = m_t + 2 * S_history[-1] - S_history[-2]

            # S first, then compute Z.
            S_t = S[..., t_idx, :]
            # expm(X) expm(-X) = I -> (expm(X))^{-1} = expm(-X). Avoids computing any inverses explicitly.
            sigma_t_inv = tf.linalg.expm(-1 * sigel_t)
            Z_t = tf.einsum('...ij,...j->...i', sigma_t_inv, S_t - mu_t)
            phi.update_frames(S_t_car_frames=S_t,
                              S_tm1_car_frames=S_history[-1])

            m_history.append(m_t)
            mu_history.append(mu_t)
            sigel_history.append(sigel_t)
            sigma_history.append(sigma_t)
            S_history.append(S_t)
            Z_history.append(Z_t)
            metadata_history.append(self.current_metadata)

        roll = interface.ESPRollout(S_car_frames_list=S_history[2:],
                                    Z_list=Z_history,
                                    m_list=m_history,
                                    mu_list=mu_history,
                                    sigma_list=sigma_history,
                                    sigel_list=sigel_history,
                                    metadata_list=metadata_history,
                                    phi=phi)
        return roll
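
The inverse-free trick above rests on the identity expm(X) @ expm(-X) == I; a quick standalone check:

import tensorflow as tf
X = tf.constant([[0.3, -0.1], [0.2, 0.4]], dtype=tf.float64)
I_approx = tf.linalg.expm(X) @ tf.linalg.expm(-X)  # ~= tf.eye(2)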
Example #12
    def forward(self, Z, phi):
        """Implements the forward component of the bijection. Implemented in __CAR FRAMES__.

        :param Z: latent states
        :param phi: context
        :returns: rollout of the warped states S and per-step statistics
        :rtype: interface.ESPRollout

        """
        r = tensoru.rank(Z)

        with contextlib.ExitStack() as stack:
            if getattr(self, 'debug_eager', False):
                tape = tf.GradientTape(persistent=True)
                stack.enter_context(tape)
                tape.watch(phi.S_past_car_frames)
                tape.watch(phi.overhead_features)
            assert (r in (4, 5))
            if r == 4:
                B, A, T, D = tensoru.shape(Z)
                S_0 = phi.S_past_car_frames[..., -1, :]
                S_m1 = phi.S_past_car_frames[..., -2, :]
                batch_shape = (B, )
            elif r == 5:
                B, K, A, T, D = tensoru.shape(Z)
                # (B, A, T, D) -> (B, K, A, T, D)
                S_0 = tensoru.expand_and_tile_axis(
                    phi.S_past_car_frames[..., -1, :], N=K, axis=1)
                S_m1 = tensoru.expand_and_tile_axis(
                    phi.S_past_car_frames[..., -2, :], N=K, axis=1)
                batch_shape = (B, K)
            else:
                raise ValueError("Unhandled rank of latents to warp.")

            S_history = [S_m1, S_0]

            Z_history = []
            m_history = []
            mu_history = []
            sigel_history = []
            sigma_history = []
            metadata_history = []

            phi.prepare(batch_shape)
            self._prepare(batch_shape, phi)
            for t_idx in range(T):
                m_t, sigel_t, sigma_t = self.step_generate(S_history, phi)
                mu_t = m_t + 2 * S_history[-1] - S_history[-2]

                # Z first, then compute S.
                Z_t = Z[..., t_idx, :]
                S_t = mu_t + tf.einsum('...ij,...j->...i', sigma_t, Z_t)
                phi.update_frames(S_t_car_frames=S_t,
                                  S_tm1_car_frames=S_history[-1])

                m_history.append(m_t)
                mu_history.append(mu_t)
                sigel_history.append(sigel_t)
                sigma_history.append(sigma_t)
                S_history.append(S_t)
                Z_history.append(Z_t)
                metadata_history.append(self.current_metadata)

                if getattr(self, 'debug_eager', False): pdb.set_trace()

        roll = interface.ESPRollout(S_car_frames_list=S_history[2:],
                                    Z_list=Z_history,
                                    m_list=m_history,
                                    mu_list=mu_history,
                                    sigma_list=sigma_history,
                                    sigel_list=sigel_history,
                                    metadata_list=metadata_history,
                                    phi=phi)
        return roll
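
forward and inverse undo each other step by step: S_t = mu_t + sigma_t @ Z_t and Z_t = sigma_t^{-1} @ (S_t - mu_t). A standalone check of the per-step algebra (assuming, as in inverse above, that sigma_t = expm(sigel_t)):

import tensorflow as tf
mu = tf.constant([1.0, 2.0], dtype=tf.float64)
sigel = tf.constant([[0.2, 0.0], [0.1, 0.3]], dtype=tf.float64)
sigma = tf.linalg.expm(sigel)
Z = tf.constant([0.5, -0.5], dtype=tf.float64)
S = mu + tf.einsum('ij,j->i', sigma, Z)                        # forward step
Z_back = tf.einsum('ij,j->i', tf.linalg.expm(-sigel), S - mu)  # inverse step
# Z_back ~= Z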
Example #13
    def _traj_to_s_T(self, trajectories):
        assert (tensoru.rank(trajectories) == 5)
        # N.B. tf.squeeze without an explicit axis also removes any size-1 batch
        # dims; the shape assertion below guards against that.
        s_T = tf.squeeze(trajectories[..., -1, :])
        assert (tensoru.shape(s_T) == self.full_shape)
        return s_T