Example #1
    def test(self):

        layer_1 = np.zeros((100, 100, 3))
        box_1 = cv2.boxPoints(((50, 50), (20, 20), 0))
        layer_1 = cv2.fillPoly(layer_1,
                               pts=[np.int32(box_1)],  # np.int0 was removed in NumPy 2.0
                               color=(255, 255, 255))

        layer_2 = np.zeros((100, 100, 3))
        box_2 = cv2.boxPoints(((70, 30), (10, 10), 0))
        layer_2 = cv2.fillPoly(layer_2,
                               pts=[np.int32(box_2)],
                               color=(0, 0, 255))

        rasterizer = Rasterizer()
        image = rasterizer.combine(
            [layer_1.astype('uint8'),
             layer_2.astype('uint8')])

        answer = np.zeros((100, 100, 3))
        answer = cv2.fillPoly(answer,
                              pts=[np.int32(box_1)],
                              color=(255, 255, 255))
        answer = cv2.fillPoly(answer, pts=[np.int32(box_2)], color=(0, 0, 255))
        answer = answer.astype('uint8')

        np.testing.assert_allclose(answer, image)
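
The assertion above exercises Rasterizer.combine, which overlays layers in
order. A minimal sketch of that behaviour, assuming later layers win wherever
they draw non-zero pixels (an illustration, not necessarily the devkit code):

import numpy as np

def combine_sketch(layers):
    """Overlay images in order; later non-zero pixels overwrite earlier ones."""
    result = np.zeros_like(layers[0])
    for layer in layers:
        drawn = (layer != 0).any(axis=-1)  # mask of pixels this layer draws
        result[drawn] = layer[drawn]
    return result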
Example #2
    def __init__(
            self,
            helper: PredictHelper,
            layer_names: List[str] = None,
            colors: List[Color] = None,
            resolution: float = 0.1,  # meters / pixel
            meters_ahead: float = 40,
            meters_behind: float = 10,
            meters_left: float = 25,
            meters_right: float = 25):

        self.helper = helper
        self.maps = load_all_maps(helper)

        if not layer_names:
            layer_names = ['drivable_area', 'ped_crossing', 'walkway']
        self.layer_names = layer_names

        if not colors:
            colors = [(255, 255, 255), (119, 136, 153), (0, 0, 255)]
        self.colors = colors

        self.resolution = resolution
        self.meters_ahead = meters_ahead
        self.meters_behind = meters_behind
        self.meters_left = meters_left
        self.meters_right = meters_right
        self.combinator = Rasterizer()
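
A hypothetical construction, assuming this is the StaticLayerRasterizer
constructor shown in full in Example #7 (nusc and the tokens are placeholders):

helper = PredictHelper(nusc)                # nusc: an initialised NuScenes object
rasterizer = StaticLayerRasterizer(helper)  # default layers, colors and extents
img = rasterizer.make_representation(instance_token, sample_token)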
Example #3
    def __init__(self,
                 instance_sample_tokens,
                 helper):
        self.instance_sample_tokens = instance_sample_tokens
        self.helper = helper

        self.static_layer_rasterizer = StaticLayerRasterizer(self.helper)
        self.agent_rasterizer = AgentBoxesWithFadedHistory(self.helper, seconds_of_history=SECONDS_OF_HISTORY)
        self.mtp_input_representation = InputRepresentation(
            self.static_layer_rasterizer,
            self.agent_rasterizer,
            Rasterizer())

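        # ImageNet channel statistics, as expected by torchvision's pretrained backbones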
        self.transform_fn = transforms.Normalize(
                                mean=[0.485, 0.456, 0.406],
                                std=[0.229, 0.224, 0.225])
Example #4
    def __init__(self,
                 nusc,
                 helper,
                 maps_dir,
                 save_maps_dataset=False,
                 config_name='predict_2020_icra.json',
                 history=1,
                 num_examples=None,
                 in_agent_frame=True):

        self.nusc = nusc
        self.helper = helper

        # initialize the data set
        if maps_dir == 'maps_train':
            dataset_version = "train"
        elif maps_dir == 'maps':
            dataset_version = "train_val"
        elif maps_dir == 'maps_val':
            dataset_version = "val"
        else:
            raise ValueError(f"Unexpected maps_dir: {maps_dir}")

        # initialize the maps directory where everything will be saved
        self.maps_dir = os.path.join(os.getcwd(), maps_dir)
        self.data_set = get_prediction_challenge_split(
            dataset_version, dataroot=self.nusc.dataroot)

        if num_examples:
            self.data_set = self.data_set[:num_examples]

        # initialize rasterizers for map generation
        self.static_layer_rasterizer = StaticLayerRasterizer(self.helper)
        self.agent_rasterizer = AgentBoxesWithFadedHistory(
            self.helper, seconds_of_history=history)
        self.mtp_input_representation = InputRepresentation(
            self.static_layer_rasterizer, self.agent_rasterizer, Rasterizer())

        self.in_agent_frame = in_agent_frame

        self.config = load_prediction_config(self.helper, config_name)

        self.save_maps_dataset = save_maps_dataset

        if self.save_maps_dataset:
            self.save_maps()

        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
Example #5
    def get_format_mha_jam_maps(self, states_filepath, out_file):
        with open(states_filepath) as fr:
            agents_states = fr.readlines()

        # format:
        # agent_id, 20 x (frame_id, x, y, v, a, yaw_rate)
        agents_states = [[float(x.rstrip()) for x in s.split(',')]
                         for s in agents_states]

        mode = "train" if out_file.find("_train") != -1 else "val"
        mini = "mini" if out_file.find("mini") != -1 else "main"

        with open("dicts_sample_and_instances_id2token_" + mode + "_" + mini +
                  ".json") as fr:
            instance_dict_id_token, sample_dict_id_token = json.load(fr)

        # Get map for each sample in states
        static_layer_rasterizer = StaticLayerRasterizer(self.helper)
        agent_rasterizer = AgentBoxesWithFadedHistory(self.helper,
                                                      seconds_of_history=1)
        mtp_input_representation = InputRepresentation(static_layer_rasterizer,
                                                       agent_rasterizer,
                                                       Rasterizer())

        if not os.path.exists(os.path.dirname(out_file)):
            os.makedirs(os.path.dirname(out_file))

        for agent_ind, agent in enumerate(tqdm(agents_states)):
            instance_token = instance_dict_id_token[str(int(agent[0]))]
            mid_frame_id = int(agent[1 + 6 * MAX_TRAJ_LEN])
            sample_token = sample_dict_id_token[str(mid_frame_id)]
            img = mtp_input_representation.make_input_representation(
                instance_token, sample_token)
            # img = cv2.resize(img, (1024, 1024))
            cv2.imwrite(
                out_file.replace("_.jpg", "__" + str(agent_ind) + ".jpg"), img)
Example #6
import cv2
from tqdm import tqdm

from pkyutils import DatasetQ10, nuscenes_collate, nuscenes_pecnet_collate, NusCustomParser
from nuscenes.prediction.input_representation.combinators import Rasterizer

import sys
sys.path.append("./utils/")
from models_map import *
from social_utils import *

import natsort
from torch.utils.tensorboard import SummaryWriter

combinator = Rasterizer()

np.random.seed(777)
torch.manual_seed(777)

def train_single_epoch(model, optimizer, train_loader, best_of_n, device, hyper_params):

    model.train()
    train_loss = 0
    total_rcl, total_kld, total_adl = 0, 0, 0
    total_goal_sd, total_future_sd = 0, 0
    criterion = nn.MSELoss()

    for i, (traj, mask, initial_pos, _, num_future_agents, map) in tqdm(enumerate(train_loader), total=len(train_loader), desc='train'):
        traj, mask, initial_pos = torch.DoubleTensor(traj).to(device), torch.DoubleTensor(mask).to(device), torch.DoubleTensor(initial_pos).to(device)
        map = map.double().to(device)
Example #7
class StaticLayerRasterizer(StaticLayerRepresentation):
    """
    Creates a representation of the static map layers where
    the map layers are given a color and rasterized onto a
    three channel image.
    """

    def __init__(self, helper: PredictHelper,
                 layer_names: List[str] = None,
                 colors: List[Color] = None,
                 resolution: float = 0.1,  # meters / pixel
                 meters_ahead: float = 40, meters_behind: float = 10,
                 meters_left: float = 25, meters_right: float = 25):

        self.helper = helper
        self.maps = load_all_maps(helper)

        if not layer_names:
            layer_names = ['drivable_area', 'ped_crossing', 'walkway']
        self.layer_names = layer_names

        if not colors:
            colors = [(255, 255, 255), (119, 136, 153), (0, 0, 255)]
        self.colors = colors

        self.resolution = resolution
        self.meters_ahead = meters_ahead
        self.meters_behind = meters_behind
        self.meters_left = meters_left
        self.meters_right = meters_right
        self.combinator = Rasterizer()

    def make_representation(self, instance_token: str, sample_token: str) -> np.ndarray:
        """
        Makes rasterized representation of static map layers.
        :param instance_token: Token for instance.
        :param sample_token: Token for sample.
        :return: Three channel image.
        """

        sample_annotation = self.helper.get_sample_annotation(instance_token, sample_token)
        map_name = self.helper.get_map_name_from_sample_token(sample_token)

        x, y = sample_annotation['translation'][:2]

        yaw = quaternion_yaw(Quaternion(sample_annotation['rotation']))

        yaw_corrected = correct_yaw(yaw)

        image_side_length = 2 * max(self.meters_ahead, self.meters_behind,
                                    self.meters_left, self.meters_right)
        image_side_length_pixels = int(image_side_length / self.resolution)

        patchbox = get_patchbox(x, y, image_side_length)

        angle_in_degrees = angle_of_rotation(yaw_corrected) * 180 / np.pi

        canvas_size = (image_side_length_pixels, image_side_length_pixels)

        masks = self.maps[map_name].get_map_mask(patchbox, angle_in_degrees, self.layer_names, canvas_size=canvas_size)

        images = []
        for mask, color in zip(masks, self.colors):
            images.append(change_color_of_binary_mask(np.repeat(mask[::-1, :, np.newaxis], 3, 2), color))

        lanes = draw_lanes_in_agent_frame(image_side_length_pixels, x, y, yaw, radius=50,
                                          image_resolution=self.resolution, discretization_resolution_meters=1,
                                          map_api=self.maps[map_name])

        images.append(lanes)

        image = self.combinator.combine(images)

        row_crop, col_crop = get_crops(self.meters_ahead, self.meters_behind,
                                       self.meters_left, self.meters_right,
                                       self.resolution, image_side_length_pixels)
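        # With the default extents this crops the 800 x 800 px canvas
        # (2 * max(40, 10, 25, 25) = 80 m at 0.1 m/px) to (40 + 10) / 0.1 = 500
        # rows and (25 + 25) / 0.1 = 500 columns, i.e. a 500 x 500 x 3 image.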

        return image[row_crop, col_crop, :]

    def generate_mask(self, translation, rotation, sample_token: str):

        map_name = self.helper.get_map_name_from_sample_token(sample_token)

        # translation factors (ego frame)
        x, y = translation[:2]
        yaw = quaternion_yaw(Quaternion(rotation))
        yaw_corrected = correct_yaw(yaw)

        # 1. generate map masks
        image_side_length = 2 * max(self.meters_ahead, self.meters_behind, self.meters_left, self.meters_right)
        image_side_length_pixels = int(image_side_length / self.resolution)
        patchbox = get_patchbox(x, y, image_side_length)
        angle_in_degrees = angle_of_rotation(yaw_corrected) * 180 / np.pi
        canvas_size = (image_side_length_pixels, image_side_length_pixels)

        masks = self.maps[map_name].get_map_mask(patchbox, angle_in_degrees, self.layer_names, canvas_size=canvas_size)

        # 2. generate guided lanes
        agent_pixels = int(image_side_length_pixels / 2), int(image_side_length_pixels / 2)
        base_image = np.zeros((image_side_length_pixels, image_side_length_pixels, 3))

        meter_resolution = 0.5
        radius = 50.0
        lanes = get_lanes_in_radius(x, y, radius=radius, discretization_meters=meter_resolution, map_api=self.maps[map_name])
        image_with_lanes = draw_lanes_on_image(base_image, lanes, (x, y), yaw,
                                               agent_pixels, self.resolution, color_function=color_by_yaw)
        rotation_mat = get_rotation_matrix(image_with_lanes.shape, yaw)
        rotated_image = cv2.warpAffine(image_with_lanes, rotation_mat, image_with_lanes.shape[:2])
        rotated_image_lanes = rotated_image.astype("uint8")

        # 3. combine masks
        images = []
        for mask, color in zip(masks, self.colors):
            images.append(change_color_of_binary_mask(np.repeat(mask[::-1, :, np.newaxis], 3, 2), color))
        map_img = self.combinator.combine(images)
        images.append(rotated_image_lanes)
        map_img_with_lanes = self.combinator.combine(images)

        # crop
        row_crop, col_crop = get_crops(self.meters_ahead, self.meters_behind,
                                       self.meters_left, self.meters_right,
                                       self.resolution, image_side_length_pixels)

        return np.array(images)[:, row_crop, col_crop, :], lanes, map_img, map_img_with_lanes
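
A hypothetical call to the ego-frame mask generator above, assuming `helper`
is a PredictHelper and `ego_pose` is a nuScenes ego_pose record:

rasterizer = StaticLayerRasterizer(helper)
layer_crops, lanes, map_img, map_with_lanes = rasterizer.generate_mask(
    ego_pose['translation'], ego_pose['rotation'], sample_token)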
Example #8
from nuscenes.prediction import PredictHelper
from nuscenes.prediction.input_representation.agents import AgentBoxesWithFadedHistory
from nuscenes.prediction.input_representation.combinators import Rasterizer
from nuscenes.prediction.input_representation.interface import InputRepresentation
from nuscenes.prediction.input_representation.static_layers import StaticLayerRasterizer

from nuscenes import NuScenes

import matplotlib.pyplot as plt
import torch

DATAROOT = '/data/sets/nuscenes'

nuscenes = NuScenes('v1.0-mini', dataroot=DATAROOT)
# Data Splits for the Prediction Challenge

# input representation
helper = PredictHelper(nuscenes)
seconds_of_history = 1  # seconds of past agent motion to render (assumed value)
static_layer_rasterizer = StaticLayerRasterizer(helper)
agent_rasterizer = AgentBoxesWithFadedHistory(helper, seconds_of_history)
mtp_input_representation = InputRepresentation(static_layer_rasterizer,
                                               agent_rasterizer, Rasterizer())

instance_token_img, sample_token_img = 'bc38961ca0ac4b14ab90e547ba79fbb6', '7626dde27d604ac28a0240bdd54eba7a'
anns = [
    ann for ann in nuscenes.sample_annotation
    if ann['instance_token'] == instance_token_img
]
img = mtp_input_representation.make_input_representation(
    instance_token_img, sample_token_img)
plt.imshow(img)

# Model Implementations
Example #9
    def draw_map(self, instance, sample, sec_forward, predictions=None):
        img_road = self.map_rasterizer.make_representation(instance, sample)
        img_agents = self.agent_rasterizer.make_representation(
            instance, sample, sec_forward=sec_forward, predictions=predictions)
        return Rasterizer().combine([img_road, img_agents])
Example #10
    def __init__(self, sec_from_now: float, helper: PredictHelper):
        """
        Inits Baseline.
        :param sec_from_now: How many seconds into the future to make the prediction.
        :param helper: Instance of PredictHelper.
        """
        assert sec_from_now % 0.5 == 0, f"Parameter sec from now must be divisible by 0.5. Received {sec_from_now}."
        self.helper = helper
        self.sec_from_now = sec_from_now
        self.sampled_at = 2  # 2 Hz between annotations.

        backbone = ResNetBackbone('resnet50')
        self.mtp = MTP(backbone, num_modes=2)

        self.covernet = CoverNet(backbone, num_modes=64)    # Note that the value of num_modes depends on the size of the lattice used for CoverNet.

        static_layer_rasterizer = StaticLayerRasterizer(helper)
        agent_rasterizer = AgentBoxesWithFadedHistory(helper, seconds_of_history=1)
        self.mtp_input_representation = InputRepresentation(static_layer_rasterizer, agent_rasterizer, Rasterizer())

        self.trajectories = pickle.load(open(PATH_TO_EPSILON_8_SET, 'rb'))
        self.trajectories = torch.Tensor(self.trajectories)
Example #11
def main(version: str, data_root: str,
         split_name: str, output_dir: str, config_name: str = 'predict_2020_icra.json') -> None:
    """
    Performs inference for all of the baseline models defined in the physics model module.
    :param version: nuScenes data set version.
    :param data_root: Directory where the nuScenes data is stored.
    :param split_name: nuScenes data split name, e.g. train, val, mini_train, etc.
    :param output_dir: Directory where predictions should be stored.
    :param config_name: Name of config file.
    """

    print('timing point A')
    nusc = NuScenes(version=version, dataroot=data_root)
    print('timing point B')
    helper = PredictHelper(nusc)
    print('timing point C')
    dataset = get_prediction_challenge_split(split_name, dataroot=data_root)
    print('timing point D')
    config = load_prediction_config(helper, config_name)
    print('timing point E')

    # rasterization
    static_layer_rasterizer = StaticLayerRasterizer(helper)
    agent_rasterizer = AgentBoxesWithFadedHistory(helper, seconds_of_history=3)
    mtp_input_representation = InputRepresentation(static_layer_rasterizer, agent_rasterizer, Rasterizer())

    # loop through training tasks
    for token in dataset[40:60:2]:
        fig, axes = plt.subplots(1, 3, figsize=(18, 9))
        print(token)
        instance_token, sample_token = token.split('_')

        plot_cam_view(axes[1], nusc, token)
        plot_cam_view(axes[2], nusc, token, cam_name='CAM_FRONT_RIGHT')
        axes[0].imshow(mtp_input_representation.make_input_representation(instance_token, sample_token))
    plt.show()
Example #12
class StaticLayerRasterizer(StaticLayerRepresentation):
    """
    Creates a representation of the static map layers where
    the map layers are given a color and rasterized onto a
    three channel image.
    """
    def __init__(
            self,
            helper: PredictHelper,
            layer_names: List[str] = None,
            colors: List[Color] = None,
            resolution: float = 0.1,  # meters / pixel
            meters_ahead: float = 40,
            meters_behind: float = 10,
            meters_left: float = 25,
            meters_right: float = 25):

        self.helper = helper
        self.maps = load_all_maps(helper)

        if not layer_names:
            layer_names = ['drivable_area', 'ped_crossing', 'walkway']
        self.layer_names = layer_names

        if not colors:
            colors = [(255, 255, 255), (119, 136, 153), (0, 0, 255)]
        self.colors = colors

        self.resolution = resolution
        self.meters_ahead = meters_ahead
        self.meters_behind = meters_behind
        self.meters_left = meters_left
        self.meters_right = meters_right
        self.combinator = Rasterizer()

    def make_representation(self,
                            instance_token: str = None,
                            sample_token: str = None,
                            ego=False,
                            ego_pose=None) -> np.ndarray:
        """
        Makes rasterized representation of static map layers.
        :param instance_token: Token for instance.
        :param sample_token: Token for sample.
        :return: Three channel image.
        """

        if not ego:
            sample_annotation = self.helper.get_sample_annotation(
                instance_token, sample_token)
        else:
            if ego_pose is None:
                sample_ = self.helper.data.get('sample', sample_token)
                sample_data = self.helper.data.get(
                    'sample_data', sample_['data']['CAM_FRONT'])
                ego_pose = self.helper.data.get('ego_pose',
                                                sample_data['ego_pose_token'])
            sample_annotation = {
                'translation': ego_pose['translation'],
                'rotation': ego_pose['rotation'],
                'instance_token': None
            }

        map_name = self.helper.get_map_name_from_sample_token(sample_token)

        x, y = sample_annotation['translation'][:2]

        yaw = quaternion_yaw(Quaternion(sample_annotation['rotation']))

        yaw_corrected = correct_yaw(yaw)

        image_side_length = 2 * max(self.meters_ahead, self.meters_behind,
                                    self.meters_left, self.meters_right)
        image_side_length_pixels = int(image_side_length / self.resolution)

        patchbox = get_patchbox(x, y, image_side_length)

        angle_in_degrees = angle_of_rotation(yaw_corrected) * 180 / np.pi

        canvas_size = (image_side_length_pixels, image_side_length_pixels)

        masks = self.maps[map_name].get_map_mask(patchbox,
                                                 angle_in_degrees,
                                                 self.layer_names,
                                                 canvas_size=canvas_size)

        images = []
        for mask, color in zip(masks, self.colors):
            images.append(
                change_color_of_binary_mask(
                    np.repeat(mask[::-1, :, np.newaxis], 3, 2), color))

        lanes = draw_lanes_in_agent_frame(image_side_length_pixels,
                                          x,
                                          y,
                                          yaw,
                                          radius=50,
                                          image_resolution=self.resolution,
                                          discretization_resolution_meters=1,
                                          map_api=self.maps[map_name])

        images.append(lanes)

        image = self.combinator.combine(images)

        row_crop, col_crop = get_crops(self.meters_ahead, self.meters_behind,
                                       self.meters_left, self.meters_right,
                                       self.resolution, image_side_length_pixels)

        return image[row_crop, col_crop, :]
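
A hypothetical call exercising the ego branch added in this variant; the
sample token is a placeholder:

img = rasterizer.make_representation(sample_token=sample_token, ego=True)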
Example #13
    def __init__(self,
                 dataroot: str,
                 split: str,
                 t_h: float = 2,
                 t_f: float = 6,
                 grid_dim: int = 25,
                 img_size: int = 200,
                 horizon: int = 40,
                 grid_extent: Tuple[int, int, int, int] = (-25, 25, -10, 40),
                 num_actions: int = 4,
                 image_extraction_mode: bool = False):
        """
        Initializes dataset class for nuScenes prediction

        :param dataroot: Path to tables and data
        :param split: Dataset split for prediction benchmark ('train'/'train_val'/'val')
        :param t_h: Track history in seconds
        :param t_f: Prediction horizon in seconds
        :param grid_dim: Size of grid, default: 25x25
        :param img_size: Size of raster map image in pixels, default: 200x200
        :param horizon: MDP horizon
        :param grid_extent: Map extents in meters, (-left, right, -behind, front)
        :param num_actions: Number of actions for each state (4: [D,R,U,L] or 8: [D, R, U, L, DR, UR, DL, UL])
        :param image_extraction_mode: Whether dataset class is being used for image extraction
        """

        # Nuscenes dataset and predict helper
        self.dataroot = dataroot
        self.ns = NuScenes('v1.0-trainval', dataroot=dataroot)
        self.helper = PredictHelper(self.ns)
        self.token_list = get_prediction_challenge_split(split,
                                                         dataroot=dataroot)

        # Useful parameters
        self.grid_dim = grid_dim
        self.grid_extent = grid_extent
        self.img_size = img_size
        self.t_f = t_f
        self.t_h = t_h
        self.horizon = horizon
        self.num_actions = num_actions

        # Map row, column and velocity states to actual values
        grid_size_m = self.grid_extent[1] - self.grid_extent[0]
        self.row_centers = np.linspace(
            self.grid_extent[3] - grid_size_m / (self.grid_dim * 2),
            self.grid_extent[2] + grid_size_m / (self.grid_dim * 2),
            self.grid_dim)

        self.col_centers = np.linspace(
            self.grid_extent[0] + grid_size_m / (self.grid_dim * 2),
            self.grid_extent[1] - grid_size_m / (self.grid_dim * 2),
            self.grid_dim)
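        # e.g. with the default grid_extent = (-25, 25, -10, 40) and grid_dim = 25:
        # grid_size_m = 50, each cell spans 2 m, and row_centers runs from
        # 40 - 1 = 39 m (front) down to -10 + 1 = -9 m (behind) in 25 steps.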

        # Surrounding agent input representation: populate grid with velocity, acc, yaw-rate
        self.agent_ip = AgentMotionStatesOnGrid(
            self.helper,
            resolution=grid_size_m / img_size,
            meters_ahead=grid_extent[3],
            meters_behind=-grid_extent[2],
            meters_left=-grid_extent[0],
            meters_right=grid_extent[1])

        # Image extraction mode is used for extracting map images offline prior to training
        self.image_extraction_mode = image_extraction_mode
        if self.image_extraction_mode:

            # Raster map representation
            self.map_ip = StaticLayerRasterizer(
                self.helper,
                resolution=grid_size_m / img_size,
                meters_ahead=grid_extent[3],
                meters_behind=-grid_extent[2],
                meters_left=-grid_extent[0],
                meters_right=grid_extent[1])

            # Raster map with agent boxes. Only used for visualization
            static_layer_rasterizer = StaticLayerRasterizer(
                self.helper,
                resolution=grid_size_m / img_size,
                meters_ahead=grid_extent[3],
                meters_behind=-grid_extent[2],
                meters_left=-grid_extent[0],
                meters_right=grid_extent[1])

            agent_rasterizer = AgentBoxesWithFadedHistory(
                self.helper,
                seconds_of_history=1,
                resolution=grid_size_m / img_size,
                meters_ahead=grid_extent[3],
                meters_behind=-grid_extent[2],
                meters_left=-grid_extent[0],
                meters_right=grid_extent[1])

            self.map_ip_agents = InputRepresentation(static_layer_rasterizer,
                                                     agent_rasterizer,
                                                     Rasterizer())
Example #14
    def __getitem__(self, test_idx):

        # get the scene
        scene = self.trainset[test_idx]

        # get all the tokens in the scene
        # list of scene tokens, where each item comprises an instance token and a sample token separated by an underscore
        scene_tokens = self.prediction_scenes[scene]

        # return early if there are fewer than 2 tokens in this scene
        if len(scene_tokens) < 2:
            print("Not enough agents in the scene")
            return []

        # split the tokens; the instance tokens identify the agents in the scene
        tokens = [scene_tok.split("_") for scene_tok in scene_tokens]

        # lists of instance tokens and sample tokens
        instance_tokens, sample_tokens = (list(t) for t in zip(*tokens))

        assert len(instance_tokens) == len(
            sample_tokens), "Instance and Sample tokens count does not match"
        '''
        1. Convert the lists of sample and instance tokens into an ordered dict keyed by sample token
        2. Iterate over all combinations (of length TRAJECTORY_TIME_INTERVAL) of consecutive samples
        3. Form a list of data points, where each data point has TRAJECTORY_TIME_INTERVAL sample tokens and
            each sample token has data for all instance tokens identified in step 2
        4. Create three numpy arrays (coordinates, heading_change_rate, map) with appropriate shapes
        5. Iterate per sample, per instance, and fill the numpy arrays with the respective data
        6. Return a dict containing the three numpy arrays
        '''

        ordered_tokens = OrderedDict(zip(sample_tokens, instance_tokens))

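        # NOTE: debugging cut-off left in the source; everything below this
        # early return is currently unreachable.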
        print("Printing Ordered_tokens: ", ordered_tokens)
        return []

        # count of samples per instance token
        token_count = Counter(instance_tokens)

        # threshold used to keep the NUM_AGENTS agents with the most sample tokens
        minCount = sorted(list(token_count.values()),
                          reverse=True)[NUM_AGENTS - 1]

        # convert instance and sample tokens to dict format
        instance_sample_tokens = {}
        for instance_token, sample_token in zip(instance_tokens,
                                                sample_tokens):
            if token_count[instance_token] >= minCount:
                instance_sample_tokens.setdefault(instance_token,
                                                  []).append(sample_token)

#         print("Instance:samples ===============================================================================")
#         print(instance_sample_tokens)

        if len(instance_sample_tokens) != NUM_AGENTS:
            print()
#             print("Instance_sample_tokens: \n", instance_sample_tokens)
        '''
        Format:
        {coordinates: [[coord_at_t0, coord_at_t1, ..., coord_at_t_TRAJECTORY_TIME_INTERVAL], ... numDatapointsInScene],
         heading_change_rate: [[h_at_t0, h_at_t1, ..., h_at_t_TRAJECTORY_TIME_INTERVAL], ... numDatapointsInScene]
        }
        '''

        # initialize map rasterizers
        static_layer_rasterizer = StaticLayerRasterizer(self.helper)
        agent_rasterizer = AgentBoxesWithFadedHistory(self.helper,
                                                      seconds_of_history=2.5)
        mtp_input_representation = InputRepresentation(static_layer_rasterizer,
                                                       agent_rasterizer,
                                                       Rasterizer())

        # initialize output data
        output_data = {
            "coordinates":
            np.zeros((len(instance_sample_tokens.keys()), 1)),
            "heading_change_rate":
            np.zeros((len(instance_sample_tokens.keys()), 1)),
            "map": [0] * len(instance_sample_tokens.keys())
        }

        for t, instance_token in enumerate(instance_sample_tokens.keys()):

            instance_coordinates = np.zeros((int(
                len(instance_sample_tokens[instance_token]) /
                TRAJECTORY_TIME_INTERVAL), TRAJECTORY_TIME_INTERVAL, 3))
            instance_heading_change_rate = np.zeros((int(
                len(instance_sample_tokens[instance_token]) /
                TRAJECTORY_TIME_INTERVAL), TRAJECTORY_TIME_INTERVAL))

            print("Shape of instance_coordinates: ",
                  instance_coordinates.shape)
            idx = 0  # 0 --> num data points for this instance (dimension 1)
            num = 0  # 0 --> TRAJECTORY_TIME_INTERVAL (dimension 2)
            for sample_token in instance_sample_tokens[instance_token]:
                #                 print(idx, "     ", num)
                #                 print(self.nusc.get('sample', sample_token)["timestamp"])

                # get the annotation for the instance in the sample
                annotation = self.helper.get_sample_annotation(
                    instance_token, sample_token)
                instance_coordinates[idx][num] = annotation["translation"]

                # get the heading change rate of the agent
                heading_change_rate = self.helper.get_heading_change_rate_for_agent(
                    instance_token, sample_token)
                instance_heading_change_rate[idx][num] = heading_change_rate

                num = num + 1

                # reached the number of records per trajectory segment
                if num == TRAJECTORY_TIME_INTERVAL:
                    idx = idx + 1
                    num = 0

                if idx == instance_coordinates.shape[0]:
                    break

                img = mtp_input_representation.make_input_representation(
                    instance_token, sample_token)
#                 cv2.imshow("map",img)

            output_data["map"][t] = (img)
            #             plt.imsave('test'+str(test_idx)+str(t)+'.jpg',img)
            output_data["coordinates"][t] = instance_coordinates
            output_data["heading_change_rate"][
                t] = instance_heading_change_rate

#         test = pd.DataFrame(output_data,columns=["coordinates", "heading_change_rate", "map"])
#         test.to_csv('test'+str(test_idx)+'.csv')

        print("Printing Output data")
        print((output_data["coordinates"]))
        print(len(output_data["heading_change_rate"]))
        print(len(output_data["coordinates"]))

        return output_data