Code example #1
File: data_generator.py Project: b-safwat/MHA-JAM
    def run(self, out_dir):
        if self.dataset_version.find("mini") != -1:
            train_agents = get_prediction_challenge_split(
                "mini_train", dataroot=self.DATAROOT)
            val_agents = get_prediction_challenge_split("mini_val",
                                                        dataroot=self.DATAROOT)
        else:
            train_agents = get_prediction_challenge_split(
                "train", dataroot=self.DATAROOT)
            train_agents.extend(
                get_prediction_challenge_split("train_val",
                                               dataroot=self.DATAROOT))
            val_agents = get_prediction_challenge_split("val",
                                                        dataroot=self.DATAROOT)

        ## Statistics (commented out): longest past sequence per split
        # mx = -1
        # for current_sample in train_agents:
        #     instance_token, sample_token = current_sample.split("_")
        #     past_samples_local = self.helper.get_past_for_agent(instance_token, sample_token, 100, True, True)[::-1]
        #     if len(past_samples_local) > mx:
        #         mx = len(past_samples_local)
        # print("max length of the past sequences for trainval is:", mx)
        # for current_sample in val_agents:
        #     instance_token, sample_token = current_sample.split("_")
        #     past_samples_local = self.helper.get_past_for_agent(instance_token, sample_token, 100, True, True)[::-1]
        #     if len(past_samples_local) > mx:
        #         mx = len(past_samples_local)
        # print("max length of the past sequence for val is:", mx)
        # return

        self.get_format_mha_jam(
            train_agents,
            os.path.join(out_dir,
                         "states_train_" + self.dataset_version + ".txt"))
        self.get_format_mha_jam_context(
            os.path.join(out_dir,
                         "states_train_" + self.dataset_version + ".txt"),
            os.path.join(out_dir, "context_train_" + self.dataset_version,
                         "context_train_.txt"))
        self.get_format_mha_jam_maps(
            os.path.join(out_dir,
                         "states_train_" + self.dataset_version + ".txt"),
            os.path.join(out_dir, "maps_train_" + self.dataset_version,
                         "maps_train_.jpg"))
        # 25
        self.get_format_mha_jam(
            val_agents,
            os.path.join(out_dir,
                         "states_val_" + self.dataset_version + ".txt"))
        self.get_format_mha_jam_context(
            os.path.join(out_dir,
                         "states_val_" + self.dataset_version + ".txt"),
            os.path.join(out_dir, "context_val_" + self.dataset_version,
                         "context_val_.txt"))
        self.get_format_mha_jam_maps(
            os.path.join(out_dir,
                         "states_val_" + self.dataset_version + ".txt"),
            os.path.join(out_dir, "maps_val_" + self.dataset_version,
                         "maps_val_.jpg"))
Code example #2
def main(args):
    print("Args:")
    print(vars(args))

    print("Device:")
    print(device)

    # prepare output directories
    if not os.path.exists(args.experiment_dir):
        os.mkdir(args.experiment_dir)

    if not os.path.exists(os.path.join(args.experiment_dir, 'weights')):
        os.mkdir(os.path.join(args.experiment_dir, 'weights'))

    # store the arguments for reference
    config_fname = f'config_for_runtime_{RUN_TIME:%Y-%m-%d %Hh%Mm%Ss}.json'
    with open(os.path.join(args.experiment_dir, config_fname),
              'w') as json_file:
        json.dump(vars(args), json_file)

    # load data
    nusc = NuScenes(version=args.version, dataroot=args.data_root)
    helper = PredictHelper(nusc)
    train_tokens = get_prediction_challenge_split(args.train_split_name,
                                                  dataroot=args.data_root)
    val_tokens = get_prediction_challenge_split(args.val_split_name,
                                                dataroot=args.data_root)

    # apply downsampling
    train_tokens = np.random.choice(
        train_tokens,
        int(len(train_tokens) / args.train_downsample_factor),
        replace=False)
    val_tokens = np.random.choice(
        val_tokens,
        int(len(val_tokens) / args.val_downsample_factor),
        replace=False)

    # create data loaders
    train_dataset = get_dataset(train_tokens, helper, args)
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True)

    val_dataset = get_dataset(val_tokens, helper, args)
    val_dataloader = DataLoader(val_dataset,
                                batch_size=args.batch_size,
                                num_workers=args.num_workers,
                                shuffle=False)

    # run training
    train_epochs(train_dataloader=train_dataloader,
                 val_dataloader=val_dataloader,
                 args=args)
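Note that the np.random.choice downsampling above is unseeded, so every run trains on a different random subset. A minimal reproducible variant, assuming NumPy's standard Generator API (the seed value is arbitrary):

    rng = np.random.default_rng(0)
    train_tokens = rng.choice(
        train_tokens,
        int(len(train_tokens) / args.train_downsample_factor),
        replace=False)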
Code example #3
def main(version: str,
         data_root: str,
         split_name: str,
         output_dir: str,
         config_name: str = 'predict_2020_icra.json') -> None:
    """
    Performs inference for all of the baseline models defined in the physics model module.
    :param version: nuScenes data set version.
    :param data_root: Directory where the NuScenes data is stored.
    :param split_name: nuScenes data split name, e.g. train, val, mini_train, etc.
    :param output_dir: Directory where predictions should be stored.
    :param config_name: Name of config file.
    """

    nusc = NuScenes(version=version, dataroot=data_root)
    helper = PredictHelper(nusc)
    dataset = get_prediction_challenge_split(split_name, dataroot=data_root)
    config = load_prediction_config(helper, config_name)
    oracle = PhysicsOracle(config.seconds, helper)
    cv_heading = ConstantVelocityHeading(config.seconds, helper)

    cv_preds = []
    oracle_preds = []
    for token in dataset:
        cv_preds.append(cv_heading(token).serialize())
        oracle_preds.append(oracle(token).serialize())

    with open(os.path.join(output_dir, "cv_preds.json"), "w") as f:
        json.dump(cv_preds, f)
    with open(os.path.join(output_dir, "oracle_preds.json"), "w") as f:
        json.dump(oracle_preds, f)
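To sanity-check the files written above, the serialized predictions can be read straight back; json is standard library, and the exact dict keys are whatever the devkit's Prediction.serialize() emits:

    with open(os.path.join(output_dir, "cv_preds.json")) as f:
        cv_preds_loaded = json.load(f)  # one dict per challenge token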
Code example #4
    def __init__(self):
        DATAROOT = '/home/patrick/datasets/nuscenes'  # This is the path where you stored your copy of the nuScenes dataset.
        self.nuscenes = NuScenes('v1.0-mini', dataroot=DATAROOT)
        self.mini_train = get_prediction_challenge_split("mini_train",
                                                         dataroot=DATAROOT)

        self.helper = PredictHelper(self.nuscenes)
        self.physics_oracle = PhysicsOracle(sec_from_now=6, helper=self.helper)

        self.map_rasterizer = StaticLayerRasterizer(self.helper,
                                                    meters_ahead=60,
                                                    meters_behind=10,
                                                    meters_left=35,
                                                    meters_right=35)
        self.agent_rasterizer = FutureAgentBoxesWithFadedHistory(
            self.helper,
            meters_ahead=60,
            meters_behind=10,
            meters_left=35,
            meters_right=35)

        self.json_path = 'manual_results.json'
        self.annotations = []
        if os.path.exists(self.json_path):
            with open(self.json_path) as json_file:
                self.annotations = json.load(
                    json_file)  # Load existing JSON file
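A sketch of how the pieces built in this __init__ are typically consumed; make_representation is the devkit rasterizer interface, FutureAgentBoxesWithFadedHistory is project-specific and assumed to follow it, and calling the oracle on a token mirrors example #3:

    token = self.mini_train[0]
    instance_token, sample_token = token.split('_')
    map_img = self.map_rasterizer.make_representation(instance_token, sample_token)
    oracle_prediction = self.physics_oracle(token)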
Code example #5
def main(args):
    print("Running with args:")
    print(vars(args))

    print("Device:")
    print(device)

    # load data
    nusc = NuScenes(version=args.version, dataroot=args.data_root)
    helper = PredictHelper(nusc)
    data_tokens = get_prediction_challenge_split(args.split_name, dataroot=args.data_root)

    if args.key == "covernet":
        dataset = CoverNetDataset(data_tokens, helper)
    elif args.key == "mtp":
        dataset = MTPDataset(data_tokens, helper)
    else:
        raise ValueError(f"Unknown model key: {args.key}")  # avoid NameError on dataset below
    dataloader = DataLoader(dataset, batch_size=16, num_workers=0, shuffle=False)
    print(f"Loaded split {args.split_name}, length {len(dataset)}, in {len(dataloader)} batches.")

    # prepare model
    model = get_model(args)
    model.load_state_dict(
        torch.load(os.path.join(args.experiment_dir, 'weights', args.weights)))

    model.eval()

    predictions = get_predictions(args, dataloader, model)
    out_name = f'{args.key}_preds_{datetime.datetime.now():%Y-%m-%d %Hh%Mm%Ss}_{args.suffix}.json'
    with open(os.path.join(args.experiment_dir, out_name), "w") as f:
        json.dump(predictions, f)
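One defensive tweak worth noting: torch.load as written restores tensors to the device they were saved from, which fails when GPU-saved weights are loaded on a CPU-only machine. map_location is the standard torch remedy:

    model.load_state_dict(
        torch.load(os.path.join(args.experiment_dir, 'weights', args.weights),
                   map_location=device))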
Code example #6
File: info.py Project: b-safwat/nuscenes-devkit
def what_are_the_objects_types_used_in_prediction_tasks(
        DATAROOT='./data/sets/nuscenes', dataset_version='v1.0-mini'):
    nuscenes = NuScenes(dataset_version, DATAROOT)
    train_agents = get_prediction_challenge_split("train", dataroot=DATAROOT)
    val_agents = get_prediction_challenge_split("val", dataroot=DATAROOT)
    agents = np.concatenate((train_agents, val_agents), axis=0)
    categories = {}
    token_to_name = {}

    for current_sample in agents:
        instance_token, sample_token = current_sample.split("_")
        category_token = nuscenes.get("instance",
                                      instance_token)["category_token"]
        category_name = nuscenes.get("category", category_token)["name"]
        categories[category_name] = category_token
        token_to_name[category_token] = category_name

    print(categories.items())
    print(token_to_name.items())
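A small variant that also counts how many agents fall into each category instead of only recording the token mapping; collections.Counter is standard library and the lookup chain mirrors the loop above:

    from collections import Counter
    counts = Counter(
        nuscenes.get("category",
                     nuscenes.get("instance", s.split("_")[0])["category_token"])["name"]
        for s in agents)
    print(counts.most_common())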
Code example #7
    def run(self, format_for_model):
        if self.dataset_version.find("mini") != -1:
            train_agents = get_prediction_challenge_split(
                "mini_train", dataroot=self.DATAROOT)
            val_agents = get_prediction_challenge_split("mini_val",
                                                        dataroot=self.DATAROOT)
        else:
            train_agents = get_prediction_challenge_split(
                "train", dataroot=self.DATAROOT)
            train_agents.extend(
                get_prediction_challenge_split("train_val",
                                               dataroot=self.DATAROOT))
            val_agents = get_prediction_challenge_split("val",
                                                        dataroot=self.DATAROOT)

        self.get_new_format(
            train_agents, format_for_model,
            "/home/bassel/PycharmProjects/Trajectory-Transformer/datasets/nuscenes/bkup/transformer_train_"
            + self.dataset_version + ".txt")
        self.get_new_format(
            val_agents, format_for_model,
            "/home/bassel/PycharmProjects/Trajectory-Transformer/datasets/nuscenes/bkup/transformer_val_"
            + self.dataset_version + ".txt")
Code example #8
    def save_cmu_dataset(self, save_dir, partition='all'):
        from nuscenes.eval.prediction.splits import get_prediction_challenge_split

        split_types = ['mini_train', 'mini_val', 'train', 'train_val', 'val']
        if partition == 'mini':
            split_types = ['mini_train', 'mini_val']

        if not os.path.isdir(save_dir):
            os.mkdir(save_dir)

        for split in tqdm(split_types, desc='split dataset'):
            partition_tokens = get_prediction_challenge_split(
                split, dataroot=self.root)
            tokens_dict = {}
            for token in partition_tokens:
                instance_token, sample_token = token.split('_')
                try:
                    tokens_dict[sample_token].append(instance_token)
                except KeyError:
                    tokens_dict[sample_token] = [instance_token]

            with open('{}/{}.tokens'.format(save_dir, split), 'wb') as f:
                pickle.dump(tokens_dict, f, pickle.HIGHEST_PROTOCOL)

            for sample_tk, instance_tks in tqdm(tokens_dict.items(),
                                                desc=split,
                                                total=len(tokens_dict)):
                sample_dir = os.path.join(save_dir, sample_tk)
                if not os.path.isdir(sample_dir):
                    os.mkdir(sample_dir)
                    scene_data = self.get_cmu_annotation(
                        instance_tks, sample_tk)
                    with open('{}/map.bin'.format(sample_dir), 'wb') as f:
                        pickle.dump(scene_data['episode_img'], f,
                                    pickle.HIGHEST_PROTOCOL)
                    with open('{}/viz.bin'.format(sample_dir), 'wb') as f:
                        pickle.dump(scene_data['img_show'], f,
                                    pickle.HIGHEST_PROTOCOL)
                    with open('{}/episode.bin'.format(sample_dir), 'wb') as f:
                        pickle.dump(scene_data['episode'], f,
                                    pickle.HIGHEST_PROTOCOL)
                    with open('{}/instance_tks.bin'.format(sample_dir),
                              'wb') as f:
                        pickle.dump(instance_tks, f, pickle.HIGHEST_PROTOCOL)
                    with open('{}/scene_txt.bin'.format(sample_dir),
                              'wb') as f:
                        pickle.dump(scene_data['scene_txt'], f,
                                    pickle.HIGHEST_PROTOCOL)
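Reading one of the .tokens files back is symmetric; pickle is standard library and the split name here is illustrative:

    with open('{}/{}.tokens'.format(save_dir, 'train'), 'rb') as f:
        tokens_dict = pickle.load(f)  # {sample_token: [instance_token, ...]}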
Code example #9
    def __init__(self,
                 nusc,
                 helper,
                 maps_dir,
                 save_maps_dataset=False,
                 config_name='predict_2020_icra.json',
                 history=1,
                 num_examples=None,
                 in_agent_frame=True):

        self.nusc = nusc
        self.helper = helper

        #initialize the data set
        if maps_dir == 'maps_train':
            dataset_version = "train"
        elif maps_dir == 'maps':
            dataset_version = "train_val"
        elif maps_dir == 'maps_val':
            dataset_version = "val"
        else:
            raise ValueError(f"Unexpected maps_dir: {maps_dir}")  # dataset_version would be undefined below

        #initialize maps directory where everything will be saved
        self.maps_dir = os.path.join(os.getcwd(), maps_dir)
        self.data_set = get_prediction_challenge_split(
            dataset_version, dataroot=self.nusc.dataroot)

        if num_examples:
            self.data_set = self.data_set[:num_examples]

        #initialize rasterizers for map generation
        self.static_layer_rasterizer = StaticLayerRasterizer(self.helper)
        self.agent_rasterizer = AgentBoxesWithFadedHistory(
            self.helper, seconds_of_history=history)
        self.mtp_input_representation = InputRepresentation(
            self.static_layer_rasterizer, self.agent_rasterizer, Rasterizer())

        self.in_agent_frame = in_agent_frame

        self.config = load_prediction_config(self.helper, config_name)

        self.save_maps_dataset = save_maps_dataset

        if self.save_maps_dataset:
            self.save_maps()

        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
Code example #10
def main(version: str, data_root: str, split_name: str, output_dir: str, submission_name: str, config_name: str) \
        -> None:
    """
    Makes predictions for a submission to the nuScenes prediction challenge.
    :param version: NuScenes version.
    :param data_root: Directory storing NuScenes data.
    :param split_name: Data split to run inference on.
    :param output_dir: Directory to store the output file.
    :param submission_name: Name of the submission to use for the results file.
    :param config_name: Name of config file to use.
    """
    nusc = NuScenes(version=version, dataroot=data_root)
    helper = PredictHelper(nusc)
    dataset = get_prediction_challenge_split(split_name, dataroot=data_root)
    config = load_prediction_config(helper, config_name)

    predictions = do_inference_for_submission(helper, config, dataset)
    predictions = [prediction.serialize() for prediction in predictions]
    with open(os.path.join(output_dir, f"{submission_name}_inference.json"), "w") as f:
        json.dump(predictions, f)
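The dump assumes output_dir already exists; a cheap guard to add before writing (os.makedirs is standard library):

    os.makedirs(output_dir, exist_ok=True)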
Code example #11
def main(version: str, data_root: str,
         split_name: str, output_dir: str, config_name: str = 'predict_2020_icra.json') -> None:
    """
    Performs inference for all of the baseline models defined in the physics model module.
    :param version: nuScenes data set version.
    :param data_root: Directory where the NuScenes data is stored.
    :param split_name: nuScenes data split name, e.g. train, val, mini_train, etc.
    :param output_dir: Directory where predictions should be stored.
    :param config_name: Name of config file.
    """

    print('timing point A')
    nusc = NuScenes(version=version, dataroot=data_root)
    print('timing point B')
    helper = PredictHelper(nusc)
    print('timing point C')
    dataset = get_prediction_challenge_split(split_name, dataroot=data_root)
    print('timing point D')
    config = load_prediction_config(helper, config_name)
    print('timing point E')

    # rasterization
    static_layer_rasterizer = StaticLayerRasterizer(helper)
    agent_rasterizer = AgentBoxesWithFadedHistory(helper, seconds_of_history=3)
    mtp_input_representation = InputRepresentation(static_layer_rasterizer, agent_rasterizer, Rasterizer())

    # loop through training tasks
    for token in dataset[40:60:2]:
        fig, axes = plt.subplots(1, 3, figsize=(18, 9))
        print(token)
        instance_token, sample_token = token.split('_')

        plot_cam_view(axes[1], nusc, token)
        plot_cam_view(axes[2], nusc, token, cam_name='CAM_FRONT_RIGHT')
        axes[0].imshow(mtp_input_representation.make_input_representation(instance_token, sample_token))
    plt.show()
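Note that dataset[40:60:2] selects ten tokens (indices 40, 42, ..., 58), so the loop builds ten three-panel figures that all stay open until the single plt.show() at the end.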
Code example #12
File: process_data.py Project: EcustBoy/MATS
def process_data(data_path, version, output_path, half_dt, dynamic):
    nusc = NuScenes(version=version, dataroot=data_path, verbose=True)
    helper = PredictHelper(nusc)
    for data_class in [
            'val', 'train_val', 'train'
    ]:  # Change this to 'mini_train', 'mini_val' if you want to do the mini set.
        env = Environment(node_type_list=['VEHICLE', 'PEDESTRIAN'],
                          standardization=standardization)
        attention_radius = dict()
        attention_radius[(env.NodeType.PEDESTRIAN,
                          env.NodeType.PEDESTRIAN)] = 10.0
        attention_radius[(env.NodeType.PEDESTRIAN,
                          env.NodeType.VEHICLE)] = 20.0
        attention_radius[(env.NodeType.VEHICLE,
                          env.NodeType.PEDESTRIAN)] = 20.0
        attention_radius[(env.NodeType.VEHICLE, env.NodeType.VEHICLE)] = 30.0

        env.attention_radius = attention_radius
        env.robot_type = env.NodeType.VEHICLE
        scenes = []

        instance_sample_tokens = get_prediction_challenge_split(
            data_class, dataroot=data_path)
        processed_sample_tokens = set()
        for instance_sample_token in tqdm(instance_sample_tokens):
            _, sample_token = instance_sample_token.split("_")
            if sample_token in processed_sample_tokens:  # and data_class != 'val':
                continue
            scene = process_scene(sample_token, processed_sample_tokens, env,
                                  nusc, helper, data_path, data_class, half_dt,
                                  dynamic)
            if scene is not None:
                if 'train' == data_class:  # Change this to "in" if you want to do the mini set.
                    scene.augmented = list()
                    angles = np.arange(0, 360, 15)
                    for angle in angles:
                        scene.augmented.append(augment_scene(scene, angle))
                scenes.append(scene)

        print(f'Processed {len(scenes)} scenes')

        env.scenes = scenes

        doubled_str = '_doubled' if half_dt else ''
        dynamic_str = '_dynamic' if dynamic else ''

        if len(scenes) > 0:
            data_dict_path = os.path.join(
                output_path,
                f'nuScenes_{data_class}_full{doubled_str}{dynamic_str}.pkl')
            with open(data_dict_path, 'wb') as f:
                dill.dump(env, f, protocol=dill.HIGHEST_PROTOCOL)
            print('Saved Environment!')

        global total
        global curv_0_2
        global curv_0_1
        print(f"Total Nodes: {total}")
        print(f"Curvature > 0.1 Nodes: {curv_0_1}")
        print(f"Curvature > 0.2 Nodes: {curv_0_2}")
        total = 0
        curv_0_1 = 0
        curv_0_2 = 0
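On the augmentation above: np.arange(0, 360, 15) yields 24 angles, so each training scene is stored together with 24 rotated copies.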
Code example #13
    def __init__(self,
                 dataroot: str,
                 split: str,
                 t_h: float = 2,
                 t_f: float = 6,
                 grid_dim: int = 25,
                 img_size: int = 200,
                 horizon: int = 40,
                 grid_extent: Tuple[int, int, int, int] = (-25, 25, -10, 40),
                 num_actions: int = 4,
                 image_extraction_mode: bool = False):
        """
        Initializes dataset class for nuScenes prediction

        :param dataroot: Path to tables and data
        :param split: Dataset split for prediction benchmark ('train'/'train_val'/'val')
        :param t_h: Track history in seconds
        :param t_f: Prediction horizon in seconds
        :param grid_dim: Size of grid, default: 25x25
        :param img_size: Size of raster map image in pixels, default: 200x200
        :param horizon: MDP horizon
        :param grid_extent: Map extents in meters, (-left, right, -behind, front)
        :param num_actions: Number of actions for each state (4: [D,R,U,L] or 8: [D, R, U, L, DR, UR, DL, UL])
        :param image_extraction_mode: Whether dataset class is being used for image extraction
        """

        # Nuscenes dataset and predict helper
        self.dataroot = dataroot
        self.ns = NuScenes('v1.0-trainval', dataroot=dataroot)
        self.helper = PredictHelper(self.ns)
        self.token_list = get_prediction_challenge_split(split,
                                                         dataroot=dataroot)

        # Useful parameters
        self.grid_dim = grid_dim
        self.grid_extent = grid_extent
        self.img_size = img_size
        self.t_f = t_f
        self.t_h = t_h
        self.horizon = horizon
        self.num_actions = num_actions

        # Map row, column and velocity states to actual values
        grid_size_m = self.grid_extent[1] - self.grid_extent[0]
        self.row_centers = np.linspace(
            self.grid_extent[3] - grid_size_m / (self.grid_dim * 2),
            self.grid_extent[2] + grid_size_m / (self.grid_dim * 2),
            self.grid_dim)

        self.col_centers = np.linspace(
            self.grid_extent[0] + grid_size_m / (self.grid_dim * 2),
            self.grid_extent[1] - grid_size_m / (self.grid_dim * 2),
            self.grid_dim)

        # Surrounding agent input representation: populate grid with velocity, acc, yaw-rate
        self.agent_ip = AgentMotionStatesOnGrid(self.helper,
                                                resolution=grid_size_m /
                                                img_size,
                                                meters_ahead=grid_extent[3],
                                                meters_behind=-grid_extent[2],
                                                meters_left=-grid_extent[0],
                                                meters_right=grid_extent[1])

        # Image extraction mode is used for extracting map images offline prior to training
        self.image_extraction_mode = image_extraction_mode
        if self.image_extraction_mode:

            # Raster map representation
            self.map_ip = StaticLayerRasterizer(self.helper,
                                                resolution=grid_size_m /
                                                img_size,
                                                meters_ahead=grid_extent[3],
                                                meters_behind=-grid_extent[2],
                                                meters_left=-grid_extent[0],
                                                meters_right=grid_extent[1])

            # Raster map with agent boxes. Only used for visualization
            static_layer_rasterizer = StaticLayerRasterizer(
                self.helper,
                resolution=grid_size_m / img_size,
                meters_ahead=grid_extent[3],
                meters_behind=-grid_extent[2],
                meters_left=-grid_extent[0],
                meters_right=grid_extent[1])

            agent_rasterizer = AgentBoxesWithFadedHistory(
                self.helper,
                seconds_of_history=1,
                resolution=grid_size_m / img_size,
                meters_ahead=grid_extent[3],
                meters_behind=-grid_extent[2],
                meters_left=-grid_extent[0],
                meters_right=grid_extent[1])

            self.map_ip_agents = InputRepresentation(static_layer_rasterizer,
                                                     agent_rasterizer,
                                                     Rasterizer())
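A worked check of the grid arithmetic above with the default grid_extent (-25, 25, -10, 40) and grid_dim 25: grid_size_m = 25 - (-25) = 50 m, so each cell spans 2 m and the half-cell offset grid_size_m / (grid_dim * 2) is 1 m. row_centers therefore runs from 40 - 1 = 39 m (farthest ahead) down to -10 + 1 = -9 m, and col_centers from -25 + 1 = -24 m to 25 - 1 = 24 m.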
Code example #14
parser = argparse.ArgumentParser()
parser.add_argument('--dataroot', required=True, type=str)
parser.add_argument('--split', required=True, type=str)

args = parser.parse_args('\
    --dataroot /home/vilab/data/Nuscenes/v1.0-trainval_meta \
    --split train_val'.split())
# args = parser.parse_args()

DATAPATH = os.path.join('./dataset', args.split)
dpathlist = ['image', 'state', 'traj']

nuscenes = NuScenes('v1.0-trainval', dataroot=args.dataroot)

FUTURE_SEC = 6

inst_samp_pair_list = get_prediction_challenge_split(args.split,
                                                     dataroot=args.dataroot)
print(f"size of data : {len(inst_samp_pair_list):d}")
helper = PredictHelper(nuscenes)

dpathlist_ = []
for dlist in dpathlist:
    dpath = os.path.join(DATAPATH, dlist)
    dpathlist_.append(dpath)
    if not os.path.exists(dpath):
        os.makedirs(dpath)

static_layer_rasterizer = StaticLayerRasterizer(helper)
agent_rasterizer = AgentBoxesWithFadedHistory(helper, seconds_of_history=1)
mtp_input_representation = InputRepresentation(static_layer_rasterizer,
                                               agent_rasterizer, Rasterizer())
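A minimal sketch of consuming the token pairs and rasterizers prepared above; make_input_representation is the same devkit call used in example #11:

    for pair in inst_samp_pair_list[:1]:
        instance_token, sample_token = pair.split('_')
        img = mtp_input_representation.make_input_representation(
            instance_token, sample_token)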