Example #1
def main(version: str,
         data_root: str,
         split_name: str,
         output_dir: str,
         config_name: str = 'predict_2020_icra.json') -> None:
    """
    Performs inference for all of the baseline models defined in the physics model module.
    :param version: nuScenes data set version.
    :param data_root: Directory where the NuScenes data is stored.
    :param split_name: nuScenes data split name, e.g. train, val, mini_train, etc.
    :param output_dir: Directory where predictions should be stored.
    :param config_name: Name of config file.
    """

    nusc = NuScenes(version=version, dataroot=data_root)
    helper = PredictHelper(nusc)
    dataset = get_prediction_challenge_split(split_name, dataroot=data_root)
    config = load_prediction_config(helper, config_name)
    oracle = PhysicsOracle(config.seconds, helper)
    cv_heading = ConstantVelocityHeading(config.seconds, helper)

    cv_preds = []
    oracle_preds = []
    for token in dataset:
        cv_preds.append(cv_heading(token).serialize())
        oracle_preds.append(oracle(token).serialize())

    with open(os.path.join(output_dir, "cv_preds.json"), "w") as f:
        json.dump(cv_preds, f)
    with open(os.path.join(output_dir, "oracle_preds.json"), "w") as f:
        json.dump(oracle_preds, f)
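The snippets in this listing omit their imports. For Example #1, a header along these lines should suffice; the module paths are the usual nuscenes-devkit locations, but they may differ slightly between devkit versions:

import json
import os

from nuscenes import NuScenes
from nuscenes.eval.prediction.config import load_prediction_config
from nuscenes.eval.prediction.splits import get_prediction_challenge_split
from nuscenes.prediction import PredictHelper
from nuscenes.prediction.models.physics import ConstantVelocityHeading, PhysicsOracle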
Example #2
    def compute_metrics(self,
                        config_file='predict_2020_icra.json',
                        result_file='metrics.json',
                        agent_frame=True):
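        """
        Evaluates the stored MTP predictions ('mtp_preds.json') against every metric
        listed in the prediction config and writes the aggregated results to result_file.
        """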

        config = load_prediction_config(self.helper, config_file)
        with open(os.path.join(self.output_dir, 'mtp_preds.json'), "r") as f:
            predictions = json.load(f)
        n_preds = len(predictions)
        result_output_path = os.path.join(self.output_dir, result_file)
        containers = {
            metric.name: np.zeros((n_preds, metric.shape))
            for metric in config.metrics
        }
        for i, prediction_str in enumerate(predictions):
            prediction = Prediction.deserialize(prediction_str)
            ground_truth = self.helper.get_future_for_agent(
                prediction.instance,
                prediction.sample,
                config.seconds,
                in_agent_frame=agent_frame)
            for metric in config.metrics:
                containers[metric.name][i] = metric(ground_truth, prediction)
        aggregations: Dict[str, Dict[str, List[float]]] = defaultdict(dict)
        for metric in config.metrics:
            for agg in metric.aggregators:
                aggregations[metric.name][agg.name] = agg(
                    containers[metric.name])

        print(aggregations)
        with open(result_output_path, "w") as f:
            json.dump(aggregations, f, indent=2)
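Each container holds one row per prediction, so an aggregator is simply a named callable that reduces a (n_preds, metric.shape) array to something JSON-serializable. A minimal sketch of such an aggregator; the class below is illustrative only and not the devkit implementation:

import numpy as np
from typing import List


class MeanAggregator:
    """Reduces a (n_preds, metric_shape) array to its per-column means."""

    name = 'mean'

    def __call__(self, array: np.ndarray) -> List[float]:
        # One float per column keeps the result JSON-serializable.
        return np.mean(array, axis=0).tolist()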
Example #3
def main(version: str, data_root: str, submission_path: str,
         config_name: str = 'predict_2020_icra.json') -> None:
    """
    Computes metrics for the submission stored at submission_path, using the metrics
    specified by the config file config_name.
    :param version: nuScenes data set version.
    :param data_root: Directory storing NuScenes data.
    :param submission_path: Path to the submission JSON file.
    :param config_name: Name of config file.
    """
    with open(submission_path, "r") as f:
        predictions = json.load(f)
    nusc = NuScenes(version=version, dataroot=data_root)
    helper = PredictHelper(nusc)
    config = load_prediction_config(helper, config_name)
    results = compute_metrics(predictions, helper, config)
    with open(submission_path.replace('.json', '_metrics.json'), "w") as f:
        json.dump(results, f, indent=2)
Example #4
    def __init__(self,
                 nusc,
                 helper,
                 maps_dir,
                 save_maps_dataset=False,
                 config_name='predict_2020_icra.json',
                 history=1,
                 num_examples=None,
                 in_agent_frame=True):

        self.nusc = nusc
        self.helper = helper

        # initialize the data set
        if maps_dir == 'maps_train':
            dataset_version = "train"
        elif maps_dir == 'maps':
            dataset_version = "train_val"
        elif maps_dir == 'maps_val':
            dataset_version = "val"
        else:
            raise ValueError(f"Unrecognized maps_dir: {maps_dir}")

        # initialize the maps directory where everything will be saved
        self.maps_dir = os.path.join(os.getcwd(), maps_dir)
        self.data_set = get_prediction_challenge_split(
            dataset_version, dataroot=self.nusc.dataroot)

        if num_examples:
            self.data_set = self.data_set[:num_examples]

        # initialize the rasterizers used for map generation
        self.static_layer_rasterizer = StaticLayerRasterizer(self.helper)
        self.agent_rasterizer = AgentBoxesWithFadedHistory(
            self.helper, seconds_of_history=history)
        self.mtp_input_representation = InputRepresentation(
            self.static_layer_rasterizer, self.agent_rasterizer, Rasterizer())

        self.in_agent_frame = in_agent_frame

        self.config = load_prediction_config(self.helper, config_name)

        self.save_maps_dataset = save_maps_dataset

        if self.save_maps_dataset:
            self.save_maps()

        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
Example #5
def main(version: str, data_root: str, split_name: str, output_dir: str, submission_name: str, config_name: str) \
        -> None:
    """
    Makes predictions for a submission to the nuScenes prediction challenge.
    :param version: NuScenes version.
    :param data_root: Directory storing NuScenes data.
    :param split_name: Data split to run inference on.
    :param output_dir: Directory to store the output file.
    :param submission_name: Name of the submission to use for the results file.
    :param config_name: Name of config file to use.
    """
    nusc = NuScenes(version=version, dataroot=data_root)
    helper = PredictHelper(nusc)
    dataset = get_prediction_challenge_split(split_name, dataroot=data_root)
    config = load_prediction_config(helper, config_name)

    predictions = do_inference_for_submission(helper, config, dataset)
    predictions = [prediction.serialize() for prediction in predictions]
    with open(os.path.join(output_dir, f"{submission_name}_inference.json"), "w") as f:
        json.dump(predictions, f)
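do_inference_for_submission is not shown in this snippet. A minimal sketch of what such a function could look like, reusing the constant-velocity baseline from Example #1 (the body below is an assumption, not the original code):

from typing import List


def do_inference_for_submission(helper, config, dataset) -> List:
    """Runs a model over every token in the split and returns its Prediction objects."""
    # ConstantVelocityHeading is the physics baseline from Example #1; a real
    # submission would run the trained model here instead.
    model = ConstantVelocityHeading(config.seconds, helper)
    return [model(token) for token in dataset]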
Example #6
def main(nusc: NuScenes,
         submission_path: str,
         config_name: str = 'predict_2020_icra.json') -> None:
    """
    Computes metrics for the submission stored at submission_path, using the metrics
    specified by the config file config_name.
    :param nusc: nuScenes data set object.
    :param submission_path: Path to the submission JSON file.
    :param config_name: Name of config file.
    """
    with open(submission_path, "r") as f:
        predictions = json.load(f)
    helper = PredictHelper(nusc)
    config = load_prediction_config(helper, config_name)
    results, resultsfull = compute_metrics(predictions, helper, config)
    with open(submission_path.replace('.json', '_metrics.json'), "w") as f:
        json.dump(results, f, indent=2)
    print('dumping full results...')
    np.savez(submission_path.replace('.json', '_metricsfull'), **resultsfull)

    print('Results from', submission_path)
    print(json.dumps(results, indent=2))
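Since np.savez appends the .npz extension, the full results written above can be read back as shown below; which array names are available depends on the keys of resultsfull:

import numpy as np

full = np.load(submission_path.replace('.json', '_metricsfull.npz'))
print(full.files)         # metric names stored in resultsfull
# full['<metric name>']   # access an individual array by one of those names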
Example #7
def main(version: str, data_root: str,
         split_name: str, output_dir: str, config_name: str = 'predict_2020_icra.json') -> None:
    """
    Visualizes the rasterized model input representation alongside camera views for a sample of prediction tokens.
    :param version: nuScenes data set version.
    :param data_root: Directory where the NuScenes data is stored.
    :param split_name: nuScenes data split name, e.g. train, val, mini_train, etc.
    :param output_dir: Directory where predictions should be stored.
    :param config_name: Name of config file.
    """

    print('timing point A')
    nusc = NuScenes(version=version, dataroot=data_root)
    print('timing point B')
    helper = PredictHelper(nusc)
    print('timing point C')
    dataset = get_prediction_challenge_split(split_name, dataroot=data_root)
    print('timing point D')
    config = load_prediction_config(helper, config_name)
    print('timing point E')

    # rasterization
    static_layer_rasterizer = StaticLayerRasterizer(helper)
    agent_rasterizer = AgentBoxesWithFadedHistory(helper, seconds_of_history=3)
    mtp_input_representation = InputRepresentation(static_layer_rasterizer, agent_rasterizer, Rasterizer())

    # loop through training tasks
    for token in dataset[40:60:2]:
        fig, axes = plt.subplots(1, 3, figsize=(18, 9))
        print(token)
        instance_token, sample_token = token.split('_')

        plot_cam_view(axes[1], nusc, token)
        plot_cam_view(axes[2], nusc, token, cam_name='CAM_FRONT_RIGHT')
        axes[0].imshow(mtp_input_representation.make_input_representation(instance_token, sample_token))
    plt.show()
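plot_cam_view is not defined in the snippet. One plausible implementation, an assumption rather than the original helper, that looks up the requested camera image through the devkit and draws it on the given axis:

import matplotlib.image as mpimg


def plot_cam_view(ax, nusc, token, cam_name='CAM_FRONT'):
    """Plots the image from the given camera channel for the sample encoded in token."""
    _, sample_token = token.split('_')
    sample = nusc.get('sample', sample_token)
    cam_data_token = sample['data'][cam_name]
    ax.imshow(mpimg.imread(nusc.get_sample_data_path(cam_data_token)))
    ax.set_title(cam_name)
    ax.axis('off')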