Example #1
0
def eval(gt, input_file, disable_collision, args):
    """Evaluate predicted scenes in `input_file` against ground truth `gt`.

    Scenes are bucketed by their main tag and sub tags (types 1-4) and
    handed to TrajnetEvaluator. Returns ``evaluator.result()``.

    Note: the function name shadows the builtin ``eval``; kept for callers.
    `args` is currently unused but kept for interface compatibility.
    """
    # Ground Truth: collect scene ids and paths in a single pass.
    # (The original called reader_gt.scenes() twice, parsing the file twice.)
    reader_gt = trajnettools.Reader(gt, scene_type='paths')
    scenes_id_gt = []
    scenes_gt = []
    for s_id, s in reader_gt.scenes():
        scenes_id_gt.append(s_id)
        scenes_gt.append(s)

    # Scene Predictions
    reader_sub = trajnettools.Reader(input_file, scene_type='paths')
    scenes_sub = [s for _, s in reader_sub.scenes()]

    ## indexes / sub_indexes map each type (1-4) to the scene ids of that type
    indexes = {i: [] for i in range(1, 5)}
    sub_indexes = {i: [] for i in range(1, 5)}
    for scene in reader_gt.scenes_by_id:
        tags = reader_gt.scenes_by_id[scene].tag
        main_tag = tags[0:1]  # main tag kept as a one-element list
        sub_tags = tags[1]    # collection of sub tags
        for ii in range(1, 5):
            if ii in main_tag:
                indexes[ii].append(scene)
            if ii in sub_tags:
                sub_indexes[ii].append(scene)

    # Evaluate
    evaluator = TrajnetEvaluator(reader_gt, scenes_gt, scenes_id_gt,
                                 scenes_sub, indexes, sub_indexes)
    evaluator.aggregate('kf', disable_collision)
    return evaluator.result()
Example #2
0
def eval(input_file, predictor):
    """Run `predictor` over all scenes of `input_file` and return results.

    Reads the dataset (sub-sampling the large 'syi' file), builds an
    Evaluator configured with the predictor's observation/prediction
    lengths, aggregates, and returns ``evaluator.result()``.
    """
    print('dataset', input_file)

    # Sub-sample the large 'syi' dataset; keep every scene otherwise.
    sample = 0.05 if 'syi.ndjson' in input_file else None
    reader = trajnettools.Reader(input_file, scene_type='paths')

    file_name = []
    scenes = []
    sample_rate = []
    # NOTE(review): created and deleted without being used — presumably for
    # a side effect of scene_funcs(); confirm before removing.
    scene_instance = scene_funcs(device='cpu').to('cpu')
    for fname, _, paths, rate in reader.scenes(sample=sample):
        file_name.append(fname)
        scenes.append(paths)
        sample_rate.append(rate)
    del scene_instance

    # Observation / prediction horizons come from the predictor itself.
    evaluator = Evaluator(scenes, file_name=file_name,
                          sample_rate=sample_rate)  # nonlinear_scene_index
    evaluator.n_obs = predictor.n_obs    # setting n_obs and n_pred values
    evaluator.n_pred = predictor.n_pred

    evaluator.aggregate(predictor, store_image=0)
    return evaluator.result()
Example #3
0
def main():
    """Profile one LSTM training epoch and export a Chrome trace file."""
    use_cuda = torch.cuda.is_available()
    device = torch.device('cuda' if use_cuda else 'cpu')

    # A single scene is enough for profiling purposes.
    reader = trajnettools.Reader('data/train/biwi_hotel.ndjson')
    scenes = list(reader.scenes(limit=1))

    model = trajnetbaselines.lstm.LSTM(
        pool=trajnetbaselines.lstm.Pooling(type_='social'))
    trainer = trajnetbaselines.lstm.trainer.Trainer(model, device=device)

    with torch.autograd.profiler.profile(use_cuda=use_cuda) as prof:
        trainer.train(scenes, epoch=0)
    prof.export_chrome_trace('profile_trace.json')
Example #4
0
def eval(input_file):
    """Evaluate several baselines on `input_file` and return the results.

    Scenes whose Kalman-filter average L2 error is above the dataset mean
    are marked "non-linear" and passed to the Evaluator; the Kalman
    baseline and four LSTM variants are then aggregated.

    Note: the function name shadows the builtin ``eval``; kept for callers.
    """
    print('dataset', input_file)

    # Sub-sample the large 'syi' dataset; keep every scene otherwise.
    sample = 0.05 if 'syi.ndjson' in input_file else None
    reader = trajnettools.Reader(input_file, scene_type='paths')
    scenes = [s for _, s in reader.scenes(sample=sample)]

    # non-linear scenes from high Kalman Average L2
    nonlinear_score = []
    for paths in scenes:
        kalman_prediction = trajnetbaselines.kalman.predict(paths)
        nonlinear_score.append(
            trajnettools.metrics.average_l2(paths[0], kalman_prediction))
    # NOTE: raises ZeroDivisionError for an empty dataset (unchanged).
    mean_nonlinear = sum(nonlinear_score) / len(scenes)
    nonlinear_scene_index = {
        i
        for i, nl in enumerate(nonlinear_score) if nl > mean_nonlinear
    }

    evaluator = Evaluator(scenes, nonlinear_scene_index)

    # Kalman Filter (Lin) and non-linear scenes
    evaluator.aggregate('kf', trajnetbaselines.kalman.predict)

    # LSTM variants: the four identical load/aggregate stanzas are
    # collapsed into one data-driven loop (labels and checkpoints
    # unchanged from the original).
    lstm_models = [
        ('lstm', 'output/vanilla_lstm.pkl'),
        ('olstm', 'output/occupancy_lstm.pkl'),
        ('dlstm', 'output/directional_lstm.pkl'),
        ('slstm', 'output/social_lstm.pkl'),
    ]
    for label, checkpoint in lstm_models:
        predictor = trajnetbaselines.lstm.LSTMPredictor.load(checkpoint)
        evaluator.aggregate(label, predictor)

    return evaluator.result()
Example #5
0
def main(args):
    """Write model predictions for every test dataset into 'test_pred'.

    For each model in args.output a sub-folder of args.data is created and
    one prediction file per dataset is written. If a model's folder
    already exists from a previous run, writing is skipped for that model.
    """
    ## List of test files (.ndjson) inside the test folder (waiting to be predicted by the prediction model)
    datasets = sorted([f for f in os.listdir(args.data.replace('_pred', '')) if not f.startswith('.') and f.endswith('.ndjson')])

    ## Handcrafted Baselines (if required to compare)
    if args.kf:
        args.output.append('/kf.pkl')
    if args.sf:
        args.output.append('/sf.pkl')
        args.output.append('/sf_opt.pkl')
    if args.orca:
        args.output.append('/orca.pkl')
        args.output.append('/orca_opt.pkl')

    ## Extract Model names from arguments and create its own folder in 'test_pred' for storing predictions
    ## WARNING: If Model predictions already exist from previous run, this process SKIPS WRITING
    for model in args.output:
        model_name = model.split('/')[-1].replace('.pkl', '')

        ## Check if model predictions already exist
        if not os.path.exists(args.data):
            os.makedirs(args.data)
        if not os.path.exists(args.data + model_name):
            os.makedirs(args.data + model_name)
        else:
            continue

        # Loading the APPROPRIATE model.
        # Hoisted out of the dataset loop: the choice depends only on
        # model_name, so checkpoints are no longer re-loaded per dataset.
        ## Keep Adding Different Models to this List
        print("Model Name: ", model_name)
        if model_name == 'kf':
            print("Kalman")
            predictor = trajnetbaselines.classical.kalman.predict
        elif model_name in ('sf', 'sf_opt'):
            print("Social Force")
            predictor = trajnetbaselines.classical.socialforce.predict
        elif model_name in ('orca', 'orca_opt'):
            print("ORCA")
            predictor = trajnetbaselines.classical.orca.predict
        elif 'sgan' in model_name:
            print("SGAN")
            predictor = trajnetbaselines.sgan.SGANPredictor.load(model)
            # On CPU
            device = torch.device('cpu')
            predictor.model.to(device)
        else:
            print("LSTM")
            predictor = trajnetbaselines.lstm.LSTMPredictor.load(model)
            # On CPU
            device = torch.device('cpu')
            predictor.model.to(device)

        ## Start writing predictions in dataset/test_pred
        for dataset in datasets:
            # Model's name
            name = dataset.replace(args.data.replace('_pred', '') + 'test/', '')

            # Copy observations from test folder into test_pred folder
            shutil.copyfile(args.data.replace('_pred', '') + name, args.data + '{}/{}'.format(model_name, name))
            print('processing ' + name)

            # Read Scenes from 'test' folder
            reader = trajnettools.Reader(args.data.replace('_pred', '') + dataset, scene_type='paths')
            scenes = list(reader.scenes())

            # Get the model prediction and write them in corresponding test_pred file
            """ 
            VERY IMPORTANT: Prediction Format

            The predictor function should output a dictionary. The keys of the dictionary should correspond to the prediction modes. 
            ie. predictions[0] corresponds to the first mode. predictions[m] corresponds to the m^th mode.... Multimodal predictions!
            Each modal prediction comprises of primary prediction and neighbour (surrrounding) predictions i.e. predictions[m] = [primary_prediction, neigh_predictions]
            Note: Return [primary_prediction, []] if model does not provide neighbour predictions

            Shape of primary_prediction: Tensor of Shape (Prediction length, 2)
            Shape of Neighbour_prediction: Tensor of Shape (Prediction length, n_tracks - 1, 2).
            (See LSTMPredictor.py for more details)
            """
            with open(args.data + '{}/{}'.format(model_name, name), "a") as myfile:
                for scene_id, paths in scenes:

                    ## Extract 1) first_frame, 2) frame_diff 3) ped_ids for writing predictions
                    observed_path = paths[0]
                    frame_diff = observed_path[1].frame - observed_path[0].frame
                    first_frame = observed_path[args.obs_length-1].frame + frame_diff
                    ped_id = observed_path[0].pedestrian
                    ped_id_ = []
                    for j, _ in enumerate(paths[1:]): ## Only need neighbour ids
                        ped_id_.append(paths[j+1][0].pedestrian)

                    ## For each scene, get predictions
                    if model_name == 'sf_opt':
                        predictions = predictor(paths, sf_params=[0.5, 1.0, 0.1], n_predict=args.pred_length, obs_length=args.obs_length) ## optimal sf_params
                    elif model_name == 'orca_opt':
                        predictions = predictor(paths, orca_params=[0.25, 1.0, 0.3], n_predict=args.pred_length, obs_length=args.obs_length) ## optimal orca_params
                    else:
                        predictions = predictor(paths, n_predict=args.pred_length, obs_length=args.obs_length)

                    for m in range(len(predictions)):
                        prediction, neigh_predictions = predictions[m]
                        ## Write Primary
                        for i in range(len(prediction)):
                            track = trajnettools.TrackRow(first_frame + i * frame_diff, ped_id,
                                                          prediction[i, 0].item(), prediction[i, 1].item(), m, scene_id)
                            myfile.write(trajnettools.writers.trajnet(track))
                            myfile.write('\n')

                        ## Write Neighbours (if non-empty).
                        ## Guard: per the contract above, a model may return
                        ## an empty list [] here, which has no .shape.
                        if len(neigh_predictions):
                            for n in range(neigh_predictions.shape[1]):
                                neigh = neigh_predictions[:, n]
                                for j in range(len(neigh)):
                                    track = trajnettools.TrackRow(first_frame + j * frame_diff, ped_id_[n],
                                                                  neigh[j, 0].item(), neigh[j, 1].item(), m, scene_id)
                                    myfile.write(trajnettools.writers.trajnet(track))
                                    myfile.write('\n')
        print('')
Example #6
0
def main():
    """Evaluate saved LSTM predictors on the test datasets.

    Parses command-line options, loads each model passed via --output,
    runs it over every test .ndjson dataset and prints the averaged
    ADE (average displacement error) and FDE (final displacement error).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--data',
                        default='trajdata',
                        help='directory of data to test')
    parser.add_argument('--output',
                        required=True,
                        nargs='+',
                        help='relative path to saved model')
    parser.add_argument('--obs_length',
                        default=9,
                        type=int,
                        help='observation length')
    parser.add_argument('--pred_length',
                        default=12,
                        type=int,
                        help='prediction length')
    parser.add_argument('--disable-write',
                        action='store_true',
                        help='disable writing new files')
    parser.add_argument('--disable-collision',
                        action='store_true',
                        help='disable collision metrics')
    parser.add_argument('--labels',
                        required=False,
                        nargs='+',
                        help='labels of models')
    args = parser.parse_args()

    ## Path to the data folder name to predict
    args.data = 'DATA_BLOCK/' + args.data + '/'

    ## Test_pred: Folders for saving model predictions
    args.data = args.data + 'test_pred/'

    ## Writes to Test_pred
    ## Does this overwrite existing predictions? No. ###
    datasets = sorted([
        f for f in os.listdir(args.data.replace('_pred', ''))
        if not f.startswith('.') and f.endswith('.ndjson')
    ])

    ## Model names are passed as arguments
    for model in args.output:
        model_name = model.split('/')[-1].replace('.pkl', '')
        # Loading the appropriate model (currently written only for LSTMs)
        print("Model Name: ", model_name)
        predictor = trajnetbaselines.lstm.LSTMPredictor.load(model)
        # On CPU
        device = torch.device('cpu')
        predictor.model.to(device)

        total_scenes = 0
        average = 0
        final = 0
        topk_average = 0
        topk_final = 0

        ## Start writing in dataset/test_pred
        for dataset in datasets:
            # Model's name
            name = dataset.replace(
                args.data.replace('_pred', '') + 'test/', '')

            # Copy file from test into test/train_pred folder
            print('processing ' + name)
            if 'collision_test' in name:
                continue

            # Read file from 'test'
            reader = trajnettools.Reader(args.data.replace('_pred', '') +
                                         dataset,
                                         scene_type='paths')
            scenes = [s for _, s in reader.scenes()]

            # Read ground truth in a single pass.
            # (The original iterated reader_gt.scenes() twice and the
            # collected scene ids were never used afterwards.)
            reader_gt = trajnettools.Reader(
                args.data.replace('_pred', '_private') + dataset,
                scene_type='paths')
            scenes_gt = [s for _, s in reader_gt.scenes()]
            total_scenes += len(scenes_gt)

            for i, paths in enumerate(scenes):
                ground_truth = scenes_gt[i]
                predictions = predictor(paths,
                                        n_predict=args.pred_length,
                                        obs_length=args.obs_length)

                ## Considers only the First MODE
                prediction, neigh_predictions = predictions[0]

                ## Convert numpy array to Track Rows ##
                ## Extract 1) first_frame, 2) frame_diff 3) ped_ids for writing predictions
                observed_path = paths[0]
                frame_diff = observed_path[1].frame - observed_path[0].frame
                first_frame = observed_path[args.obs_length -
                                            1].frame + frame_diff
                ped_id = observed_path[0].pedestrian

                ## make Track Rows (index renamed to `step` so it no longer
                ## shadows the scene index `i` of the enclosing loop)
                prediction = [
                    trajnettools.TrackRow(first_frame + step * frame_diff,
                                          ped_id, prediction[step, 0],
                                          prediction[step, 1], 0)
                    for step in range(len(prediction))
                ]

                primary_tracks = [
                    t for t in prediction if t.prediction_number == 0
                ]
                frame_gt = [
                    t.frame for t in ground_truth[0]
                ][args.obs_length:args.obs_length + args.pred_length]
                frame_pred = [t.frame for t in primary_tracks]

                ## To verify if same scene
                if frame_gt != frame_pred:
                    raise Exception('frame numbers are not consistent')

                average_l2 = trajnettools.metrics.average_l2(
                    ground_truth[0][args.obs_length:args.obs_length +
                                    args.pred_length],
                    primary_tracks,
                    n_predictions=args.pred_length)
                final_l2 = trajnettools.metrics.final_l2(
                    ground_truth[0][args.obs_length:args.obs_length +
                                    args.pred_length], primary_tracks)

                # aggregate FDE and ADE
                average += average_l2
                final += final_l2

                ## TODO: top-k ADE/FDE for multimodal predictors
                ## (trajnettools.metrics.topk over all prediction modes).

        ## Average ADE and FDE; guard against division by zero when no
        ## ground-truth scenes were found.
        if total_scenes:
            average /= total_scenes
            final /= total_scenes

        print('ADE: ', average)
        print('FDE: ', final)
def trajectory_type(rows, path, fps, track_id=0, args=None):
    """Categorize all scenes in `path` and write the filtered output.

    Parameters
    ----------
    rows : trajectory rows, passed through to write()
    path : input file; a 'test_private' path also triggers writing the
        corresponding public 'test' split
    fps : frames per second stored in each SceneRow
    track_id : starting id for accepted scenes; the next free id is returned
    args : namespace providing obs_len and acceptance (required)
    """
    # args is read unconditionally below (acceptance, obs_len); fail fast
    # with a clear message instead of an AttributeError on None.
    if args is None:
        raise ValueError('args with obs_len/acceptance must be provided')

    ## Read
    reader = trajnettools.Reader(path, scene_type='paths')
    scenes = [s for _, s in reader.scenes()]
    ## Filtered Frames and Scenes
    new_frames = set()
    new_scenes = []

    ###########################################################################
    # scenes_test helps to handle both test and test_private simultaneously
    # scenes_test correspond to Test
    ###########################################################################
    test = 'test' in path
    if test:
        path_test = path.replace('test_private', 'test')
        reader_test = trajnettools.Reader(path_test, scene_type='paths')
        scenes_test = [s for _, s in reader_test.scenes()]
        ## Filtered Test Frames and Test Scenes
        new_frames_test = set()
        new_scenes_test = []

    ## Initialize Tag Stats to be collected
    tags = {1: [], 2: [], 3: [], 4: []}
    mult_tags = {1: [], 2: [], 3: [], 4: []}
    sub_tags = {1: [], 2: [], 3: [], 4: []}

    if not scenes:
        raise Exception('No scenes found')

    for index, scene in enumerate(scenes):
        ## Primary Path
        ped_interest = scene[0]

        # Assert Test Scene length
        if test:
            assert len(scenes_test[index][0]) >= args.obs_len, \
                   'Scene Test not adequate length'

        ## Get Tag
        tag, mult_tag, sub_tag = get_type(scene, args)

        # Randomly accept scenes with the per-type acceptance probability.
        if np.random.uniform() < args.acceptance[tag - 1]:
            ## Update Tags
            tags[tag].append(track_id)
            for tt in mult_tag:
                mult_tags[tt].append(track_id)
            for st in sub_tag:
                sub_tags[st].append(track_id)

            ## Define Scene_Tag
            scene_tag = []
            scene_tag.append(tag)
            scene_tag.append(sub_tag)

            ## Filtered scenes and Frames
            new_frames |= set(ped_interest[i].frame
                              for i in range(len(ped_interest)))
            new_scenes.append(
                trajnettools.data.SceneRow(track_id,
                                           ped_interest[0].pedestrian,
                                           ped_interest[0].frame,
                                           ped_interest[-1].frame, fps,
                                           scene_tag))

            ## Append to list of scenes_test as well if Test Set
            if test:
                new_frames_test |= set(ped_interest[i].frame
                                       for i in range(args.obs_len))
                new_scenes_test.append(
                    trajnettools.data.SceneRow(track_id,
                                               ped_interest[0].pedestrian,
                                               ped_interest[0].frame,
                                               ped_interest[-1].frame, fps, 0))

            track_id += 1

    # Writes the Final Scenes and Frames
    write(rows, path, new_scenes, new_frames)
    if test:
        write(rows, path_test, new_scenes_test, new_frames_test)

    ## Stats (scenes is guaranteed non-empty here — see the raise above).
    # Fixed off-by-one: the original printed the last loop index, which
    # is one less than the actual number of scenes processed.
    print("Total Scenes: ", len(scenes))

    # Types:
    print("Main Tags")
    print("Type 1: ", len(tags[1]), "Type 2: ", len(tags[2]), "Type 3: ",
          len(tags[3]), "Type 4: ", len(tags[4]))
    print("Sub Tags")
    print("LF: ", len(sub_tags[1]), "CA: ", len(sub_tags[2]), "Group: ",
          len(sub_tags[3]), "Others: ", len(sub_tags[4]))

    return track_id
Example #8
0
def main(args, kf=False, sf=False):
    """Write predictions of each model in args.output into 'test_pred'.

    Optionally appends the Kalman (`kf`) and Social Force (`sf`) baselines.
    If a model's output folder already exists, writing is skipped.
    """
    ## List of .ndjson files inside args.data (waiting to be predicted by the testing model)
    datasets = sorted([
        f for f in os.listdir(args.data.replace('_pred', ''))
        if not f.startswith('.') and f.endswith('.ndjson')
    ])

    if kf:
        args.output.append('/kf.pkl')
    if sf:
        args.output.append('/sf.pkl')

    ## Model names are passed as arguments
    for model in args.output:
        model_name = model.split('/')[-1].replace('.pkl', '')

        ## Make a directory in DATA_BLOCK which will contain the model outputs
        ## If model is already written, you skip writing
        if not os.path.exists(args.data):
            os.makedirs(args.data)
        if not os.path.exists(args.data + model_name):
            os.makedirs(args.data + model_name)
        else:
            continue

        print("Model Name: ", model_name)
        # Load the model once per model.
        # Hoisted out of the dataset loop: the choice depends only on
        # model_name, so LSTM checkpoints are not re-loaded per dataset.
        if model_name == 'kf':
            predictor = trajnetbaselines.kalman.predict
        elif model_name == 'sf':
            predictor = trajnetbaselines.socialforce.predict
        else:
            predictor = trajnetbaselines.lstm.LSTMPredictor.load(model)
            # On CPU
            device = torch.device('cpu')
            predictor.model.to(device)

        ## Start writing in dataset/test_pred
        for dataset in datasets:
            # Model's name
            name = dataset.replace(
                args.data.replace('_pred', '') + 'test/', '')

            # Copy file from test into test/train_pred folder
            shutil.copyfile(
                args.data.replace('_pred', '') + name,
                args.data + '{}/{}'.format(model_name, name))
            print('processing ' + name)

            # Read file from 'test'
            reader = trajnettools.Reader(args.data.replace('_pred', '') +
                                         dataset,
                                         scene_type='paths')
            scenes = list(reader.scenes())

            # Write the prediction
            with open(args.data + '{}/{}'.format(model_name, name),
                      "a") as myfile:
                for scene_id, paths in scenes:
                    predictions = predictor(paths)
                    for m in range(len(predictions)):
                        prediction, neigh_predictions = predictions[m]

                        ## Write Primary
                        for i in range(len(prediction)):
                            track = trajnettools.TrackRow(
                                prediction[i].frame, prediction[i].pedestrian,
                                prediction[i].x.item(), prediction[i].y.item(),
                                m, scene_id)
                            myfile.write(trajnettools.writers.trajnet(track))
                            myfile.write('\n')

                        ## Write Neighbours
                        for n in range(len(neigh_predictions)):
                            neigh = neigh_predictions[n]
                            for j in range(len(neigh)):
                                track = trajnettools.TrackRow(
                                    neigh[j].frame, neigh[j].pedestrian,
                                    neigh[j].x.item(), neigh[j].y.item(), m,
                                    scene_id)
                                myfile.write(
                                    trajnettools.writers.trajnet(track))
                                myfile.write('\n')
        print('')