Example #1
def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--data',
                        default='trajdata',
                        help='directory of data to test')
    parser.add_argument('--output',
                        required=True,
                        nargs='+',
                        help='relative path to saved model')
    parser.add_argument('--disable-write',
                        action='store_true',
                        help='disable writing new files')
    parser.add_argument('--disable-collision',
                        action='store_true',
                        help='disable collision metrics')
    args = parser.parse_args()

    ## Path to the data folder name to predict
    args.data = 'DATA_BLOCK/' + args.data + '/'

    ## Test_pred : Folders for saving model predictions
    args.data = args.data + 'test_pred/'

    ## Writes to Test_pred
    ### Does this overwrite existing predictions? No. ###
    if not args.disable_write:
        write.main(args)

    ## Evaluates test_pred with test_private
    names = []
    for model in args.output:
        names.append(model.split('/')[-1].replace('.pkl', ''))

    # Initialize Result Table
    table = Table()

    for name in names:
        list_sub = sorted(
            [f for f in os.listdir(args.data + name) if not f.startswith('.')])

        submit_datasets = [args.data + name + '/' + f for f in list_sub]
        true_datasets = [
            args.data.replace('pred', 'private') + f for f in list_sub
        ]
        print(name)

        ## Evaluate submitted datasets with True Datasets [The main eval function]
        results = {
            submit_datasets[i].replace(args.data, '').replace('.ndjson', ''):
            eval(true_datasets[i], submit_datasets[i], args.disable_collision,
                 args)
            for i in range(len(true_datasets))
        }

        ## Saves results in dict
        table.add_entry(name, results)

    ## Make Result Table
    table.print_table()
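The Table object, the write module and the dataset-level eval function above come from the surrounding evaluator package and are not shown here. As a rough illustration of the aggregation interface the example relies on, a minimal stand-in could look like the sketch below; the class name SimpleTable and the metric names ('ade', 'fde') are illustrative assumptions, not the actual implementation.

# Minimal sketch of the result-aggregation interface assumed above.
# SimpleTable and the metric names are hypothetical stand-ins for the
# evaluator package's Table and the per-dataset eval output.
class SimpleTable:
    def __init__(self):
        self.entries = {}  # model name -> {dataset: metrics}

    def add_entry(self, name, results):
        self.entries[name] = results

    def print_table(self):
        for name, results in self.entries.items():
            print(name)
            for dataset, metrics in sorted(results.items()):
                print('  {}: {}'.format(dataset, metrics))

table = SimpleTable()
table.add_entry('lstm_model', {'biwi_hotel': {'ade': 0.5, 'fde': 1.1}})
table.print_table()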
Example #2
import argparse
import os
import pickle

import scipy
from joblib import Parallel, delayed

## Project-specific helpers assumed to come from the surrounding evaluator package
## (not shown here): write, Table, collision_test and the dataset-level eval function.

def main():

    parser = argparse.ArgumentParser()
    parser.add_argument('--path',
                        default='trajdata',
                        help='directory of data to test')
    parser.add_argument('--output',
                        required=True,
                        nargs='+',
                        help='relative path to saved model')
    parser.add_argument('--obs_length',
                        default=9,
                        type=int,
                        help='observation length')
    parser.add_argument('--pred_length',
                        default=12,
                        type=int,
                        help='prediction length')
    parser.add_argument('--disable-write',
                        action='store_true',
                        help='disable writing new files')
    parser.add_argument('--disable-collision',
                        action='store_true',
                        help='disable collision metrics')
    parser.add_argument('--labels',
                        required=False,
                        nargs='+',
                        help='labels of models')
    parser.add_argument('--sf',
                        action='store_true',
                        help='consider socialforce in evaluation')
    parser.add_argument('--orca',
                        action='store_true',
                        help='consider orca in evaluation')
    parser.add_argument('--kf',
                        action='store_true',
                        help='consider kalman in evaluation')
    parser.add_argument('--cv',
                        action='store_true',
                        help='consider constant velocity in evaluation')
    parser.add_argument('--normalize_scene',
                        action='store_true',
                        help='augment scenes')
    parser.add_argument('--modes',
                        default=1,
                        type=int,
                        help='number of modes to predict')
    args = parser.parse_args()

    scipy.seterr('ignore')

    ## Path to the data folder name to predict
    args.path = 'DATA_BLOCK/' + args.path + '/'

    ## Test_pred : Folders for saving model predictions
    args.path = args.path + 'test_pred/'

    ## Writes to Test_pred
    ### Does this overwrite existing predictions? No. ###
    if not args.disable_write:
        write.main(args)

    ## Evaluates test_pred with test_private
    names = []
    for model in args.output:
        model_name = model.split('/')[-1].replace('.pkl', '')
        model_name = model_name + '_modes' + str(args.modes)
        names.append(model_name)

    ## Use the provided labels if given, otherwise fall back to the model names
    if args.labels:
        labels = args.labels
    else:
        labels = names

    # Initialize Result Table
    table = Table()

    for num, name in enumerate(names):
        print(name)

        result_file = args.path.replace('pred', 'results') + name

        ## If results were pre-computed and saved, load them
        if os.path.exists(result_file + '/results.pkl'):
            print("Loading Saved Results")
            with open(result_file + '/results.pkl', 'rb') as handle:
                [final_result, sub_final_result,
                 col_result] = pickle.load(handle)
            table.add_result(labels[num], final_result, sub_final_result)
            table.add_collision_entry(labels[num], col_result)

        ## Else, compute results and save them
        else:
            list_sub = sorted([
                f for f in os.listdir(args.path + name)
                if not f.startswith('.')
            ])

            ## Simple Collision Test
            col_result = collision_test(list_sub, name, args)
            table.add_collision_entry(labels[num], col_result)

            submit_datasets = [
                args.path + name + '/' + f for f in list_sub
                if 'collision_test.ndjson' not in f
            ]
            true_datasets = [
                args.path.replace('pred', 'private') + f for f in list_sub
                if 'collision_test.ndjson' not in f
            ]

            ## Evaluate submitted datasets with True Datasets [The main eval function]
            # results = {submit_datasets[i].replace(args.path, '').replace('.ndjson', ''):
            #             eval(true_datasets[i], submit_datasets[i], args)
            #            for i in range(len(true_datasets))}

            results_list = Parallel(n_jobs=4)(
                delayed(eval)(true_datasets[i], submit_datasets[i], args)
                for i in range(len(true_datasets)))
            results = {
                submit_datasets[i].replace(args.path,
                                           '').replace('.ndjson', ''):
                results_list[i]
                for i in range(len(true_datasets))
            }

            # print(results)
            ## Generate results
            final_result, sub_final_result = table.add_entry(
                labels[num], results)

            ## Save results as pkl (to avoid recomputing them)
            os.makedirs(result_file, exist_ok=True)
            with open(result_file + '/results.pkl', 'wb') as handle:
                pickle.dump([final_result, sub_final_result, col_result],
                            handle,
                            protocol=pickle.HIGHEST_PROTOCOL)

    ## Make Result Table
    table.print_table()
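Example #2 parallelizes the per-dataset evaluation with joblib and caches the finished results as a pickle so that a rerun can skip the computation. The sketch below isolates those two patterns with a placeholder evaluation function; only the joblib and pickle calls mirror the example, while the file names and slow_eval are illustrative assumptions.

import os
import pickle
from joblib import Parallel, delayed

def slow_eval(true_path, pred_path):
    # Placeholder for the real dataset-level eval function.
    return {'true': true_path, 'pred': pred_path}

if __name__ == '__main__':
    pairs = [('true_a.ndjson', 'pred_a.ndjson'),
             ('true_b.ndjson', 'pred_b.ndjson')]
    cache = 'results_demo.pkl'

    if os.path.exists(cache):
        ## Reload previously computed results instead of recomputing them
        with open(cache, 'rb') as handle:
            results = pickle.load(handle)
    else:
        ## Evaluate each (true, pred) pair in parallel worker processes
        results = Parallel(n_jobs=4)(
            delayed(slow_eval)(t, p) for t, p in pairs)
        with open(cache, 'wb') as handle:
            pickle.dump(results, handle, protocol=pickle.HIGHEST_PROTOCOL)

    print(results)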
Example #3
## Relies on the same standard-library imports and project helpers as the
## examples above (os, pickle, write, collision_test and eval).
def evaluation_trajnetplusplus_other_models(args, table):
    ## Test_pred : Folders for saving model predictions
    args.path = args.path + '/test_pred/'
    args.output = args.output if args.output is not None else []
    ## Require at least one output model unless a baseline flag (sf/orca/kf/cv) is set
    if (not args.sf) and (not args.orca) and (not args.kf) and (not args.cv):
        assert len(args.output), 'No output file is provided'
    print(args)
    # Generate predictions for the other models
    write.main(args)

    ## Evaluates test_pred with test_private
    names = []
    for model in args.output:
        model_name = model.split('/')[-1].replace('.pkl', '')
        model_name = model_name + '_modes' + str(args.modes)
        names.append(model_name)
    ## Loop over the model names
    for num, name in enumerate(names):
        # Result file
        result_file = args.path.replace('pred', 'results') + name
        ## If results were pre-computed and saved, load them
        if os.path.exists(result_file + '/results.pkl'):
            with open(result_file + '/results.pkl', 'rb') as handle:
                [final_result, sub_final_result,
                 col_result] = pickle.load(handle)
            table.add_result(names[num], final_result, sub_final_result)
            table.add_collision_entry(names[num], col_result)
        ## Else, Calculate results and save
        else:
            # List of datasets to process
            list_sub = []
            for f in os.listdir(args.path + name):
                if not f.startswith('.'):
                    list_sub.append(f)
            ## Simple Collision Test
            col_result = collision_test(list_sub, name, args)
            table.add_collision_entry(names[num], col_result)
            submit_datasets = [
                args.path + name + '/' + f for f in list_sub
                if 'collision_test.ndjson' not in f
            ]
            true_datasets = [
                args.path.replace('pred', 'private') + f for f in list_sub
                if 'collision_test.ndjson' not in f
            ]
            ## Evaluate submitted datasets with True Datasets [The main eval function]
            results = {
                submit_datasets[i].replace(args.path,
                                           '').replace('.ndjson', ''):
                eval(true_datasets[i], submit_datasets[i], args)
                for i in range(len(true_datasets))
            }

            #results_list = Parallel(n_jobs=4)(delayed(eval)(true_datasets[i], submit_datasets[i], args) for i in range(len(true_datasets)))
            #results = {submit_datasets[i].replace(args.path, '').replace('.ndjson', ''): results_list[i]
            #           for i in range(len(true_datasets))}

            # print(results)
            ## Generate results
            final_result, sub_final_result = table.add_entry(
                names[num], results)

            ## Save results as pkl (to avoid recomputing them)
            os.makedirs(result_file, exist_ok=True)
            with open(result_file + '/results.pkl', 'wb') as handle:
                pickle.dump([final_result, sub_final_result, col_result],
                            handle,
                            protocol=pickle.HIGHEST_PROTOCOL)

    return table
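All three examples derive the ground-truth and results folders from the prediction folder by plain string replacement on the path, so the expected layout under DATA_BLOCK follows the naming shown below (the dataset name 'trajdata' is just the parser default).

## How the sibling folders are derived in the examples above
path = 'DATA_BLOCK/trajdata/test_pred/'    # model predictions
private = path.replace('pred', 'private')  # DATA_BLOCK/trajdata/test_private/ (ground truth)
results = path.replace('pred', 'results')  # DATA_BLOCK/trajdata/test_results/ (cached results.pkl per model)
print(private)
print(results)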