def test_task(task,
              gt_set,
              res_path,
              J_target=None,
              F_target=None,
              metric=('J', 'F')):
    """Evaluate a results folder on the given DAVIS task/split and, optionally,
    check the aggregated J and F statistics against target values."""
    # Note: davis_root is assumed to be defined at module level; it is not a
    # parameter of this function.
    dataset_eval = DAVISEvaluation(davis_root=davis_root,
                                   gt_set=gt_set,
                                   task=task,
                                   codalab=True)
    metrics_res = dataset_eval.evaluate(res_path, debug=False, metric=metric)

    num_seq = len(list(dataset_eval.dataset.get_sequences()))
    J = metrics_res['J'] if 'J' in metric else {
        'M': np.zeros(num_seq),
        'R': np.zeros(num_seq),
        'D': np.zeros(num_seq)
    }
    F = metrics_res['F'] if 'F' in metric else {
        'M': np.zeros(num_seq),
        'R': np.zeros(num_seq),
        'D': np.zeros(num_seq)
    }

    if gt_set in ("val", "train", "test-dev"):
        sys.stdout.write(
            "----------------Global results in CSV---------------\n")
        g_measures = [
            'J&F-Mean', 'J-Mean', 'J-Recall', 'J-Decay', 'F-Mean', 'F-Recall',
            'F-Decay'
        ]
        final_mean = (np.mean(J["M"]) + np.mean(F["M"])
                      ) / 2. if 'J' in metric and 'F' in metric else 0
        g_res = np.array([
            final_mean,
            np.mean(J["M"]),
            np.mean(J["R"]),
            np.mean(J["D"]),
            np.mean(F["M"]),
            np.mean(F["R"]),
            np.mean(F["D"])
        ])
        table_g = pandas.DataFrame(data=np.reshape(g_res, [1, len(g_res)]),
                                   columns=g_measures)
        table_g.to_csv(sys.stdout, index=False, float_format="%0.3f")
    if J_target is not None:
        assert check_results_similarity(
            J, J_target), f'J {print_error(J, J_target)}'
    if F_target is not None:
        assert check_results_similarity(
            F, F_target), f'F {print_error(F, F_target)}'
    return J, F
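
# Illustrative usage sketch (not part of the original script): the task, split
# and results path below are placeholders; J_target / F_target can also be
# passed in whatever format check_results_similarity() expects.
# J, F = test_task(task='semi-supervised',
#                  gt_set='val',
#                  res_path='/path/to/unzipped/results')
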
# unzipped submission data is always in the 'res' subdirectory
# https://github.com/codalab/codalab-competitions/wiki/User_Building-a-Scoring-Program-for-a-Competition#directory-structure-for-submissions
submission_path = os.path.join(input_dir, 'res')
if not os.path.exists(submission_path):
    sys.exit('Could not find submission file {0}'.format(submission_path))

# unzipped reference data is always in the 'ref' subdirectory
# https://github.com/codalab/codalab-competitions/wiki/User_Building-a-Scoring-Program-for-a-Competition#directory-structure-for-submissions
gt_path = os.path.join(input_dir, 'ref')
if not os.path.exists(gt_path):
    sys.exit('Could not find GT file {0}'.format(gt_path))
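
# Expected CodaLab input layout (a sketch inferred from the checks above and the
# folder-structure message below; file names are illustrative):
# input_dir/
#   res/                       # unzipped participant submission
#     <sequence_name>/         # one folder per sequence in the ground-truth set
#       00000.png, 00001.png, ...   # indexed mask PNGs, one per frame
#   ref/                       # unzipped ground-truth reference data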

# Create dataset
dataset_eval = DAVISEvaluation(davis_root=gt_path,
                               gt_set=gt_set,
                               task=task,
                               codalab=True)

# Check directory structure
res_subfolders = os.listdir(submission_path)
if len(res_subfolders) == 1:
    sys.stdout.write(
        "Incorrect folder structure: the folders of the sequences have to be placed directly inside the "
        "zip.\nInside every folder of the sequences there must be an indexed PNG file for every frame.\n"
        "The indexes have to match with the initial frame.\n")
    sys.exit()

# Check that all sequences are there
missing = False
for seq in dataset_eval.dataset.get_sequences():
    if seq not in res_subfolders:
        sys.stdout.write(seq + " sequence is missing.\n")
        missing = True
if missing:
    sys.exit('Submission is incomplete: not all sequences are present.')

Example #3
csv_name_global = f'global_results-{args.set}.csv'
csv_name_per_sequence = f'per-sequence_results-{args.set}.csv'

# Check if the method has been evaluated before, if so read the results, otherwise compute the results
csv_name_global_path = os.path.join(args.results_path, csv_name_global)
csv_name_per_sequence_path = os.path.join(args.results_path,
                                          csv_name_per_sequence)
if os.path.exists(csv_name_global_path) and os.path.exists(
        csv_name_per_sequence_path):
    print('Using precomputed results...')
    table_g = pd.read_csv(csv_name_global_path)
    table_seq = pd.read_csv(csv_name_per_sequence_path)
else:
    print(f'Evaluating sequences for the {args.task} task...')
    # Create dataset and evaluate
    dataset_eval = DAVISEvaluation(davis_root=args.davis_path,
                                   task=args.task,
                                   gt_set=args.set)
    metrics_res = dataset_eval.evaluate(args.results_path)
    J, F = metrics_res['J'], metrics_res['F']

    # Generate dataframe for the general results
    g_measures = [
        'J&F-Mean', 'J-Mean', 'J-Recall', 'J-Decay', 'F-Mean', 'F-Recall',
        'F-Decay'
    ]
    final_mean = (np.mean(J["M"]) + np.mean(F["M"])) / 2.
    g_res = np.array([
        final_mean,
        np.mean(J["M"]),
        np.mean(J["R"]),
        np.mean(J["D"]),
        np.mean(F["M"]),
        np.mean(F["R"]),
        np.mean(F["D"])
    ])
    g_res = np.reshape(g_res, [1, len(g_res)])
    table_g = pd.DataFrame(data=g_res, columns=g_measures)
    with open(csv_name_global_path, 'w') as f:
        table_g.to_csv(f, index=False, float_format="%.3f")
    print(f'Global results saved in {csv_name_global_path}')

Example #4
import os
import sys
import time

import numpy as np
import pandas as pd

CURR_DIR = os.path.dirname(__file__)
from davis2017.evaluation import DAVISEvaluation

DAVIS_ROOT = os.path.join(CURR_DIR, "DAVIS2017-trainval")
RES_PATH = sys.argv[1]
IMSET = "DAVIS17val"
csv_name_global_path = f'{RES_PATH}/global_results-{IMSET}.csv'
csv_name_per_sequence_path = f'{RES_PATH}/per-sequence_results-{IMSET}.csv'

time_start = time.time()
dataset_eval = DAVISEvaluation(davis_root=DAVIS_ROOT,
                               task="semi-supervised",
                               gt_set="val",
                               use_parallel=True)
metrics_res = dataset_eval.evaluate(RES_PATH)
# metrics_res = dataset_eval.evaluate_parallel(RES_PATH)
J, F = metrics_res['J'], metrics_res['F']

# Generate dataframe for the general results
g_measures = [
    'J&F-Mean', 'J-Mean', 'J-Recall', 'J-Decay', 'F-Mean', 'F-Recall',
    'F-Decay'
]
final_mean = (np.mean(J["M"]) + np.mean(F["M"])) / 2.
g_res = np.array([
    final_mean,
    np.mean(J["M"]),
    np.mean(J["R"]),
    np.mean(J["D"]),
    np.mean(F["M"]),
    np.mean(F["R"]),
    np.mean(F["D"])
])
g_res = np.reshape(g_res, [1, len(g_res)])
table_g = pd.DataFrame(data=g_res, columns=g_measures)
table_g.to_csv(csv_name_global_path, index=False, float_format="%.3f")
print(f'Global results saved in {csv_name_global_path}')

# Generate a dataframe for the per-sequence results
seq_names = list(J['M_per_object'].keys())
seq_measures = ['Sequence', 'J-Mean', 'F-Mean']
J_per_object = [J['M_per_object'][x] for x in seq_names]
F_per_object = [F['M_per_object'][x] for x in seq_names]
table_seq = pd.DataFrame(data=list(zip(seq_names, J_per_object, F_per_object)),
                         columns=seq_measures)
table_seq.to_csv(csv_name_per_sequence_path, index=False, float_format="%.3f")
print(f'Per-sequence results saved in {csv_name_per_sequence_path}')
print(f'Total time: {time.time() - time_start:.1f}s')

Example #5
def benchmark(args):
    time_start = time()
    csv_name_global = f'global_results-{args.set}.csv'
    csv_name_per_sequence = f'per-sequence_results-{args.set}.csv'

    # Check if the method has been evaluated before, if so read the results, otherwise compute the results
    csv_name_global_path = os.path.join(args.results_path, csv_name_global)
    csv_name_per_sequence_path = os.path.join(args.results_path,
                                              csv_name_per_sequence)
    if os.path.exists(csv_name_global_path) and os.path.exists(
            csv_name_per_sequence_path):
        print('Using precomputed results...')
        table_g = pd.read_csv(csv_name_global_path)
        table_seq = pd.read_csv(csv_name_per_sequence_path)
    else:
        print(f'Evaluating sequences for the {args.task} task...')
        # Create dataset and evaluate
        dataset_eval = DAVISEvaluation(davis_root=args.davis_path,
                                       task=args.task,
                                       gt_set=args.set)
        metrics_res = dataset_eval.evaluate(args.results_path)
        J, F = metrics_res['J'], metrics_res['F']

        # Generate dataframe for the general results
        g_measures = [
            'J&F-Mean', 'J-Mean', 'J-Recall', 'J-Decay', 'F-Mean', 'F-Recall',
            'F-Decay'
        ]
        final_mean = (np.mean(J["M"]) + np.mean(F["M"])) / 2.
        g_res = np.array([
            final_mean,
            np.mean(J["M"]),
            np.mean(J["R"]),
            np.mean(J["D"]),
            np.mean(F["M"]),
            np.mean(F["R"]),
            np.mean(F["D"])
        ])
        g_res = np.reshape(g_res, [1, len(g_res)])
        table_g = pd.DataFrame(data=g_res, columns=g_measures)
        with open(csv_name_global_path, 'w') as f:
            table_g.to_csv(f, index=False, float_format="%.3f")
        print(f'Global results saved in {csv_name_global_path}')

        # Generate a dataframe for the per sequence results
        seq_names = list(J['M_per_object'].keys())
        seq_measures = ['Sequence', 'J-Mean', 'F-Mean']
        J_per_object = [J['M_per_object'][x] for x in seq_names]
        F_per_object = [F['M_per_object'][x] for x in seq_names]
        table_seq = pd.DataFrame(data=list(
            zip(seq_names, J_per_object, F_per_object)),
                                 columns=seq_measures)
        with open(csv_name_per_sequence_path, 'w') as f:
            table_seq.to_csv(f, index=False, float_format="%.3f")
        print(f'Per-sequence results saved in {csv_name_per_sequence_path}')

    # Print the results
    sys.stdout.write(
        f"--------------------------- Global results for {args.set} ---------------------------\n"
    )
    print(table_g.to_string(index=False))
    sys.stdout.write(
        f"\n---------- Per sequence results for {args.set} ----------\n")
    print(table_seq.to_string(index=False))
    total_time = time() - time_start
    sys.stdout.write('\nTotal time: ' + str(total_time) + '\n')
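

# Minimal driver sketch (assumed, not part of the original snippet): it only
# builds the argparse namespace with the attributes that benchmark() reads
# (davis_path, set, task, results_path); argument names and defaults here are
# illustrative.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='Evaluate DAVIS results')
    parser.add_argument('--davis_path', type=str, required=True,
                        help='Path to the DAVIS dataset root')
    parser.add_argument('--set', type=str, default='val',
                        help='Subset to evaluate, e.g. train or val')
    parser.add_argument('--task', type=str, default='semi-supervised',
                        help='DAVIS task, e.g. semi-supervised or unsupervised')
    parser.add_argument('--results_path', type=str, required=True,
                        help='Folder with the segmentation results to evaluate')
    benchmark(parser.parse_args())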