Example #1
def eval_epoch(model, eval_dataset, opt, save_submission_filename,
               tasks=("SVMR",), max_after_nms=100):
    """max_after_nms: always set to 100, since the eval script only evaluate top-100"""
    model.eval()
    logger.info("Computing scores")
    # logger.info("Start timing")
    # times = []
    # for _ in range(3):
    #     st_time = time.time()
    #     eval_submission_raw = get_eval_res(model, eval_dataset, opt, tasks, max_after_nms=max_after_nms)
    #     times += [time.time() - st_time]
    # times = torch.FloatTensor(times)

    eval_submission_raw = get_eval_res(model, eval_dataset, opt, tasks, max_after_nms=max_after_nms)

    IOU_THDS = (0.5, 0.7)
    logger.info("Saving/Evaluating before nms results")
    submission_path = os.path.join(opt.results_dir, save_submission_filename)
    eval_submission = get_submission_top_n(eval_submission_raw, top_n=max_after_nms)
    save_json(eval_submission, submission_path)

    metrics = eval_retrieval(eval_submission, eval_dataset.query_data,
                             iou_thds=IOU_THDS, match_number=not opt.debug, verbose=opt.debug,
                             use_desc_type=opt.dset_name == "tvr")
    # metrics["time_avg"] = float(times.mean())
    # metrics["time_std"] = float(times.std())
    save_metrics_path = submission_path.replace(".json", "_metrics.json")
    save_json(metrics, save_metrics_path, save_pretty=True, sort_keys=False)
    latest_file_paths = [submission_path, save_metrics_path]

    if opt.nms_thd != -1:
        logger.info("Performing nms with nms_thd {}".format(opt.nms_thd))
        eval_submission_after_nms = dict(video2idx=eval_submission_raw["video2idx"])
        for k, nms_func in POST_PROCESSING_MMS_FUNC.items():
            if k in eval_submission_raw:
                eval_submission_after_nms[k] = nms_func(eval_submission_raw[k],
                                                        nms_thd=opt.nms_thd,
                                                        max_before_nms=opt.max_before_nms,
                                                        max_after_nms=max_after_nms)

        logger.info("Saving/Evaluating nms results")
        submission_nms_path = submission_path.replace(".json", "_nms_thd_{}.json".format(opt.nms_thd))
        save_json(eval_submission_after_nms, submission_nms_path)
        metrics_nms = eval_retrieval(eval_submission_after_nms, eval_dataset.query_data,
                                     iou_thds=IOU_THDS, match_number=not opt.debug, verbose=opt.debug)
        save_metrics_nms_path = submission_nms_path.replace(".json", "_metrics.json")
        save_json(metrics_nms, save_metrics_nms_path, save_pretty=True, sort_keys=False)
        latest_file_paths += [submission_nms_path, save_metrics_nms_path]
    else:
        metrics_nms = None
    return metrics, metrics_nms, latest_file_paths
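
In typical use, a caller builds the submission filename from the evaluation split, runs the whole pass without gradients, and logs the returned metric dicts. The call below is a hedged usage sketch, not code from the repository: the filename pattern and the task tuple are assumptions, and model, eval_dataset, opt and logger are taken to be set up as in the example above.

import pprint
import torch

# Hypothetical caller (assumptions: opt.eval_split_name exists, logger is the module logger).
save_submission_filename = "inference_{}_predictions.json".format(opt.eval_split_name)
with torch.no_grad():  # eval_epoch switches the model to eval mode; gradients are not needed
    metrics, metrics_nms, latest_file_paths = eval_epoch(
        model, eval_dataset, opt, save_submission_filename,
        tasks=("SVMR",), max_after_nms=100)
logger.info("metrics (no NMS)\n{}".format(pprint.pformat(metrics, indent=4)))
if metrics_nms is not None:
    logger.info("metrics (with NMS)\n{}".format(pprint.pformat(metrics_nms, indent=4)))
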
Example #2
def eval_epoch(model,
               eval_dataset,
               opt,
               save_submission_filename,
               tasks=("SVMR", ),
               max_before_nms=1000,
               max_after_nms=100):
    model.eval()
    logger.info("Computing scores")
    logger.info("Start timing")
    # times = []
    # for _ in range(3):
    #     st_time = time.time()
    eval_res = compute_query2ctx_scores(model, eval_dataset, opt)
    logger.info("Generating predictions from scores")
    eval_submission_raw = dict(video2idx=eval_res["video2idx"])
    eval_submission_raw["VR"] = generate_vr_predictions_from_res(eval_res)
    # times += [time.time() - st_time]
    # times = torch.FloatTensor(times)
    IOU_THDS = (0.5, 0.7)

    logger.info("Saving/Evaluating before nms results")
    submission_path = os.path.join(opt.results_dir, save_submission_filename)
    eval_submission = get_submission_top_n(eval_submission_raw, top_n=max_after_nms)
    save_json(eval_submission, submission_path)

    metrics = eval_retrieval(eval_submission,
                             eval_dataset.query_data,
                             iou_thds=IOU_THDS,
                             match_number=not opt.debug,
                             verbose=opt.debug)
    # metrics["time_avg"] = float(times.mean())
    # metrics["time_std"] = float(times.std())
    save_metrics_path = submission_path.replace(".json", "_metrics.json")
    save_json(metrics, save_metrics_path, save_pretty=True, sort_keys=False)
    latest_file_paths = [submission_path, save_metrics_path]

    metrics_nms = None
    return metrics, metrics_nms, latest_file_paths
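
Both variants rely on get_submission_top_n to truncate the raw predictions before scoring. The helper below is a minimal sketch of what that truncation could look like, assuming each task entry is a list of per-query dicts whose "predictions" lists are already sorted by descending score; the exact field names are an assumption, not the repository's definition.

def get_submission_top_n(submission, top_n=100):
    """Sketch: keep only the top_n predictions per query for every task.

    Assumed format: {"video2idx": {...},
                     "VR": [{"desc_id": ..., "predictions": [[vid_idx, st, ed, score], ...]}, ...]}
    with each "predictions" list sorted by descending score.
    """
    top_n_submission = dict(video2idx=submission["video2idx"])
    for task, query_results in submission.items():
        if task == "video2idx":
            continue
        top_n_submission[task] = [
            dict(entry, predictions=entry["predictions"][:top_n])
            for entry in query_results
        ]
    return top_n_submission
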
Example #3
def eval_epoch(model,
               eval_dataset,
               opt,
               save_submission_filename,
               tasks=("SVMR", ),
               max_after_nms=100):
    """max_after_nms: always set to 100, since the eval script only evaluate top-100"""
    model.eval()
    logger.info("Computing scores")
    st_time = time.time()
    eval_submission_raw = get_eval_res(model, eval_dataset, opt, tasks)
    total_time = time.time() - st_time
    print("\n" + "\x1b[1;31m" + str(total_time) + "\x1b[0m", flush=True)

    IOU_THDS = (0.5, 0.7)  # (0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)
    logger.info("Saving/Evaluating before nms results")
    submission_path = os.path.join(opt.results_dir, save_submission_filename)
    eval_submission = get_submission_top_n(eval_submission_raw,
                                           top_n=max_after_nms)
    save_json(eval_submission, submission_path)

    if opt.eval_split_name == "val":  # since test_public has no GT
        metrics = eval_retrieval(eval_submission,
                                 eval_dataset.query_data,
                                 iou_thds=IOU_THDS,
                                 match_number=not opt.debug,
                                 verbose=opt.debug,
                                 use_desc_type=opt.dset_name == "tvr")
        save_metrics_path = submission_path.replace(".json", "_metrics.json")
        save_json(metrics,
                  save_metrics_path,
                  save_pretty=True,
                  sort_keys=False)
        latest_file_paths = [submission_path, save_metrics_path]
    else:
        metrics = None
        latest_file_paths = [
            submission_path,
        ]

    if opt.nms_thd != -1:
        logger.info("Performing nms with nms_thd {}".format(opt.nms_thd))
        eval_submission_after_nms = dict(
            video2idx=eval_submission_raw["video2idx"])
        for k, nms_func in POST_PROCESSING_MMS_FUNC.items():
            if k in eval_submission_raw:
                eval_submission_after_nms[k] = nms_func(
                    eval_submission_raw[k],
                    nms_thd=opt.nms_thd,
                    max_before_nms=opt.max_before_nms,
                    max_after_nms=max_after_nms)
        logger.info("Saving/Evaluating nms results")
        submission_nms_path = submission_path.replace(
            ".json", "_nms_thd_{}.json".format(opt.nms_thd))
        save_json(eval_submission_after_nms, submission_nms_path)
        if opt.eval_split_name == "val":
            metrics_nms = eval_retrieval(eval_submission_after_nms,
                                         eval_dataset.query_data,
                                         iou_thds=IOU_THDS,
                                         match_number=not opt.debug,
                                         verbose=opt.debug)
            save_metrics_nms_path = submission_nms_path.replace(
                ".json", "_metrics.json")
            save_json(metrics_nms,
                      save_metrics_nms_path,
                      save_pretty=True,
                      sort_keys=False)
            latest_file_paths += [submission_nms_path, save_metrics_nms_path]
        else:
            metrics_nms = None
            latest_file_paths = [
                submission_nms_path,
            ]
    else:
        metrics_nms = None
    return metrics, metrics_nms, latest_file_paths
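
The NMS branch dispatches through POST_PROCESSING_MMS_FUNC, which maps each task name to a post-processing function applied to that task's raw predictions. A minimal sketch of the per-query temporal NMS such a function would typically perform is given below; the [video_idx, start, end, score] prediction format and the 1D IoU definition are assumptions consistent with the submissions above, not the repository's exact implementation.

def temporal_nms(predictions, nms_thd, max_after_nms=100):
    """Sketch: greedy 1D NMS over a single query's predictions.

    predictions: list of [video_idx, start, end, score], sorted by descending score.
    Moments are only compared when they come from the same video.
    """
    kept = []
    for cand in predictions:
        if len(kept) >= max_after_nms:
            break
        suppressed = False
        for sel in kept:
            if cand[0] != sel[0]:  # different videos never suppress each other
                continue
            inter = max(0.0, min(cand[2], sel[2]) - max(cand[1], sel[1]))
            union = (cand[2] - cand[1]) + (sel[2] - sel[1]) - inter
            if union > 0 and inter / union >= nms_thd:
                suppressed = True
                break
        if not suppressed:
            kept.append(cand)
    return kept
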
Example #4
def eval_epoch(model,
               eval_dataset,
               opt,
               save_submission_filename,
               tasks=("SVMR", ),
               max_before_nms=1000,
               max_after_nms=100):
    model.eval()
    logger.info("Computing scores")
    logger.info("Start timing")
    # times = []  # do not use
    # for _ in range(3):
    #     st_time = time.time()
    if opt.use_intermediate:
        intermediate_cache_path = os.path.join(
            opt.results_dir, "{}_eval_res.pt".format(opt.eval_split_name))
        if not os.path.exists(intermediate_cache_path):
            logger.info("Saving intermediate results {}.".format(
                intermediate_cache_path))
            eval_res = compute_query_proposal_distance(model,
                                                       eval_dataset,
                                                       opt,
                                                       tasks=tasks)
            torch.save(eval_res, intermediate_cache_path)
        else:
            logger.info("Loading intermediate results {}.".format(
                intermediate_cache_path))
            eval_res = torch.load(intermediate_cache_path)
    else:
        logger.info(
            "Running without saving intermediate results, you might want to turn on --use_intermediate."
        )
        eval_res = compute_query_proposal_distance(model,
                                                   eval_dataset,
                                                   opt,
                                                   tasks=tasks)
    # del model  # We don't need the model anymore

    # eval_res = compute_query_proposal_distance(model, eval_dataset, opt, tasks=tasks)

    logger.info("Generating predictions from scores")
    eval_submission_raw = dict(video2idx=eval_res["video2idx"])
    if "SVMR" in tasks:
        eval_submission_raw["SVMR"] = generate_svmr_predictions_from_res(
            eval_res, max_prop_per_query=max_before_nms)
    # vcmr_loading_time = 0
    if "VCMR" in tasks:
        if opt.external_inference_vr_res_path is not None:
            logger.info("Using external VR results from {}".format(
                opt.external_inference_vr_res_path))
            # vcmr_loading_time = time.time()
            eval_res["external_query2video"] = load_external_vr_res(
                opt.external_inference_vr_res_path, top_n_vr_videos=5)
            # vcmr_loading_time = time.time() - vcmr_loading_time
            vcmr_res, vr_res = generate_vcmr_predictions_from_res_with_external(
                eval_res, max_prop_per_query=max_before_nms)
        else:
            vcmr_res, vr_res = generate_vcmr_predictions_from_res(
                eval_res, max_prop_per_query=max_before_nms)
        eval_submission_raw["VCMR"] = vcmr_res
        eval_submission_raw["VR"] = vr_res
        # times += [time.time() - st_time - vcmr_loading_time]
    # times = torch.FloatTensor(times)
    IOU_THDS = (0.5, 0.7)

    logger.info("Saving/Evaluating before nms results")
    submission_path = os.path.join(opt.results_dir, save_submission_filename)
    eval_submission = get_submission_top_n(eval_submission_raw,
                                           top_n=max_after_nms)
    if max_after_nms < 1000:
        save_json(eval_submission, submission_path)
    else:
        torch.save(eval_submission, submission_path.replace(".json", ".pt"))

    metrics = eval_retrieval(eval_submission,
                             eval_dataset.query_data,
                             iou_thds=IOU_THDS,
                             match_number=not opt.debug,
                             verbose=opt.debug,
                             use_desc_type=opt.dset_name == "tvr")
    # metrics["time_avg"] = float(times.mean())
    # metrics["time_std"] = float(times.std())
    save_metrics_path = submission_path.replace(".json", "_metrics.json")
    save_json(metrics, save_metrics_path, save_pretty=True, sort_keys=False)
    latest_file_paths = [submission_path, save_metrics_path]

    if opt.nms_thd != -1:
        logger.info("Performing nms with nms_thd {}".format(opt.nms_thd))
        eval_submission_after_nms = dict(
            video2idx=eval_submission_raw["video2idx"])
        for k, nms_func in POST_PROCESSING_MMS_FUNC.items():
            if k in eval_submission_raw:
                eval_submission_after_nms[k] = nms_func(
                    eval_submission_raw[k],
                    nms_thd=opt.nms_thd,
                    max_before_nms=max_before_nms,
                    max_after_nms=max_after_nms)

        logger.info("Saving/Evaluating nms results")
        submission_nms_path = submission_path.replace(
            ".json", "_nms_thd_{}.json".format(opt.nms_thd))
        save_json(eval_submission_after_nms, submission_nms_path)
        metrics_nms = eval_retrieval(eval_submission_after_nms,
                                     eval_dataset.query_data,
                                     iou_thds=IOU_THDS,
                                     match_number=not opt.debug,
                                     verbose=opt.debug)
        save_metrics_nms_path = submission_nms_path.replace(
            ".json", "_metrics.json")
        save_json(metrics_nms,
                  save_metrics_nms_path,
                  save_pretty=True,
                  sort_keys=False)
        latest_file_paths += [submission_nms_path, save_metrics_nms_path]
    else:
        metrics_nms = None
    return metrics, metrics_nms, latest_file_paths
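
Example #4 wraps the expensive query-proposal scoring step in a simple disk cache keyed on the evaluation split. The same compute-or-load idiom generalizes to any costly evaluation stage; the helper below is a hedged sketch of that pattern using torch.save/torch.load, with the cache path and compute function supplied by the caller (e.g. a closure over compute_query_proposal_distance).

import logging
import os
import torch

logger = logging.getLogger(__name__)


def compute_or_load(cache_path, compute_fn, use_cache=True):
    """Sketch of the --use_intermediate pattern: reuse a serialized result when available."""
    if use_cache and os.path.exists(cache_path):
        logger.info("Loading intermediate results {}.".format(cache_path))
        return torch.load(cache_path)
    result = compute_fn()
    if use_cache:
        logger.info("Saving intermediate results {}.".format(cache_path))
        torch.save(result, cache_path)
    return result
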