def validate_json(json_path, datasets, logger):
    # Check that the json file exists
    if not os.path.isfile(json_path):
        logger.add_new_log('Submission does not contain a json file')
        return
    # Load the json file
    try:
        method_list = load_json(json_path)
    except Exception:
        logger.add_new_log(
            'The following error occurred while loading the json file:'
            '\n   {}'.format(sys.exc_info()))
        return

    # Validate the top-level structure
    if not isinstance(method_list, list):
        logger.add_new_log(
            'The json file should contain a list of methods; please refer to '
            'the example json file.')
        return

    for i, method in enumerate(method_list):
        # Use .get() so a malformed entry is logged below instead of
        # crashing on a missing key here
        label = method.get('config_common', {}).get('json_label', '<unnamed>')
        print('Validating method {}/{}: "{}"'.format(
            i + 1, len(method_list), label))
        try:
            validate_method(method, is_challenge=True, datasets=datasets)
        except Exception:
            logger.add_new_log(
                'The following error occurred while validating the json file:'
                '\n   {}'.format(sys.exc_info()))
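
# Example call (sketch): `logger` is assumed to be the project's logger
# object exposing the add_new_log(msg) method used above; its construction
# here is hypothetical.
#
#   logger = Logger()
#   validate_json('submission/config.json',
#                 datasets=['phototourism'], logger=logger)
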
def main(cfg):
    ''' Main routine for the benchmark '''

    # Read data and splits
    for dataset in ['phototourism']:
        for subset in ['val', 'test']:
            setattr(cfg, 'scenes_{}_{}'.format(dataset, subset),
                    './json/data/{}_{}.json'.format(dataset, subset))
            setattr(cfg, 'splits_{}_{}'.format(dataset, subset),
                    './json/bag_size/{}_{}.json'.format(dataset, subset))
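            # e.g. cfg.scenes_phototourism_val = './json/data/phototourism_val.json'
            #      cfg.splits_phototourism_val = './json/bag_size/phototourism_val.json'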

    # Read and validate the list of methods
    method_list = load_json(cfg.json_method)
    for i, method in enumerate(method_list):
        print('Validating method {}/{}: "{}"'.format(
            i + 1, len(method_list), method['config_common']['json_label']))
        validate_method(method, is_challenge=cfg.is_challenge)

    # Back up original config
    cfg_orig = deepcopy(cfg)
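    # job_dict caches created jobs across methods, scenes, and tasks;
    # create_eval_jobs presumably consults it so that shared pipeline
    # stages are scheduled only once (an assumption, not verified here)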
    job_dict = {}

    # Loop over methods, datasets/scenes, and tasks
    for method in method_list:
        # Accumulate packing dependencies over datasets and runs
        all_stereo_jobs = []
        all_multiview_jobs = []
        all_relocalization_jobs = []

        for dataset in ['phototourism']:
            # Load data config
            scene_list = load_json(
                getattr(cfg_orig,
                        'scenes_{}_{}'.format(dataset, cfg_orig.subset)))
            bag_size_json = load_json(
                getattr(cfg_orig,
                        'splits_{}_{}'.format(dataset, cfg_orig.subset)))
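            # Each entry of the bag-size json is expected to look like
            # {'bag_size': 3, 'num_in_bag': 100} (inferred from the keys
            # read below; the concrete values are illustrative)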
            bag_size_list = [b['bag_size'] for b in bag_size_json]
            bag_size_num = [b['num_in_bag'] for b in bag_size_json]

            for scene in scene_list:
                print('Working on {}: {}/{}'.format(
                    method['config_common']['json_label'], dataset, scene))

                # For each task
                for task in ['stereo', 'multiview', 'relocalization']:
                    # Skip if the key does not exist or it is empty
                    cur_key = 'config_{}_{}'.format(dataset, task)
                    if cur_key not in method or not method[cur_key]:
                        print(
                            'Empty config for "{}", skipping!'.format(cur_key))
                        continue

                    # Append method to config
                    cfg = deepcopy(cfg_orig)
                    cfg.method_dict = deepcopy(method)
                    cfg.dataset = dataset
                    cfg.task = task
                    cfg.scene = scene
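
                    # Each stage below receives the previous stage's jobs as
                    # its dependencies: feature -> match -> filter, and then,
                    # per run, model -> stereo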

                    # Features
                    feature_jobs = create_eval_jobs([], 'feature', cfg,
                                                    job_dict)

                    # Matches
                    match_jobs = create_eval_jobs(feature_jobs, 'match', cfg,
                                                  job_dict)

                    # Filter
                    match_inlier_jobs = create_eval_jobs(
                        match_jobs, 'filter', cfg, job_dict)

                    # Empty dependencies
                    stereo_jobs = []
                    multiview_jobs = []
                    relocalization_jobs = []

                    num_runs = getattr(
                        cfg, 'num_runs_{}_{}'.format(cfg.subset, task))
                    for run in range(num_runs):
                        cfg.run = run

                        # Pose estimation and stereo evaluation
                        if task == 'stereo' and cfg.eval_stereo:
                            geom_model_jobs = create_eval_jobs(
                                match_inlier_jobs, 'model', cfg, job_dict)
                            new_stereo_jobs = create_eval_jobs(
                                geom_model_jobs, 'stereo', cfg, job_dict)
                            stereo_jobs += new_stereo_jobs
                            # Accumulate only this run's jobs: appending the
                            # full stereo_jobs list here would duplicate
                            # earlier runs' jobs in the packing dependencies
                            all_stereo_jobs += new_stereo_jobs

                        # Visualization for stereo
                        if task == 'stereo' and cfg.run_viz:
                            eval_viz_stereo(stereo_jobs, cfg)

                        # Multiview
                        if task == 'multiview' and cfg.eval_multiview:
                            new_multiview_jobs = eval_multiview(
                                match_inlier_jobs, cfg, bag_size_list,
                                bag_size_num)
                            multiview_jobs += new_multiview_jobs
                            # As above, accumulate only this run's jobs
                            all_multiview_jobs += new_multiview_jobs

                        # Visualization for multiview (COLMAP)
                        if task == 'multiview' and cfg.run_viz:
                            eval_viz_colmap(multiview_jobs, cfg)

                        if task == 'relocalization' and cfg.eval_relocalization:
                            raise NotImplementedError(
                                'TODO relocalization task')

        # Packing -- can be skipped with --skip_packing=True
        # For instance, when only generating visualizations
        if not cfg.skip_packing:
            cfg = deepcopy(cfg_orig)
            cfg.method_dict = deepcopy(method)
            eval_packing(
                all_stereo_jobs + all_multiview_jobs + all_relocalization_jobs,
                cfg)
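

if __name__ == '__main__':
    # Minimal invocation sketch, not the project's real entry point: the
    # benchmark builds `cfg` from its own argument parser. This hypothetical
    # namespace lists only the fields that main() reads above.
    from types import SimpleNamespace

    cfg = SimpleNamespace(
        json_method='./json/method/example.json',  # hypothetical path
        subset='val',
        is_challenge=False,
        eval_stereo=True,
        eval_multiview=True,
        eval_relocalization=False,
        run_viz=False,
        skip_packing=False,
        num_runs_val_stereo=1,
        num_runs_val_multiview=1,
        num_runs_val_relocalization=1,
    )
    main(cfg)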