Example #1
def __init__(self, name):
    # Validate the catalog entry: the dataset must be registered and its
    # image directory and annotation file must exist on disk
    assert dataset_catalog.contains(name), \
        'Unknown dataset name: {}'.format(name)
    assert os.path.exists(dataset_catalog.get_im_dir(name)), \
        'Im dir \'{}\' not found'.format(dataset_catalog.get_im_dir(name))
    assert os.path.exists(dataset_catalog.get_ann_fn(name)), \
        'Ann fn \'{}\' not found'.format(dataset_catalog.get_ann_fn(name))
    logger.debug('Creating: {}'.format(name))
    self.name = name
    self.image_directory = dataset_catalog.get_im_dir(name)
    self.image_prefix = dataset_catalog.get_im_prefix(name)
    self.COCO = COCO(dataset_catalog.get_ann_fn(name))
    self.debug_timer = Timer()
    # Set up dataset classes
    category_ids = self.COCO.getCatIds()
    categories = [c['name'] for c in self.COCO.loadCats(category_ids)]
    self.category_to_id_map = dict(zip(categories, category_ids))
    self.classes = ['__background__'] + categories
    self.num_classes = len(self.classes)
    # COCO category ids are not contiguous; remap them to 1..num_classes - 1
    # (0 is reserved for '__background__')
    self.json_category_id_to_contiguous_id = {
        v: i + 1
        for i, v in enumerate(self.COCO.getCatIds())
    }
    self.contiguous_category_id_to_json_id = {
        v: k
        for k, v in self.json_category_id_to_contiguous_id.items()
    }
    self._init_keypoints()
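
For context, a minimal usage sketch of this constructor. The JsonDataset class name, the module path, and the 'coco_2014_train' catalog entry are assumptions (the snippet shows only the method body):

# Hypothetical usage; requires the dataset to be registered in dataset_catalog
from detectron.datasets.json_dataset import JsonDataset  # assumed module path

ds = JsonDataset('coco_2014_train')
print(ds.num_classes)                           # 81 for COCO: '__background__' + 80
print(ds.json_category_id_to_contiguous_id[1])  # COCO id 1 ('person') -> 1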
Example #2
def main(args):
    logger = logging.getLogger(__name__)
      
    merge_cfg_from_file(args.cfg)
    cfg.NUM_GPUS = 1
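    # Select the label set: MOT (14 classes) or COCO (80 categories + background)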
    if "mot-classes" in args.opts:
        dummy_dataset = dummy_datasets.get_mot_dataset()
        cfg.NUM_CLASSES = 14
    else:
        dummy_dataset = dummy_datasets.get_coco_dataset()
        cfg.NUM_CLASSES = 81
    for i, weights_file in enumerate(args.weights_pre_list):
        args.weights_pre_list[i] = cache_url(weights_file, cfg.DOWNLOAD_CACHE)
    for i, weights_file in enumerate(args.weights_post_list):
        args.weights_post_list[i] = cache_url(weights_file, cfg.DOWNLOAD_CACHE)
    assert_and_infer_cfg(cache_urls=False)
    logger.info('Testing with config:')
    logger.info(pprint.pformat(cfg))

    # If True: evaluate with respect to specified parameters (models from
    # specific training iterations, or inference hyper-parameters).
    # If False: infer the test sequences for evaluation on the MOT benchmark
    # server.
    EVAL = "eval" in args.opts

    train_dir = get_output_dir(cfg.TRAIN.DATASETS, training=True)
    train_dir_split = train_dir.split("/")
    train_dir = os.path.join("/".join(train_dir_split[:-1]) + \
        args.model_suffix, train_dir_split[-1])

    model_list = []
    files = os.listdir(train_dir)

    if EVAL:
        test_dir = "/".join(get_output_dir(cfg.TEST.DATASETS,
            training=False).split("/")[:-1]) + args.model_suffix
        # Evaluation with respect to inference hyper-parameters
        if HYPER_PARAM is not None:
            test_dir = os.path.join(test_dir, HYPER_PARAM.lower())
            model_param = ((args.model, param) for param in PARAM_RANGE)
        # Evaluation with respect to weights from specific training iterations
        else:
            model_param = []
            for f in files:
                if f.startswith("model_") and f.endswith(".pkl"):
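                    # Extract the training iteration from names like "model_iter20000.pkl"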
                    iter_string = re.findall(r'(?<=model_iter)\d+(?=\.pkl)', f)
                    if len(iter_string) > 0:
                        model_param.append((f, int(iter_string[0])))
            model_param.sort(key=lambda tup: tup[1])
            if "model_final.pkl" in files:
                model_param.append(("model_final.pkl", "final"))
        # Tracking evaluation by interface to matlab engine
        seq_map_path = os.path.join(test_dir, "seq_map.txt")
        if not os.path.exists(test_dir):
            os.makedirs(test_dir)
        with open(seq_map_path, "w+") as seq_map:
            seq_map.write("name\n")
            for dataset in cfg.TEST.DATASETS:
                seq_map.write(get_im_dir(dataset).split("/")[-2] + '\n')
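        # The devkit resolves sequence maps relative to its "seqmaps" directory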
        seq_map_path = os.path.relpath(os.path.abspath(seq_map_path),
            os.path.expanduser(os.path.join(args.devkit_path, "seqmaps")))
        matlab_eng = get_matlab_engine(args.devkit_path)
        eval_detections = lambda res_dir, gt_dir: eval_detections_matlab(
            matlab_eng, seq_map_path, res_dir, gt_dir, 'MOT17')
    else:
        if args.model is not None:
            model_param = ((args.model, None),)
        else:
            model_param = (("model_final.pkl", "final"),)

    # Iterate through (model, parameter) tuples
    for i, (model, param) in enumerate(model_param):
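        # Evaluate only every (args.skip + 1)-th model, shifted by args.offset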
        if EVAL and (i + 1 + args.offset) % (args.skip + 1) != 0:
            logger.info("Skipping {}".format(model))
            continue
        # Hyper-parameter inference: override the config value under test
        elif HYPER_PARAM is not None:
            cfg.immutable(False)
            setattr(cfg.TRCNN, HYPER_PARAM, param)
            assert_and_infer_cfg(cache_urls=False)
            print(cfg.TRCNN)
        if not EVAL or param >= args.start_at:
            weights_list = args.weights_pre_list + [os.path.join(train_dir, model)] + \
                args.weights_post_list
            preffix_list = args.preffix_list if len(args.preffix_list) \
                else [""] * (len(args.weights_pre_list) + len(args.weights_post_list) + 1)
            workspace.ResetWorkspace()
            model = infer_engine.initialize_mixed_model_from_cfg(weights_list,
                preffix_list=preffix_list)
            logger.info("Processing {}".format(param))
            timing = []
            # iterate through test sequences
            for dataset in cfg.TEST.DATASETS:
                tracking = Tracking(args.thresh, cfg.TRCNN.MAX_BACK_TRACK)
                logger.info("Processing dataset {}".format(dataset))
                im_dir = get_im_dir(dataset)
                vis = None
                if EVAL:
                    output_file = os.path.join(test_dir, str(param),
                        im_dir.split("/")[-2] + ".txt")
                # Otherwise, visualize detections while writing the tracking
                # detection file
                else:
                    output_dir = os.path.join("outputs/MOT17", im_dir.split("/")[-2])
                    if "vis" in args.opts:
                        vis = {
                            "output-dir": output_dir,
                            "dummy-dataset": dummy_dataset,
                            "show-class": "show-class" in args.opts,
                            "show-track": "show-track" in args.opts,
                            "thresh": args.thresh,
                            "track-thresh": cfg.TRCNN.DETECTION_THRESH,
                            "n-colors": 15,
                        }
                    output_file = os.path.join("outputs/MOT17",
                        im_dir.split("/")[-2] + ".txt")
                # Use custom proposals if provided
                if "proposals" in args.opts:
                    proposals = pickle.load(open(os.path.join('/', *(im_dir.split("/")[:-1] + \
                        ["det/proposals.pkl"])), 'rb'))
                else:
                    proposals = None
                head, tail = os.path.split(output_file) 
                if not os.path.exists(head):
                    os.makedirs(head)
                start = time.time()
                # Run inference
                infer_track_sequence(model, im_dir, tracking, proposals=proposals,
                    vis=vis, det_file=output_file)
                delta = time.time() - start
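                # Record throughput (frames per second) for this sequence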
                freq = float(len(os.listdir(im_dir))) / delta
                timing.append(freq)

            # Save evaluation results
            if EVAL:
                val_directory = os.path.abspath(head) + "/"
                eval_detections(val_directory,
                    os.path.abspath(os.path.join(*im_dir.split("/")[:-2])) + "/")
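                # Append the average FPS to the one-line eval summary written by the devkit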
                with open(val_directory + "eval.txt", "r") as f:
                    temp = f.readline().strip()
                with open(val_directory + "eval.txt", "w+") as f:
                    f.write("{},{}".format(temp, np.average(timing)))
Example #4
                        dest='dataset_name',
                        help='Dataset name according to dataset_catalog',
                        default='nucoco_train')

    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse_args()
    # fig = plt.figure(figsize=(16, 6))

    # Load the nucoco dataset
    dataset_name = args.dataset_name
    ann_file = dataset_catalog.get_ann_fn(dataset_name)
    img_dir = dataset_catalog.get_im_dir(dataset_name)
    coco = COCO_PLUS(ann_file, img_dir)

    # Load the proposals
    proposals = rrpn_loader(args.proposals_file)

    for i in range(1, len(coco.dataset['images']), 10):
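        # Plot the image together with its proposal boxes, scores, and pointcloud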
        fig = plt.figure(figsize=(16, 6))
        img_id = coco.dataset['images'][i]['id']
        scores = proposals[img_id]['scores']
        boxes = proposals[img_id]['boxes']
        points = coco.imgToPointcloud[img_id]['points']

        img_path = os.path.join(img_dir, coco.imgs[img_id]["file_name"])
        # print(img_path)