def multi_gpu_test_net_on_dataset(num_images, output_dir):
    binary = os.path.join('tools/test_net.py')
    assert os.path.exists(binary), 'Binary {} not found'.format(binary)

    # Run inference in parallel in subprocesses
    outputs = subprocess_utils.process_in_parallel('detection', num_images,
                                                   binary, output_dir)

    # Collate the results from each subprocess
    all_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_segms = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    all_keyps = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
    for det_data in outputs:
        all_boxes_batch = det_data['all_boxes']
        all_segms_batch = det_data['all_segms']
        all_keyps_batch = det_data['all_keyps']
        for j in range(1, cfg.MODEL.NUM_CLASSES):
            all_boxes[j] += all_boxes_batch[j]
            all_segms[j] += all_segms_batch[j]
            all_keyps[j] += all_keyps_batch[j]
    det_file = os.path.join(output_dir, 'detections.pkl')
    cfg_yaml = yaml.dump(cfg)
    robust_pickle_dump(
        dict(all_boxes=all_boxes,
             all_segms=all_segms,
             all_keyps=all_keyps,
             cfg=cfg_yaml), det_file)
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))

    return all_boxes, all_segms, all_keyps
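Every example on this page persists its results with robust_pickle_dump. A minimal sketch of such a helper, assuming the "robust" part means an atomic temp-file write; the real utility may differ:

import os
import pickle
import tempfile


def robust_pickle_dump(data_dict, file_name):
    # Write to a temp file in the target directory, then rename, so a
    # crash mid-dump never leaves a truncated pickle behind.
    # (Sketch only; the real helper's behavior is an assumption.)
    file_name = os.path.abspath(file_name)
    tmp = tempfile.NamedTemporaryFile(
        mode='wb', dir=os.path.dirname(file_name), delete=False)
    try:
        pickle.dump(data_dict, tmp, pickle.HIGHEST_PROTOCOL)
        tmp.close()
        os.rename(tmp.name, file_name)
    except Exception:
        tmp.close()
        os.remove(tmp.name)
        raise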
Example #3
def main(name_scope, gpu_dev, num_images, args):
    t = args.t
    model = initialize_model_from_cfg()
    num_classes = cfg.MODEL.NUM_CLASSES
    all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)
    temp_frame_folder = osp.join(
        args.out_path, args.vid_name + '_frames/', str(t))
    # Sort so frames are processed in temporal order; glob alone returns
    # files in arbitrary order, which would scramble the per-index results.
    imgs = sorted(glob.glob(temp_frame_folder + '/*.jpg'))
    for i in range(len(imgs)):
        if i % 100 == 0:
            print('Processing Detection for Frame %d' % (i + 1))
        im_ = cv2.imread(imgs[i])
        assert im_ is not None
        im_ = np.expand_dims(im_, 0)
        with core.NameScope(name_scope):
            with core.DeviceScope(gpu_dev):
                cls_boxes_i, cls_segms_i, cls_keyps_i = im_detect_all(
                    model, im_, None)  # TODO: Parallelize detection

        extend_results(i, all_boxes, cls_boxes_i)
        if cls_segms_i is not None:
            extend_results(i, all_segms, cls_segms_i)
        if cls_keyps_i is not None:
            extend_results(i, all_keyps, cls_keyps_i)

    det_name = args.vid_name + '_' + str(args.t) + '_detections.pkl'
    det_file = osp.join(args.out_path, det_name)
    robust_pickle_dump(dict(all_keyps=all_keyps), det_file)
    shutil.rmtree(osp.join(args.out_path, args.vid_name + '_frames'))
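empty_results and extend_results are not shown on this page; a sketch consistent with how they are indexed throughout these examples (class 0 is background):

def empty_results(num_classes, num_images):
    # One slot per (class, image) pair for each result type.
    all_boxes = [[[] for _ in range(num_images)] for _ in range(num_classes)]
    all_segms = [[[] for _ in range(num_images)] for _ in range(num_classes)]
    all_keyps = [[[] for _ in range(num_images)] for _ in range(num_classes)]
    return all_boxes, all_segms, all_keyps


def extend_results(index, all_res, im_res):
    # Record image `index`'s per-class results; class 0 (background) is skipped.
    for j in range(1, len(im_res)):
        all_res[j][index] = im_res[j]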
Example #4
def generate_rpn_on_range(ind_range=None):
    assert cfg.TEST.WEIGHTS != '', \
        'TEST.WEIGHTS must be set to the model file to test'
    assert cfg.TEST.DATASET != '', \
        'TEST.DATASET must be set to the dataset name to test'
    assert cfg.MODEL.RPN_ONLY or cfg.MODEL.FASTER_RCNN

    im_list, start_ind, end_ind, total_num_images = get_image_list(ind_range)
    output_dir = get_output_dir(training=False)
    logger.info(
        'Output will be saved to: {:s}'.format(os.path.abspath(output_dir)))

    model = model_builder.create(cfg.MODEL.TYPE, train=False)
    model_builder.add_inputs(model)
    nu.initialize_from_weights_file(model, cfg.TEST.WEIGHTS)
    workspace.CreateNet(model.net)

    boxes, scores, ids = im_list_proposals(
        model,
        im_list,
        start_ind=start_ind,
        end_ind=end_ind,
        total_num_images=total_num_images)

    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        rpn_name = 'rpn_proposals_range_%s_%s.pkl' % tuple(ind_range)
    else:
        rpn_name = 'rpn_proposals.pkl'
    rpn_file = os.path.join(output_dir, rpn_name)
    robust_pickle_dump(
        dict(boxes=boxes, scores=scores, ids=ids, cfg=cfg_yaml), rpn_file)
    logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))
    return boxes, scores, ids, rpn_file
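A hypothetical invocation, showing the ind_range sharding semantics implied by the filename logic above:

# A worker covering the first 2500 images writes
# 'rpn_proposals_range_0_2500.pkl'; with ind_range=None a single process
# covers the whole dataset and writes 'rpn_proposals.pkl' instead.
boxes, scores, ids, rpn_file = generate_rpn_on_range(ind_range=(0, 2500))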
Example #5
def multi_gpu_generate_rpn_on_dataset(num_images, output_dir):
    # TODO(rbg): Need to have non-FB specific code path for OSS
    if cfg.CLUSTER.ON_CLUSTER:
        binary_dir = os.path.abspath(os.getcwd())
        binary = os.path.join(binary_dir, 'test_net.xar')
    else:
        assert parutil.is_lpar(), 'Binary must be inplace package style'
        binary_dir = os.path.dirname(parutil.get_runtime_path())
        binary = os.path.join(binary_dir, 'test_net.par')
    assert os.path.exists(binary), 'Binary {} not found'.format(binary)

    # Run inference in parallel in subprocesses
    outputs = subprocess_utils.process_in_parallel(
        'rpn_proposals', num_images, binary, output_dir)

    # Collate the results from each subprocess
    boxes, scores, ids = [], [], []
    for rpn_data in outputs:
        boxes += rpn_data['boxes']
        scores += rpn_data['scores']
        ids += rpn_data['ids']
    rpn_file = os.path.join(output_dir, 'rpn_proposals.pkl')
    cfg_yaml = yaml.dump(cfg)
    robust_pickle_dump(
        dict(boxes=boxes, scores=scores, ids=ids, cfg=cfg_yaml), rpn_file)
    logger.info('Wrote RPN proposals to {}'.format(os.path.abspath(rpn_file)))
    return boxes, scores, ids, rpn_file
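A sketch of the fan-out both multi-GPU helpers rely on. The '<tag>_range_<start>_<end>.pkl' naming is consistent with the range filenames written by generate_rpn_on_range and test_net on this page, but the '--range' flag and GPU pinning are assumptions; the real subprocess_utils may differ.

import os
import pickle
import subprocess

import numpy as np


def process_in_parallel(tag, total_num_images, binary, output_dir):
    # Shard image indices across the GPUs configured in the global cfg
    # (assumes total_num_images >= cfg.NUM_GPUS).
    subinds = np.array_split(np.arange(total_num_images), cfg.NUM_GPUS)
    processes = []
    for gpu_id, inds in enumerate(subinds):
        start, end = int(inds[0]), int(inds[-1]) + 1
        env = os.environ.copy()
        env['CUDA_VISIBLE_DEVICES'] = str(gpu_id)  # pin worker to one GPU
        cmd = '{} --range {} {}'.format(binary, start, end)
        processes.append(
            (start, end, subprocess.Popen(cmd, shell=True, env=env)))
    # Wait for every worker, then load the per-range pickles in order
    outputs = []
    for start, end, p in processes:
        assert p.wait() == 0, 'Range [{}, {}) failed'.format(start, end)
        range_file = os.path.join(
            output_dir, '{}_range_{}_{}.pkl'.format(tag, start, end))
        with open(range_file, 'rb') as f:
            outputs.append(pickle.load(f))
    return outputs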
Example #6
def _do_detection_eval(json_dataset, res_file, output_dir):
    coco_dt = json_dataset.COCO.loadRes(str(res_file))
    coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    _log_detection_eval_metrics(json_dataset, coco_eval)
    eval_file = os.path.join(output_dir, 'detection_results.pkl')
    robust_pickle_dump(coco_eval, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
def _do_segmentation_eval(json_dataset, res_file, output_dir):
    coco_dt = json_dataset.COCO.loadRes(str(res_file))
    coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'segm')
    coco_eval.evaluate()
    coco_eval.accumulate()
    _log_detection_eval_metrics(json_dataset, coco_eval)
    eval_file = os.path.join(output_dir, 'segmentation_results.pkl')
    robust_pickle_dump(coco_eval, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
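Both eval helpers hand COCO.loadRes a COCO-format results file; a minimal illustration of the expected JSON (the values are made up):

import json

results = [{'image_id': 42, 'category_id': 1,
            'bbox': [10.0, 20.0, 50.0, 80.0],  # [x, y, width, height]
            'score': 0.98}]
with open('bbox_results.json', 'w') as f:
    json.dump(results, f)
# _do_detection_eval(json_dataset, 'bbox_results.json', output_dir)
# For 'segm', each entry carries an RLE 'segmentation' instead of 'bbox'.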
Example #8
def _do_keypoint_eval(json_dataset, res_file, output_dir):
    ann_type = 'keypoints'
    imgIds = json_dataset.COCO.getImgIds()
    imgIds.sort()
    coco_dt = json_dataset.COCO.loadRes(res_file)
    coco_eval = COCOeval(json_dataset.COCO, coco_dt, ann_type)
    coco_eval.params.imgIds = imgIds
    coco_eval.evaluate()
    coco_eval.accumulate()
    eval_file = os.path.join(output_dir, 'keypoint_results.pkl')
    robust_pickle_dump(coco_eval, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
    coco_eval.summarize()
Example #10
def save_model_to_weights_file(weights_file, model):
    """Stash model weights in a dictionary and pickle them to a file. We map
    GPU device scoped names to unscoped names (e.g., 'gpu_0/conv1_w' ->
    'conv1_w').
    """
    logger.info(
        'Saving parameters and momentum to {}'.format(
            os.path.abspath(weights_file)))
    blobs = {}
    # Save all parameters
    for param in model.params:
        scoped_name = str(param)
        unscoped_name = utils.blob.unscope_name(scoped_name)
        if unscoped_name not in blobs:
            logger.debug(' {:s} -> {:s}'.format(scoped_name, unscoped_name))
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    # Save momentum
    for param in model.TrainableParams():
        scoped_name = str(param) + '_momentum'
        unscoped_name = utils.blob.unscope_name(scoped_name)
        if unscoped_name not in blobs:
            logger.debug(' {:s} -> {:s}'.format(scoped_name, unscoped_name))
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    # Save preserved blobs
    for scoped_name in workspace.Blobs():
        if scoped_name.startswith('__preserve__/'):
            unscoped_name = utils.blob.unscope_name(scoped_name)
            if unscoped_name not in blobs:
                logger.debug(
                    ' {:s} -> {:s} (preserved)'.format(
                        scoped_name, unscoped_name))
                blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    # Save the _rm/_riv for the models with batch norm
    for scoped_name in workspace.Blobs():
        if scoped_name.endswith('_rm') or scoped_name.endswith('_riv'):
            unscoped_name = utils.blob.unscope_name(scoped_name)
            if unscoped_name not in blobs:
                logger.debug(
                    ' {:s} -> {:s} (preserved)'.format(
                        scoped_name, unscoped_name))
                blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    cfg_yaml = yaml.dump(cfg)
    robust_pickle_dump(dict(blobs=blobs, cfg=cfg_yaml), weights_file)
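utils.blob.unscope_name is used above but not shown; a sketch consistent with the docstring's 'gpu_0/conv1_w' -> 'conv1_w' example (the real helper may differ):

def unscope_name(blob_name):
    # Drop everything up to and including the last '/'; names without a
    # device scope pass through unchanged.
    return blob_name[blob_name.rfind('/') + 1:]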
Example #11
def evaluate_proposal_file(dataset, proposal_file, output_dir):
    roidb = dataset.get_roidb(gt=True, proposal_file=proposal_file)
    logger.info('~~~~ Summary metrics ~~~~')
    recall_files = []
    for l in [300, 1000, 2000]:
        print(' res@{:d} proposals / image:'.format(l))
        res = {}
        for a in ['all', 'small', 'medium', 'large']:
            res[a] = json_dataset_evaluator.evaluate_recall(
                dataset, roidb, area=a, limit=l)
            print(' area={:8s} | ar={:.3f}'.format(a, res[a]['ar']))

        # Index 4 is for iou thresh of 0.7
        for a in ['all', 'small', 'medium', 'large']:
            print(
                ' iou=[.7]     | area={:8s} | ar={:.3f}'.
                format(a, res[a]['recalls'][4]))

        recall_file = os.path.join(
            output_dir, 'at{:d}'.format(l) + 'rpn_proposal_recall.pkl')
        robust_pickle_dump(res, recall_file)
        recall_files.append(recall_file)
    logger.info('Evaluating proposals is done!')
    return recall_files
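To make the "Index 4 is for iou thresh of 0.7" comment concrete, assuming evaluate_recall uses COCO-style IoU thresholds 0.5:0.05:0.95:

import numpy as np

iou_thrs = np.arange(0.5, 0.95 + 1e-5, 0.05)  # [0.5, 0.55, ..., 0.95]
assert np.isclose(iou_thrs[4], 0.7)  # res[a]['recalls'][4] is recall at IoU 0.7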
def test_net(ind_range=None):
    assert cfg.TEST.WEIGHTS != '', \
        'TEST.WEIGHTS must be set to the model file to test'
    assert not cfg.MODEL.RPN_ONLY, \
        'Use rpn_generate to generate proposals from RPN-only models'
    assert cfg.TEST.DATASET != '', \
        'TEST.DATASET must be set to the dataset name to test'

    output_dir = get_output_dir(training=False)
    roidb, dataset, start_ind, end_ind, total_num_images = \
        get_roidb_and_dataset(ind_range)
    model = initialize_model_from_cfg()
    num_images = len(roidb)
    num_classes = cfg.MODEL.NUM_CLASSES
    all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)
    timers = defaultdict(Timer)
    gpu_dev = core.DeviceOption(caffe2_pb2.CUDA, cfg.ROOT_GPU_ID)
    name_scope = 'gpu_{}'.format(cfg.ROOT_GPU_ID)
    for i, entry in enumerate(roidb):
        if cfg.MODEL.FASTER_RCNN:
            box_proposals = None
        else:
            # The roidb may contain ground-truth rois (for example, if the roidb
            # comes from the training or val split). We only want to evaluate
            # detection on the *non*-ground-truth rois. We select only the rois
            # that have the gt_classes field set to 0, which means there's no
            # ground truth.
            box_proposals = entry['boxes'][entry['gt_classes'] == 0]
            if len(box_proposals) == 0:
                continue

        im = image_utils.read_image_video(entry)
        with core.NameScope(name_scope):
            with core.DeviceScope(gpu_dev):
                cls_boxes_i, cls_segms_i, cls_keyps_i = im_detect_all(
                    model, im, box_proposals, timers)

        extend_results(i, all_boxes, cls_boxes_i)
        if cls_segms_i is not None:
            extend_results(i, all_segms, cls_segms_i)
        if cls_keyps_i is not None:
            extend_results(i, all_keyps, cls_keyps_i)

        if i % 10 == 0:  # Reduce log file size
            ave_total_time = np.sum([t.average_time for t in timers.values()])
            eta_seconds = ave_total_time * (num_images - i - 1)
            eta = str(datetime.timedelta(seconds=int(eta_seconds)))
            det_time = (timers['im_detect_bbox'].average_time +
                        timers['im_detect_mask'].average_time +
                        timers['im_detect_keypoints'].average_time)
            misc_time = (timers['misc_bbox'].average_time +
                         timers['misc_mask'].average_time +
                         timers['misc_keypoints'].average_time)
            logger.info(('im_detect: range [{:d}, {:d}] of {:d}: '
                         '{:d}/{:d} {:.3f}s + {:.3f}s (eta: {})').format(
                             start_ind + 1, end_ind, total_num_images,
                             start_ind + i + 1, start_ind + num_images,
                             det_time, misc_time, eta))

        if cfg.VIS:
            im_name = os.path.splitext(os.path.basename(entry['image']))[0]
            vis_utils.vis_one_image(im[:, :, ::-1],
                                    '{:d}_{:s}'.format(i, im_name),
                                    os.path.join(output_dir, 'vis'),
                                    cls_boxes_i,
                                    segms=cls_segms_i,
                                    keypoints=cls_keyps_i,
                                    thresh=cfg.VIS_THR,
                                    box_alpha=0.8,
                                    dataset=dataset,
                                    show_class=True)

    cfg_yaml = yaml.dump(cfg)
    if ind_range is not None:
        det_name = 'detection_range_%s_%s.pkl' % tuple(ind_range)
    else:
        det_name = 'detections.pkl'
    det_file = os.path.join(output_dir, det_name)
    robust_pickle_dump(
        dict(all_boxes=all_boxes,
             all_segms=all_segms,
             all_keyps=all_keyps,
             cfg=cfg_yaml), det_file)
    logger.info('Wrote detections to: {}'.format(os.path.abspath(det_file)))
    return all_boxes, all_segms, all_keyps
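A hypothetical single-range invocation, mirroring how the multi-GPU path shards work across subprocesses:

# Evaluate only images [0, 5000); the resulting
# 'detection_range_0_5000.pkl' is later collated by
# multi_gpu_test_net_on_dataset (Example #3's collation loop above).
all_boxes, all_segms, all_keyps = test_net(ind_range=(0, 5000))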
Example #14
def main(name_scope, gpu_dev, num_images, args):

    model = initialize_model_from_cfg()
    num_classes = cfg.MODEL.NUM_CLASSES
    all_boxes, all_segms, all_keyps = empty_results(num_classes, num_images)

    if '2d_best' in args.cfg_file:
        for i in range(num_images):
            print('Processing Detection for Frame %d' % (i + 1))
            im_ = _read_video_frames(args.out_path, args.vid_name, i)
            im_ = np.expand_dims(im_, 0)
            with core.NameScope(name_scope):
                with core.DeviceScope(gpu_dev):
                    cls_boxes_i, cls_segms_i, cls_keyps_i = im_detect_all(
                        model, im_, None)  # TODO: Parallelize detection

            extend_results(i, all_boxes, cls_boxes_i)
            if cls_segms_i is not None:
                extend_results(i, all_segms, cls_segms_i)
            if cls_keyps_i is not None:
                extend_results(i, all_keyps, cls_keyps_i)
    elif '3d' in args.cfg_file:
        for i in range(num_images-2):
            # Each step reads a 3-frame window starting at frame i
            print('Processing Detection for Frames %d to %d'
                  % (i + 1, i + 3))
            ims_ = _read_video_3frames(args.out_path, args.vid_name, i)
            with core.NameScope(name_scope):
                with core.DeviceScope(gpu_dev):
                    cls_boxes_i, cls_segms_i, cls_keyps_i = im_detect_all(
                        model, ims_, None)

            # extend boxes for 3 frames: each row holds [x1, y1, x2, y2]
            # for frames i, i+1, i+2 plus a final shared score column
            # (13 columns total); slice out each frame's 4 coords with
            # the score
            tmp_boxes_i2 = deepcopy(cls_boxes_i)
            tmp_boxes_i2[1] = tmp_boxes_i2[1][:, 8:]
            extend_results(i + 2, all_boxes, tmp_boxes_i2)
            tmp_boxes_i1 = deepcopy(cls_boxes_i)
            tmp_boxes_i1[1] = tmp_boxes_i1[1][:, [4, 5, 6, 7, -1]]
            extend_results(i + 1, all_boxes, tmp_boxes_i1)
            tmp_boxes_i0 = deepcopy(cls_boxes_i)
            tmp_boxes_i0[1] = tmp_boxes_i0[1][:, [0, 1, 2, 3, -1]]
            extend_results(i, all_boxes, tmp_boxes_i0)
            # extend segms for 3 frames
            if cls_segms_i is not None:
                extend_results(i+2, all_segms, cls_segms_i)
            # extend keyps for 3 frames: the 17 COCO keypoints of frames
            # i, i+1, i+2 are concatenated along the column axis
            # (51 columns total)
            if cls_keyps_i is not None:
                # the i+2-th frame: columns 34:51
                tmp_keyps_i2 = deepcopy(cls_keyps_i)
                for idx in range(len(tmp_keyps_i2[1])):
                    tmp_keyps_i2[1][idx] = tmp_keyps_i2[1][idx][:, 34:]
                extend_results(i + 2, all_keyps, tmp_keyps_i2)
                # the i+1-th frame: columns 17:34
                tmp_keyps_i1 = deepcopy(cls_keyps_i)
                for idx in range(len(tmp_keyps_i1[1])):
                    tmp_keyps_i1[1][idx] = tmp_keyps_i1[1][idx][:, 17:34]
                extend_results(i + 1, all_keyps, tmp_keyps_i1)
                # the i-th frame: columns :17
                tmp_keyps_i0 = deepcopy(cls_keyps_i)
                for idx in range(len(tmp_keyps_i0[1])):
                    tmp_keyps_i0[1][idx] = tmp_keyps_i0[1][idx][:, :17]
                extend_results(i, all_keyps, tmp_keyps_i0)


    cfg_yaml = yaml.dump(cfg)

    det_name = args.vid_name + '_detections.pkl'
    det_file = osp.join(args.out_path, det_name)
    robust_pickle_dump(
        dict(all_boxes=all_boxes,
             all_segms=all_segms,
             all_keyps=all_keyps,
             cfg=cfg_yaml),
        det_file)

    frames = sorted(glob.glob(
        osp.join(args.out_path, args.vid_name + '_frames', '*.jpg')))

    out_detrack_file = osp.join(
        args.out_path, args.vid_name + '_detections_withTracks.pkl')

    # Debug configurations: using ground-truth keypoints as an upper bound
    # implies the general upper-bound debug mode, so force it on
    if cfg.TRACKING.DEBUG.UPPER_BOUND_2_GT_KPS:
        cfg.TRACKING.DEBUG.UPPER_BOUND = True

    # Set include_gt True when using the roidb to evaluate directly; we do
    # not do that currently
    dets = _load_det_file(det_file)
    if cfg.TRACKING.KEEP_CENTER_DETS_ONLY:
        _center_detections(dets)

    conf = cfg.TRACKING.CONF_FILTER_INITIAL_DETS
    dets = _prune_bad_detections(dets, conf)
    if cfg.TRACKING.LSTM_TEST.LSTM_TRACKING_ON:
        # Needs torch, only importing if we need to run LSTM tracking
        from lstm.lstm_track import lstm_track_utils
        lstm_model = lstm_track_utils.init_lstm_model(
            cfg.TRACKING.LSTM_TEST.LSTM_WEIGHTS)
        lstm_model.cuda()
    else:
        lstm_model = None

    dets_withTracks = compute_matches_tracks(frames, dets, lstm_model)
    _write_det_file(dets_withTracks, out_detrack_file)

    vis_dir = osp.join(args.out_path, args.vid_name + '_vis')
    if not osp.exists(vis_dir):
        os.makedirs(vis_dir)  # cv2.imwrite fails silently if the dir is missing
    for i in range(num_images):
        vis_im = _generate_visualizations(
            frames[i], i, dets['all_boxes'], dets['all_keyps'],
            dets['all_tracks'])
        cv2.imwrite(osp.join(vis_dir, '%08d.jpg' % (i + 1)), vis_im)
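_load_det_file and _write_det_file are not shown on this page; minimal sketches, assuming they are plain pickle wrappers around the detection dicts used above:

import pickle


def _load_det_file(det_file):
    # Inverse of robust_pickle_dump for the detection dicts above
    with open(det_file, 'rb') as f:
        return pickle.load(f)


def _write_det_file(dets, det_file):
    with open(det_file, 'wb') as f:
        pickle.dump(dets, f, pickle.HIGHEST_PROTOCOL)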