Example 1
def my_main(tag, cfg_file, set_cfgs, imdb_name, _config):

    # Apply the config file and extra overrides first so the output path below resolves correctly
    if cfg_file:
        cfg_from_file(cfg_file)
    if set_cfgs:
        cfg_from_list(set_cfgs)

    print('Called with args:')
    print(_config)

    # If not already present, save the configuration to a file in the output folder
    outdir = osp.abspath(
        osp.join(cfg.ROOT_DIR, 'output', 'frcnn', cfg.EXP_DIR, imdb_name, tag))
    sacred_config = osp.join(outdir, 'sacred_config.yaml')
    if not osp.isfile(sacred_config):
        if not os.path.exists(outdir):
            os.makedirs(outdir)
        with open(sacred_config, 'w') as outfile:
            yaml.dump({'frcnn': _config}, outfile, default_flow_style=False)

    frcnn_trainval()
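
Example 1's my_main receives its parameters, plus the special _config dict, through Sacred's config injection. A minimal sketch of how such a function is typically registered (the experiment name and default values are assumptions, not taken from the source):

from sacred import Experiment

ex = Experiment('frcnn_train')  # hypothetical experiment name


@ex.config
def defaults():
    # hypothetical defaults, for illustration only
    tag = 'default'
    cfg_file = None
    set_cfgs = None
    imdb_name = 'voc_2007_trainval'


@ex.automain
def my_main(tag, cfg_file, set_cfgs, imdb_name, _config):
    # Sacred fills each named parameter from the config scope above;
    # _config is the read-only dict of the complete configuration.
    print(_config)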
Example 2
def my_main(imdbtest_name, clip_bbox, output_name, nms_thresh, frcnn,
            write_images, _config):

    # Clip bboxes after bbox reg to image boundary
    cfg_from_list(['TEST.BBOX_CLIP', str(clip_bbox),
                   'TEST.NMS', str(nms_thresh)])
    #cfg_from_list(["APPLY_CLAHE", "True"])

    # Apply the config file and extra overrides first so the model path below resolves correctly
    if frcnn['cfg_file']:
        cfg_from_file(frcnn['cfg_file'])
    if frcnn['set_cfgs']:
        cfg_from_list(frcnn['set_cfgs'])

    model_dir = osp.abspath(
        osp.join(cfg.ROOT_DIR, 'output', 'frcnn', cfg.EXP_DIR,
                 frcnn['imdb_name'], frcnn['tag']))
    model = osp.join(
        model_dir,
        '{}_iter_{:d}.pth'.format(cfg.TRAIN.SNAPSHOT_PREFIX,
                                  frcnn['max_iters']))
    # model = osp.join(model_dir, cfg.TRAIN.SNAPSHOT_PREFIX + '_iter_35000' + '.pth')
    if output_name:
        output_dir = osp.join(model_dir, output_name)
    else:
        output_dir = osp.join(model_dir, imdbtest_name)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    print('Called with args:')
    print(_config)

    frcnn_test(model=model,
               output_dir=output_dir,
               network=frcnn['network'],
               write_images=write_images)
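
The override list built at the top of Example 2 follows the py-faster-rcnn-style convention of these config helpers: dotted keys alternate with string-encoded values, which the helper parses back into Python literals. A small sketch of the expanded call (concrete values hypothetical):

from frcnn.model.config import cfg, cfg_from_list

# With clip_bbox=False and nms_thresh=0.3, the list expands to alternating
# KEY, value pairs; each value string is parsed and written into the cfg tree.
cfg_from_list(['TEST.BBOX_CLIP', str(False), 'TEST.NMS', str(0.3)])
assert cfg.TEST.NMS == 0.3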
Example 3
from __future__ import absolute_import, division, print_function

import os
import os.path as osp

# Change paths in config file
from frcnn.model import config
from frcnn.model.config import cfg, cfg_from_file, cfg_from_list
from frcnn_test import frcnn_test
from sacred import Experiment

this_dir = osp.dirname(__file__)
root_path = osp.join(this_dir, '..', '..')
data_path = osp.join(root_path, 'data')
config.cfg_from_list(['ROOT_DIR', root_path, 'DATA_DIR', data_path])

ex = Experiment()

frcnn_test = ex.capture(frcnn_test)


@ex.config
def default():
    score_thresh = 0.05
    nms_thresh = 0.3
    clip_bbox = False
    max_per_image = 100
    output_name = None
    write_images = False

    # Added so that Sacred doesn't throw a KeyError
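    # The snippet is truncated here. A hypothetical completion: the frcnn
    # sub-config the test script reads, with keys taken from how Example 2
    # accesses them (all values are placeholders).
    frcnn = {
        'cfg_file': None,
        'set_cfgs': None,
        'imdb_name': 'mot17_train',  # placeholder dataset name
        'tag': '',
        'max_iters': 180000,         # placeholder iteration count
        'network': 'res101',
    }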
Example 4
def my_main(tracktor, siamese, _config):
    # set all seeds
    torch.manual_seed(tracktor['seed'])
    torch.cuda.manual_seed(tracktor['seed'])
    np.random.seed(tracktor['seed'])
    torch.backends.cudnn.deterministic = True

    output_dir = osp.join(get_output_dir(tracktor['module_name']),
                          tracktor['name'])
    sacred_config = osp.join(output_dir, 'sacred_config.yaml')

    if not osp.exists(output_dir):
        os.makedirs(output_dir)
    with open(sacred_config, 'w') as outfile:
        yaml.dump(_config, outfile, default_flow_style=False)

    ##########################
    # Initialize the modules #
    ##########################

    # object detection
    print("[*] Building object detector")
    if tracktor['network'].startswith('frcnn'):
        # FRCNN
        from tracktor.frcnn import FRCNN
        from frcnn.model import config

        if _config['frcnn']['cfg_file']:
            config.cfg_from_file(_config['frcnn']['cfg_file'])
        if _config['frcnn']['set_cfgs']:
            config.cfg_from_list(_config['frcnn']['set_cfgs'])

        obj_detect = FRCNN(num_layers=101)
        obj_detect.create_architecture(2,
                                       tag='default',
                                       anchor_scales=config.cfg.ANCHOR_SCALES,
                                       anchor_ratios=config.cfg.ANCHOR_RATIOS)
        obj_detect.load_state_dict(torch.load(tracktor['obj_detect_weights']))
    elif tracktor['network'].startswith('fpn'):
        # FPN
        from tracktor.fpn import FPN
        from fpn.model.utils import config
        config.cfg.TRAIN.USE_FLIPPED = False
        config.cfg.CUDA = True
        checkpoint = torch.load(tracktor['obj_detect_weights'])

        if 'pooling_mode' in checkpoint:
            config.cfg.POOLING_MODE = checkpoint['pooling_mode']

        set_cfgs = [
            'ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]'
        ]
        config.cfg_from_file(_config['tracktor']['obj_detect_config'])
        config.cfg_from_list(set_cfgs)

        obj_detect = FPN(('__background__', 'pedestrian'),
                         101,
                         pretrained=False)
        obj_detect.create_architecture()

        obj_detect.load_state_dict(checkpoint['model'])
    else:
        raise NotImplementedError(
            f"Object detector type not known: {tracktor['network']}")

    pprint.pprint(config.cfg)
    obj_detect.eval()
    obj_detect.cuda()

    # reid
    reid_network = resnet50(pretrained=False, **siamese['cnn'])
    reid_network.load_state_dict(torch.load(tracktor['reid_network_weights']))
    reid_network.eval()
    reid_network.cuda()

    # tracktor
    if 'oracle' in tracktor:
        tracker = OracleTracker(obj_detect, reid_network, tracktor['tracker'],
                                tracktor['oracle'])
    else:
        tracker = Tracker(obj_detect, reid_network, tracktor['tracker'])

    print("[*] Beginning evaluation...")

    time_total = 0
    for sequence in Datasets(tracktor['dataset']):
        tracker.reset()

        now = time.time()

        print("[*] Evaluating: {}".format(sequence))

        data_loader = DataLoader(sequence, batch_size=1, shuffle=False)
        for i, frame in enumerate(data_loader):
            # only step on frames inside the configured split, e.g. frame_split = [0.0, 1.0]
            if (len(sequence) * tracktor['frame_split'][0] <= i
                    <= len(sequence) * tracktor['frame_split'][1]):
                tracker.step(frame)
        results = tracker.get_results()

        time_total += time.time() - now

        print("[*] Tracks found: {}".format(len(results)))
        print("[*] Time needed for {} evaluation: {:.3f} s".format(
            sequence,
            time.time() - now))

        if tracktor['interpolate']:
            results = interpolate(results)

        sequence.write_results(results, output_dir)

        if tracktor['write_images']:
            plot_sequence(
                results, sequence,
                osp.join(output_dir, tracktor['dataset'], str(sequence)))

    print("[*] Evaluation for all sets (without image generation): {:.3f} s".
          format(time_total))
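
A small illustration of the frame range that the frame_split bounds in the loop above select (the sequence length and split values here are hypothetical):

# Hypothetical: a 600-frame sequence with frame_split = [0.25, 0.75] steps the
# tracker only over frames 150..450, matching the inclusive bounds above.
seq_len = 600
frame_split = [0.25, 0.75]
selected = [i for i in range(seq_len)
            if seq_len * frame_split[0] <= i <= seq_len * frame_split[1]]
assert selected[0] == 150 and selected[-1] == 450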
Example 5
def my_main(tracktor, siamese, _config):
    # set all seeds
    torch.manual_seed(tracktor['seed'])
    torch.cuda.manual_seed(tracktor['seed'])
    np.random.seed(tracktor['seed'])
    torch.backends.cudnn.deterministic = True

    output_dir = osp.join(get_output_dir(tracktor['module_name']),
                          tracktor['name'])
    sacred_config = osp.join(output_dir, 'sacred_config.yaml')

    if not osp.exists(output_dir):
        os.makedirs(output_dir)
    with open(sacred_config, 'w') as outfile:
        yaml.dump(_config, outfile, default_flow_style=False)

    ##########################
    # Initialize the modules #
    ##########################

    # object detection

    print("[*] Building object detector")
    print("tracktor['network'] is: ", tracktor['network'])

    if tracktor['network'].startswith('frcnn'):
        # FRCNN
        from tracktor.frcnn import FRCNN
        from frcnn.model import config

        if _config['frcnn']['cfg_file']:
            config.cfg_from_file(_config['frcnn']['cfg_file'])
        if _config['frcnn']['set_cfgs']:
            config.cfg_from_list(_config['frcnn']['set_cfgs'])

        obj_detect = FRCNN(num_layers=101)
        obj_detect.create_architecture(2,
                                       tag='default',
                                       anchor_scales=config.cfg.ANCHOR_SCALES,
                                       anchor_ratios=config.cfg.ANCHOR_RATIOS)
        state_dict_person = torch.load(tracktor['obj_detect_weights_person'])
        obj_detect.load_state_dict(state_dict_person)
        # loading head-detection model
        obj_detect_head = FRCNN(num_layers=101)
        obj_detect_head.create_architecture(
            2,
            tag='default',
            anchor_scales=config.cfg.ANCHOR_SCALES,
            anchor_ratios=config.cfg.ANCHOR_RATIOS)
        state_dict_head = torch.load(tracktor['obj_detect_weights_head'])
        state_dict_head = my_transform(state_dict_head)
        obj_detect_head.load_state_dict(state_dict_head)

    elif tracktor['network'].startswith('mask-rcnn'):
        # MASK-RCNN
        pass

    elif tracktor['network'].startswith('fpn'):
        # FPN
        from tracktor.fpn import FPN
        from fpn.model.utils import config
        config.cfg.TRAIN.USE_FLIPPED = False
        config.cfg.CUDA = True
        checkpoint = torch.load(tracktor['obj_detect_weights'])

        if 'pooling_mode' in checkpoint:
            config.cfg.POOLING_MODE = checkpoint['pooling_mode']

        set_cfgs = [
            'ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]'
        ]
        config.cfg_from_file(_config['tracktor']['obj_detect_config'])
        config.cfg_from_list(set_cfgs)

        obj_detect = FPN(('__background__', 'pedestrian'),
                         101,
                         pretrained=False)
        obj_detect.create_architecture()

        obj_detect.load_state_dict(checkpoint['model'])
    else:
        raise NotImplementedError(
            f"Object detector type not known: {tracktor['network']}")

    pprint.pprint(config.cfg)
    obj_detect.eval()
    obj_detect.cuda()
    if tracktor['network'].startswith('frcnn'):
        # the head detector is only built on the frcnn branch above
        obj_detect_head.eval()
        obj_detect_head.cuda()

    # reid
    reid_network = resnet50(pretrained=False, **siamese['cnn'])
    reid_network.load_state_dict(torch.load(tracktor['reid_network_weights']))
    reid_network.eval()
    reid_network.cuda()

    # tracktor
    if 'oracle' in tracktor:
        tracker = OracleTracker(obj_detect, reid_network, tracktor['tracker'],
                                tracktor['oracle'])
    else:
        print(tracktor['tracker'])
        tracker = Tracker(obj_detect, reid_network, tracktor['tracker'])

    print("[*] Beginning evaluation...")

    time_total = 0
    tracker.reset()
    now = time.time()

    cv2.namedWindow("test", cv2.WINDOW_NORMAL)
    cv2.resizeWindow("test", 800, 600)

    seq_name = 'MOT-2'
    video_file = osp.join(cfg.ROOT_DIR, 'video', seq_name + '.mp4')
    print("[*] Evaluating: {}".format(video_file))

    # ===============================================
    # transform each video frame to main frame format
    # ===============================================
    transforms = Compose(
        [ToTensor(),
         Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])

    vdo = cv2.VideoCapture()
    vdo.open(video_file)
    im_width = int(vdo.get(cv2.CAP_PROP_FRAME_WIDTH))
    im_height = int(vdo.get(cv2.CAP_PROP_FRAME_HEIGHT))
    area = 0, 0, im_width, im_height

    print("===video frame's area:", area)
    # video = cv2.VideoCapture(video_file)
    # if not video.isOpened():
    #     print("error opening video stream or file!")

    # while (video.isOpened()):
    while vdo.grab():
        _, frame = vdo.retrieve()

        # success, frame = video.read()
        # if not success:
        #     break
        # print(frame)  # (540, 960, 3)

        blobs, im_scales = test._get_blobs(frame)
        data = blobs['data']

        # print(data.shape)  # (1, 562, 1000, 3)
        # print(im_scales)  # [1.04166667]

        sample = {}
        # cv2.resize expects scalar scale factors; im_scales is an array
        sample['image'] = cv2.resize(frame, (0, 0),
                                     fx=im_scales[0],
                                     fy=im_scales[0],
                                     interpolation=cv2.INTER_NEAREST)
        sample['im_path'] = video_file
        sample['data'] = torch.from_numpy(data).unsqueeze(0)
        im_info = np.array([data.shape[1], data.shape[2], im_scales[0]],
                           dtype=np.float32)
        sample['im_info'] = torch.from_numpy(im_info).unsqueeze(0)

        # convert to siamese input
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        frame = Image.fromarray(frame)
        frame = transforms(frame)
        # print(frame.shape)  # torch.Size([3, 540, 960])

        sample['app_data'] = frame.unsqueeze(0).unsqueeze(0)
        # print(sample['app_data'].size())  # torch.Size([1, 1, 3, 540, 960])

        # additional info
        # sample['gt'] = {}
        # sample['vis'] = {}
        # sample['dets'] = []

        tracker.step(sample)
        tracker.show_tracks(area)

    vdo.release()
    print('the current video ' + video_file + ' is done')

    results = tracker.get_results()
    time_total += time.time() - now
    print("[*] Tracks found: {}".format(len(results)))
    print("[*] Time needed for {} evaluation: {:.3f} s".format(
        seq_name,
        time.time() - now))

    print("[*] Evaluation for all sets (without image generation): {:.3f} s".
          format(time_total))
Example 6
def frcnn_trainval(imdb_name, imdbval_name, max_iters, pretrained_model,
                   pretrained_full_model, cfg_file, set_cfgs, network, tag):
    """
  args = {'imdb_name':imdb_name,
      'imdbval_name':imdbval_name,
      'max_iters':max_iters,
      'net':network,
      'cfg_file':cfg_file,
      'set_cfgs':set_cfgs,
      'weights':weights,
      'tag':tag}
  """

    if cfg_file:
        cfg_from_file(cfg_file)
    if set_cfgs:
        cfg_from_list(set_cfgs)

    print('Using config:')
    pprint.pprint(cfg)

    np.random.seed(cfg.RNG_SEED)

    # train set
    imdb, roidb = combined_roidb(imdb_name)
    print('{:d} roidb entries'.format(len(roidb)))

    # output directory where the models are saved
    output_dir = get_output_dir(imdb, tag)
    print('Output will be saved to `{:s}`'.format(output_dir))

    # tensorboard directory where the summaries are saved during training
    tb_dir = get_output_tb_dir(imdb, tag)
    print('TensorBoard summaries will be saved to `{:s}`'.format(tb_dir))

    # also add the validation set, but with no flipping images
    orgflip = cfg.TRAIN.USE_FLIPPED
    cfg.TRAIN.USE_FLIPPED = False
    valimdb, valroidb = combined_roidb(imdbval_name)
    print('{:d} validation roidb entries'.format(len(valroidb)))
    cfg.TRAIN.USE_FLIPPED = orgflip

    # load network
    if network == 'vgg16':
        net = vgg16()
    elif network == 'res50':
        net = resnetv1(num_layers=50)
    elif network == 'res101':
        net = resnetv1(num_layers=101)
    elif network == 'res152':
        net = resnetv1(num_layers=152)
    elif network == 'mobile':
        net = mobilenetv1()
    else:
        raise NotImplementedError

    train_net(net,
              imdb,
              roidb,
              valroidb,
              output_dir,
              tb_dir,
              pretrained_model=pretrained_model,
              pretrained_full_model=pretrained_full_model,
              max_iters=max_iters)
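
Example 1 calls frcnn_trainval() with no arguments even though it is defined here with nine parameters: Sacred's ex.capture (see Example 3, where frcnn_test is wrapped the same way) fills missing arguments from the experiment config. A minimal sketch of that mechanism (the experiment name and config values are assumptions):

from sacred import Experiment

ex = Experiment('frcnn')  # hypothetical experiment name


@ex.config
def defaults():
    imdb_name = 'voc_2007_trainval'  # hypothetical values
    max_iters = 70000


@ex.capture
def frcnn_trainval(imdb_name, max_iters):
    print(imdb_name, max_iters)


@ex.automain
def run():
    frcnn_trainval()  # both arguments injected from the config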
Example 7
def my_main(tracktor, siamese, _config):
    # set all seeds
    torch.manual_seed(tracktor['seed'])
    torch.cuda.manual_seed(tracktor['seed'])
    np.random.seed(tracktor['seed'])
    torch.backends.cudnn.deterministic = True

    output_dir = osp.join(get_output_dir(tracktor['module_name']),
                          tracktor['name'])
    sacred_config = osp.join(output_dir, 'sacred_config.yaml')

    if not osp.exists(output_dir):
        os.makedirs(output_dir)
    with open(sacred_config, 'w') as outfile:
        yaml.dump(_config, outfile, default_flow_style=False)

    ##########################
    # Initialize the modules #
    ##########################

    # object detection
    print("[*] Building object detector")
    if tracktor['network'].startswith('frcnn'):
        # FRCNN
        from tracktor.frcnn import FRCNN
        from frcnn.model import config

        if _config['frcnn']['cfg_file']:
            config.cfg_from_file(_config['frcnn']['cfg_file'])
        if _config['frcnn']['set_cfgs']:
            config.cfg_from_list(_config['frcnn']['set_cfgs'])

        obj_detect = FRCNN(num_layers=101)
        obj_detect.create_architecture(2,
                                       tag='default',
                                       anchor_scales=config.cfg.ANCHOR_SCALES,
                                       anchor_ratios=config.cfg.ANCHOR_RATIOS)
        obj_detect.load_state_dict(torch.load(tracktor['obj_detect_weights']))
    elif tracktor['network'].startswith('fpn'):
        # FPN
        from tracktor.fpn import FPN
        from fpn.model.utils import config
        config.cfg.TRAIN.USE_FLIPPED = False
        config.cfg.CUDA = True
        checkpoint = torch.load(tracktor['obj_detect_weights'])

        if 'pooling_mode' in checkpoint:
            config.cfg.POOLING_MODE = checkpoint['pooling_mode']

        set_cfgs = [
            'ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]'
        ]
        config.cfg_from_file(_config['tracktor']['obj_detect_config'])
        config.cfg_from_list(set_cfgs)

        obj_detect = FPN(('__background__', 'pedestrian'),
                         101,
                         pretrained=False)
        obj_detect.create_architecture()

        obj_detect.load_state_dict(checkpoint['model'])
    else:
        raise NotImplementedError(
            f"Object detector type not known: {tracktor['network']}")

    obj_detect.eval()
    obj_detect.cuda()

    print("[*] Beginning operation...")

    layers = ['p2', 'p3', 'p4', 'p5']

    f_hdf5 = h5py.File(
        '/usr/stud/beckera/tracking_wo_bnw/data/motion/im_features.hdf5', 'w')
    i_hdf5 = h5py.File(
        '/usr/stud/beckera/tracking_wo_bnw/data/motion/images.hdf5', 'w')

    for sequence in Datasets(tracktor['dataset']):
        print("[*] Storing sequence: {}".format(sequence))
        f_group = f_hdf5.create_group(sequence._seq_name)
        i_group = i_hdf5.create_group(sequence._seq_name)

        data_loader = DataLoader(sequence, batch_size=1, shuffle=False)
        for i, frame in enumerate(data_loader):
            if i == 0:
                i_group.create_dataset('data',
                                       shape=(len(data_loader),
                                              *frame['data'][0].shape[1:]),
                                       dtype='float16')
                i_group.create_dataset('app_data',
                                       shape=(len(data_loader),
                                              *frame['app_data'][0].shape[1:]),
                                       dtype='float16')
                i_group.create_dataset('im_info',
                                       shape=(len(data_loader), 3),
                                       dtype='float16')
            i_group['data'][i] = frame['data'][0].cpu().numpy()
            i_group['app_data'][i] = frame['app_data'][0].cpu().numpy()
            i_group['im_info'][i] = frame['im_info'].cpu().numpy()

            # volatile=True disables gradient tracking (pre-0.4 PyTorch idiom,
            # superseded by torch.no_grad() in later versions)
            image = Variable(frame['data'][0].permute(0, 3, 1, 2).cuda(),
                             volatile=True)
            features = obj_detect.get_features(image)

            for j, layer in enumerate(layers):
                if i == 0:
                    f_group.create_dataset(layer,
                                           shape=(len(data_loader),
                                                  *features[j].shape[1:]),
                                           dtype='float16')
                f_group[layer][i] = features[j].data.cpu().numpy().astype(
                    'float16')

    f_hdf5.close()
    i_hdf5.close()
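
For completeness, a minimal sketch of reading the stored features back; the file path and sequence group name are assumptions, while the dataset names follow the writer above:

import h5py

# Open the feature file written above and fetch the 'p2' FPN level of the
# first frame of one sequence (the group name is hypothetical).
with h5py.File('im_features.hdf5', 'r') as f_hdf5:
    seq_group = f_hdf5['MOT17-02']       # one group per sequence
    p2_first_frame = seq_group['p2'][0]  # float16 feature map of frame 0
    print(p2_first_frame.shape)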
Example 8
        imdb = datasets.imdb.imdb(imdb_names, tmp.classes)
    else:
        imdb = get_imdb(imdb_names)
    return imdb, roidb


if __name__ == '__main__':
    args = parse_args()

    print('Called with args:')
    print(args)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    print('Using config:')
    pprint.pprint(cfg)

    np.random.seed(cfg.RNG_SEED)

    # train set
    imdb, roidb = combined_roidb(args.imdb_name)
    print('{:d} roidb entries'.format(len(roidb)))

    # output directory where the models are saved
    output_dir = get_output_dir(imdb, args.tag)
    print('Output will be saved to `{:s}`'.format(output_dir))

    # tensorboard directory where the summaries are saved during training
Example 9
def main(tracktor, siamese, webcam, _config):
    # webcam: capture source (device index or stream URL); assumed to be
    # injected from the Sacred config like the other parameters

    # set all seeds
    torch.manual_seed(tracktor['seed'])
    torch.cuda.manual_seed(tracktor['seed'])
    np.random.seed(tracktor['seed'])
    torch.backends.cudnn.deterministic = True

    output_dir = osp.join(get_output_dir(tracktor['module_name']),
                          tracktor['name'])
    sacred_config = osp.join(output_dir, 'sacred_config.yaml')
    if not osp.exists(output_dir):
        os.makedirs(output_dir)
    with open(sacred_config, 'w') as outfile:
        yaml.dump(_config, outfile, default_flow_style=False)
    ##########################
    # Initialize the modules #
    ##########################

    # object detection
    print("[*] Building object detector")
    if tracktor['network'].startswith('frcnn'):
        # FRCNN
        from tracktor.frcnn import FRCNN
        from frcnn.model import config

        if _config['frcnn']['cfg_file']:
            config.cfg_from_file(_config['frcnn']['cfg_file'])
        if _config['frcnn']['set_cfgs']:
            config.cfg_from_list(_config['frcnn']['set_cfgs'])

        obj_detect = FRCNN(num_layers=101)
        obj_detect.create_architecture(2,
                                       tag='default',
                                       anchor_scales=config.cfg.ANCHOR_SCALES,
                                       anchor_ratios=config.cfg.ANCHOR_RATIOS)
        obj_detect.load_state_dict(torch.load(tracktor['obj_detect_weights']))
    else:
        raise NotImplementedError(
            f"Object detector type not known: {tracktor['network']}")
    obj_detect.eval()
    obj_detect.cuda()

    # tracktor
    tracker = Tracker(obj_detect, tracktor['tracker'])
    tracker.reset()  # init tracker

    print("[*] Beginning evaluation...")
    time_total = 0
    cap = cv2.VideoCapture(webcam)
    num_images = 0
    images = []
    try:
        begin = time.time()
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:  # end of stream
                break
            images.append(frame)
            try:
                blob = data_handle.data_process(frame)
            except Exception:
                print('over')
                break
            tracker.step(blob)
            num_images += 1
            if num_images % 10 == 0:
                print('frames processed:', num_images)
        results = tracker.get_results()
        end = time.time()
        print("[*] Tracks found: {}".format(len(results)))
        print('It takes: {:.3f} s'.format((end - begin)))
        if tracktor['write_images']:
            plot_sequence(
                results, images,
                '/home/longshuz/project/tracking_wo_bnw/output/tracktor/results'
            )
        cap.release()

    except KeyboardInterrupt:
        cap.release()
        raise