Example #1
def my_main(tracktor, _config):
    # set all seeds
    torch.manual_seed(tracktor['seed'])
    torch.cuda.manual_seed(tracktor['seed'])
    np.random.seed(tracktor['seed'])
    torch.backends.cudnn.deterministic = True

    output_dir = osp.join(get_output_dir(tracktor['module_name']),
                          tracktor['name'])
    sacred_config = osp.join(output_dir, 'sacred_config.yaml')

    if not osp.exists(output_dir):
        os.makedirs(output_dir)
    with open(sacred_config, 'w') as outfile:
        yaml.dump(_config, outfile, default_flow_style=False)

    print("[*] Beginning process...")

    for seq in Datasets(tracktor['dataset']):

        print(f"[*] Processing sequence {seq}")

        img_output_dir = osp.join(output_dir, tracktor['dataset'], str(seq))
        if tracktor['write_images'] and not osp.exists(img_output_dir):
            os.makedirs(img_output_dir)

        data_loader = DataLoader(seq, batch_size=1, shuffle=False)
        flows = []
        start = time.time()
        for i, frame in enumerate(tqdm(data_loader)):
            current_img = np.transpose(frame['img'][0].cpu().numpy(),
                                       (1, 2, 0))

            if i == 0:
                prev_img = current_img

            current_gray = cv2.cvtColor(current_img, cv2.COLOR_RGB2GRAY)
            prev_gray = cv2.cvtColor(prev_img, cv2.COLOR_RGB2GRAY)
            flow = cv2.calcOpticalFlowFarneback(prev_gray, current_gray, None,
                                                0.5, 3, 15, 3, 5, 1.2,
                                                cv2.OPTFLOW_FARNEBACK_GAUSSIAN)
            flows.append(flow)

            if tracktor['write_images']:
                # visualize the flow field: hue encodes direction,
                # value encodes magnitude, saturation is fixed at maximum
                mask = np.zeros_like(current_img)
                mask[..., 1] = 255
                magnitude, angle = cv2.cartToPolar(flow[..., 0], flow[..., 1])
                mask[..., 0] = angle * 180 / np.pi / 2
                mask[..., 2] = cv2.normalize(magnitude, None, 0, 255,
                                             cv2.NORM_MINMAX)
                rgb = cv2.cvtColor(mask, cv2.COLOR_HSV2RGB)
                save_path = osp.join(img_output_dir,
                                     osp.basename(frame['img_path'][0]))
                Image.fromarray(rgb.astype('uint8')).save(save_path)

            prev_img = current_img

        time_total += time.time() - start

        #write_optical_flow(results, warps, sequence, osp.join(output_dir, tracktor['dataset'], str(sequence)))

    print("[*] Evaluation for all sets (without image generation): {:.3f} s".
          format(time_total))
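
The write_images branch above maps flow direction to hue and flow magnitude to brightness. A minimal, self-contained sketch of that mapping (the random field below is only a stand-in for real Farneback output):

import cv2
import numpy as np

def flow_to_rgb(flow):
    # hue encodes direction, value encodes magnitude, saturation is fixed
    h, w = flow.shape[:2]
    hsv = np.zeros((h, w, 3), dtype=np.uint8)
    hsv[..., 1] = 255
    magnitude, angle = cv2.cartToPolar(flow[..., 0], flow[..., 1])
    hsv[..., 0] = (angle * 180 / np.pi / 2).astype(np.uint8)
    hsv[..., 2] = cv2.normalize(magnitude, None, 0, 255,
                                cv2.NORM_MINMAX).astype(np.uint8)
    return cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)

flow = np.random.randn(240, 320, 2).astype(np.float32)  # stand-in flow field
rgb = flow_to_rgb(flow)
print(rgb.shape)  # (240, 320, 3)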
Example #2
    def validate_tracktor(motion_network, epoch):
        # inject current network into tracker
        tracker.motion_network = motion_network

        time_total = 0
        num_frames = 0
        mot_accums = []
        dataset = Datasets(train['tracktor_val_dataset'])
        for seq in dataset:
            tracker.reset()

            start = time.time()

            _log.info(f"Tracking: {seq}")

            data_loader = DataLoader(seq, batch_size=1, shuffle=False)
            for i, frame in enumerate(tqdm(data_loader)):
                if len(seq) * tracktor['frame_split'][0] <= i <= len(
                        seq) * tracktor['frame_split'][1]:
                    tracker.step(frame)
                    num_frames += 1
            results = tracker.get_results()

            time_total += time.time() - start

            _log.info(f"Tracks found: {len(results)}")
            _log.info(f"Runtime for {seq}: {time.time() - start :.1f} s.")

            if seq.no_gt:
                _log.info(f"No GT data for evaluation available.")
            else:
                mot_accums.append(get_mot_accum(results, seq))

            _log.info(f"Writing predictions to: {output_dir}")
            seq.write_results(results, output_dir)

            if tracktor['write_images']:
                plot_sequence(
                    results, seq,
                    osp.join(output_dir, tracktor['dataset'], str(epoch),
                             str(seq)))

        _log.info(
            f"Tracking runtime for all sequences (without evaluation or image writing): "
            f"{time_total:.1f} s ({num_frames / time_total:.1f} Hz)")

        metrics = {}
        if mot_accums:
            summary = evaluate_mot_accums(
                mot_accums, [str(s) for s in dataset if not s.no_gt],
                generate_overall=True,
                return_summary=True,
                metrics=train['tracktor_val_metrics'])
            metrics = {
                m: summary.loc['OVERALL', m]
                for m in train['tracktor_val_metrics']
            }

        return metrics
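
A hypothetical sketch of how such a validation closure could drive checkpointing during training; train_one_epoch, num_epochs, and the 'mota' metric key are assumptions, not part of the example above:

best_mota = float('-inf')
for epoch in range(num_epochs):
    train_one_epoch(motion_network)  # assumed training step
    metrics = validate_tracktor(motion_network, epoch)
    if metrics.get('mota', float('-inf')) > best_mota:
        best_mota = metrics['mota']
        torch.save(motion_network.state_dict(), 'best_motion_model.pth')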
Example #3
def my_main(_config, reid):
    # set all seeds
    torch.manual_seed(reid['seed'])
    torch.cuda.manual_seed(reid['seed'])
    np.random.seed(reid['seed'])
    torch.backends.cudnn.deterministic = True

    print(_config)

    output_dir = 'D:/emartins/reiddd'
    tb_dir = 'D:/emartins/reiddd'

    sacred_config = osp.join(output_dir, 'sacred_config.yaml')

    if not osp.exists(output_dir):
        os.makedirs(output_dir)
    with open(sacred_config, 'w') as outfile:
        yaml.dump(_config, outfile, default_flow_style=False)

    #########################
    # Initialize dataloader #
    #########################
    print("[*] Initializing Dataloader")

    db_train = Datasets(reid['db_train'], reid['dataloader'])
    db_train = DataLoader(db_train, batch_size=1, shuffle=True)

    if reid['db_val']:
        # validation loader not implemented yet; falls through to None
        db_val = None
        #db_val = DataLoader(db_val, batch_size=1, shuffle=True)
    else:
        db_val = None

    ##########################
    # Initialize the modules #
    ##########################
    print("[*] Building CNN")
    network = resnet50(pretrained=False, **reid['cnn'])
    network.train()
    network.cuda()

    ##################
    # Begin training #
    ##################
    print("[*] Solving ...")

    # build scheduling like in "In Defense of the Triplet Loss for Person Re-Identification"
    # from Hermans et al.
    lr = reid['solver']['optim_args']['lr']
    iters_per_epoch = len(db_train)
    # keep lr constant until iteration 15000, then decay exponentially to 0.001 at iteration 25000
    l = lambda epoch: 1 if epoch * iters_per_epoch < 15000 else \
        0.001**((epoch * iters_per_epoch - 15000) / (25000 - 15000))
    #else:
    #   l = None
    max_epochs = 25000 // len(db_train.dataset) + 1 if 25000 % len(db_train.dataset) else 25000 // len(db_train.dataset)
    solver = Solver(output_dir, tb_dir, lr_scheduler_lambda=l)
    solver.train(network, db_train, db_val, max_epochs, 100, model_args=reid['model_args'])
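
A quick check of the schedule above (constant until iteration 15000, then exponential decay to 0.001 at iteration 25000), with an assumed iters_per_epoch of 500:

iters_per_epoch = 500  # assumed value for illustration
l = lambda epoch: 1 if epoch * iters_per_epoch < 15000 else \
    0.001**((epoch * iters_per_epoch - 15000) / (25000 - 15000))

for epoch in [0, 29, 30, 40, 50]:
    print(epoch, round(l(epoch), 6))
# epochs 0-30 -> 1.0, epoch 40 -> 0.031623, epoch 50 -> 0.001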
Example #4
def my_main(plotter, _config):
    output_dir = Path(get_output_dir(plotter['module_name'])) / plotter['name']

    for sequence in Datasets(plotter['dataset']):
        for file in Path(plotter['boxes_dir']).glob('*.pkl'):
            with file.open('rb') as fh:
                data = pickle.load(fh)[sequence._seq_name + '-FRCNN']

            plot_sequence(
                data, sequence, output_dir / plotter['dataset'] /
                str(sequence) / str(file.stem))
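
The pickle files are assumed to hold one results dict per sequence key, in the format tracker.get_results() produces in the other examples (track id -> frame -> box). A tiny synthetic file for illustration:

import pickle

results = {0: {1: [964.0, 412.0, 1010.0, 533.0]}}  # track 0, frame 1, one box
with open('boxes.pkl', 'wb') as fh:
    pickle.dump({'MOT17-02-FRCNN': results}, fh)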
Example #5
def main(fg_detector, _config, _log, _run):

    torch.manual_seed(fg_detector['seed'])
    torch.cuda.manual_seed(fg_detector['seed'])
    np.random.seed(fg_detector['seed'])
    sacred.commands.print_config(_run)
    output_dir = os.path.join(get_output_dir(fg_detector['module_name']),
                              fg_detector['name'])

    sacred_config = os.path.join(output_dir, 'sacred_config.yaml')
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    with open(sacred_config, 'w') as outfile:
        yaml.dump(_config, outfile, default_flow_style=False)
    # object detection
    _log.info("Initializing foreground detector.")
    dataset = Datasets(fg_detector['dataset'])
    fg_det = FGDetector(fg_detector, dataset[3])
    fg_det.calc_average_image()
    grid_points = fg_det.calc_grid_points()
    fg_det.calc_positions(grid_points)
Example #6
def main(tracktor, reid, _config, _log, _run):

    sacred.commands.print_config(_run)

    # set all seeds
    torch.manual_seed(tracktor['seed'])
    torch.cuda.manual_seed(tracktor['seed'])
    np.random.seed(tracktor['seed'])
    torch.backends.cudnn.deterministic = True

    output_dir = osp.join(get_output_dir(tracktor['module_name']),
                          tracktor['name'], tracktor['output_subdir'])
    sacred_config = osp.join(output_dir, 'sacred_config.yaml')

    if not osp.exists(output_dir):
        os.makedirs(output_dir)
    with open(sacred_config, 'w') as outfile:
        yaml.dump(_config, outfile, default_flow_style=False)

    ##########################
    # Initialize the modules #
    ##########################

    # object detection
    _log.info("Initializing object detector.")

    obj_detect = FRCNN_FPN(num_classes=2).to(device)
    obj_detect.load_state_dict(
        torch.load(_config['tracktor']['obj_detect_model'],
                   map_location=lambda storage, loc: storage))

    obj_detect.eval()

    # reid
    reid_network = resnet50(pretrained=False, **reid['cnn']).to(device)
    reid_network.load_state_dict(
        torch.load(tracktor['reid_weights'],
                   map_location=lambda storage, loc: storage))
    reid_network.eval()

    # tracktor
    if 'oracle' in tracktor:
        tracker = OracleTracker(obj_detect, reid_network, tracktor['tracker'],
                                tracktor['oracle'])
    else:
        tracker = Tracker(obj_detect, reid_network, tracktor['tracker'])

    time_total = 0
    num_frames = 0
    mot_accums = []
    dataset = Datasets(tracktor['dataset'])

    for seq in dataset:

        tracker.reset()

        start = time.time()

        _log.info(f"Tracking: {seq}")

        data_loader = DataLoader(seq, batch_size=1, shuffle=False)
        for i, frame in enumerate(tqdm(data_loader)):
            if len(seq) * tracktor['frame_split'][0] <= i <= len(
                    seq) * tracktor['frame_split'][1]:
                tracker.step(frame, i)
                num_frames += 1

        results = tracker.get_results()

        time_total += time.time() - start

        _log.info(f"Tracks found: {len(results)}")
        _log.info(f"Runtime for {seq}: {time.time() - start :.1f} s.")

        if tracktor['interpolate']:
            results = interpolate(results)

        if seq.no_gt:
            _log.info(f"No GT data for evaluation available.")
        else:
            mot_accums.append(get_mot_accum(results, seq))

        _log.info(f"Writing predictions to: {output_dir}")

        seq.write_results(results, output_dir)

        if tracktor['write_images']:
            plot_sequence(results, seq,
                          osp.join(output_dir, tracktor['dataset'], str(seq)))

    _log.info(
        f"Tracking runtime for all sequences (without evaluation or image writing): "
        f"{time_total:.1f} s ({num_frames / time_total:.1f} Hz)")

    if mot_accums:
        summary = evaluate_mot_accums(mot_accums,
                                      [str(s) for s in dataset if not s.no_gt],
                                      generate_overall=True)
        summary.to_pickle(
            "output/finetuning_results/results_{}_{}_{}_{}_{}.pkl".format(
                tracktor['output_subdir'],
                tracktor['tracker']['finetuning']['max_displacement'],
                tracktor['tracker']['finetuning']['batch_size'],
                tracktor['tracker']['finetuning']['learning_rate'],
                tracktor['tracker']['finetuning']['iterations']))
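
The frame_split check in the loop above restricts tracking to a fraction of each sequence. A standalone sketch, assuming a 600-frame sequence and a split of [0.5, 1.0]:

seq_len = 600
frame_split = [0.5, 1.0]  # assumed config: second half of the sequence

kept = [i for i in range(seq_len)
        if seq_len * frame_split[0] <= i <= seq_len * frame_split[1]]
print(kept[0], kept[-1])  # 300 599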
Example #7
def my_main(tracktor, siamese, _config):
    # set all seeds
    torch.manual_seed(tracktor['seed'])
    torch.cuda.manual_seed(tracktor['seed'])
    np.random.seed(tracktor['seed'])
    torch.backends.cudnn.deterministic = True

    output_dir = osp.join(get_output_dir(tracktor['module_name']),
                          tracktor['name'])
    sacred_config = osp.join(output_dir, 'sacred_config.yaml')

    if not osp.exists(output_dir):
        os.makedirs(output_dir)
    with open(sacred_config, 'w') as outfile:
        yaml.dump(_config, outfile, default_flow_style=False)

    ##########################
    # Initialize the modules #
    ##########################

    # object detection
    print("[*] Building object detector")
    if tracktor['network'].startswith('frcnn'):
        # FRCNN
        from tracktor.frcnn import FRCNN
        from frcnn.model import config

        if _config['frcnn']['cfg_file']:
            config.cfg_from_file(_config['frcnn']['cfg_file'])
        if _config['frcnn']['set_cfgs']:
            config.cfg_from_list(_config['frcnn']['set_cfgs'])

        obj_detect = FRCNN(num_layers=101)
        obj_detect.create_architecture(2,
                                       tag='default',
                                       anchor_scales=config.cfg.ANCHOR_SCALES,
                                       anchor_ratios=config.cfg.ANCHOR_RATIOS)
        obj_detect.load_state_dict(torch.load(tracktor['obj_detect_weights']))
    elif tracktor['network'].startswith('fpn'):
        # FPN
        from tracktor.fpn import FPN
        from fpn.model.utils import config
        config.cfg.TRAIN.USE_FLIPPED = False
        config.cfg.CUDA = True
        checkpoint = torch.load(tracktor['obj_detect_weights'])
        checkpoint = torch.load(tracktor['obj_detect_weights'])

        if 'pooling_mode' in checkpoint.keys():
            config.cfg.POOLING_MODE = checkpoint['pooling_mode']

        set_cfgs = [
            'ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]'
        ]
        config.cfg_from_file(_config['tracktor']['obj_detect_config'])
        config.cfg_from_list(set_cfgs)

        obj_detect = FPN(('__background__', 'pedestrian'),
                         101,
                         pretrained=False)
        obj_detect.create_architecture()

        obj_detect.load_state_dict(checkpoint['model'])
    else:
        raise NotImplementedError(
            f"Object detector type not known: {tracktor['network']}")

    pprint.pprint(config.cfg)
    obj_detect.eval()
    obj_detect.cuda()

    # reid
    reid_network = resnet50(pretrained=False, **siamese['cnn'])
    reid_network.load_state_dict(torch.load(tracktor['reid_network_weights']))
    reid_network.eval()
    reid_network.cuda()

    # tracktor
    if 'oracle' in tracktor:
        tracker = OracleTracker(obj_detect, reid_network, tracktor['tracker'],
                                tracktor['oracle'])
    else:
        tracker = Tracker(obj_detect, reid_network, tracktor['tracker'])

    print("[*] Beginning evaluation...")

    time_total = 0
    for sequence in Datasets(tracktor['dataset']):
        tracker.reset()

        now = time.time()

        print("[*] Evaluating: {}".format(sequence))

        data_loader = DataLoader(sequence, batch_size=1, shuffle=False)
        for i, frame in enumerate(data_loader):
            # frame_split =  [0.0, 1.0]
            if i >= len(sequence) * tracktor['frame_split'][0] and i <= len(
                    sequence) * tracktor['frame_split'][1]:

                tracker.step(frame)
        results = tracker.get_results()

        time_total += time.time() - now

        print("[*] Tracks found: {}".format(len(results)))
        print("[*] Time needed for {} evaluation: {:.3f} s".format(
            sequence,
            time.time() - now))

        if tracktor['interpolate']:
            results = interpolate(results)

        sequence.write_results(results, output_dir)

        if tracktor['write_images']:
            plot_sequence(
                results, sequence,
                osp.join(output_dir, tracktor['dataset'], str(sequence)))

    print("[*] Evaluation for all sets (without image generation): {:.3f} s".
          format(time_total))
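
config.cfg_from_list above consumes an alternating key/value list. A minimal sketch of that convention (the real frcnn helper additionally walks dotted keys into nested config sections and type-checks values):

from ast import literal_eval

def cfg_from_list_sketch(cfg, pairs):
    # pairs alternate between key names and python-literal values
    for key, value in zip(pairs[0::2], pairs[1::2]):
        cfg[key] = literal_eval(value)
    return cfg

print(cfg_from_list_sketch({}, ['ANCHOR_SCALES', '[4, 8, 16, 32]',
                                'ANCHOR_RATIOS', '[0.5,1,2]']))
# {'ANCHOR_SCALES': [4, 8, 16, 32], 'ANCHOR_RATIOS': [0.5, 1, 2]}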
Example #8
def main(module_name, name, seed, obj_detect_models, reid_models, tracker,
         oracle, dataset, load_results, frame_range, interpolate, write_images,
         _config, _log, _run):
    sacred.commands.print_config(_run)

    # set all seeds
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    np.random.seed(seed)
    torch.backends.cudnn.deterministic = True

    output_dir = osp.join(get_output_dir(module_name), name)
    sacred_config = osp.join(output_dir, 'sacred_config.yaml')

    if not osp.exists(output_dir):
        os.makedirs(output_dir)
    with open(sacred_config, 'w') as outfile:
        yaml.dump(copy.deepcopy(_config), outfile, default_flow_style=False)

    ##########################
    # Initialize the modules #
    ##########################

    # object detection
    _log.info("Initializing object detector(s).")

    obj_detects = []
    for obj_detect_model in obj_detect_models:
        obj_detect = FRCNN_FPN(num_classes=2)
        obj_detect.load_state_dict(
            torch.load(obj_detect_model,
                       map_location=lambda storage, loc: storage))
        obj_detects.append(obj_detect)

        obj_detect.eval()
        if torch.cuda.is_available():
            obj_detect.cuda()

    # reid
    _log.info("Initializing reID network(s).")

    reid_networks = []
    for reid_model in reid_models:
        reid_cfg = os.path.join(os.path.dirname(reid_model),
                                'sacred_config.yaml')
        reid_cfg = yaml.safe_load(open(reid_cfg))

        reid_network = ReIDNetwork_resnet50(pretrained=False,
                                            **reid_cfg['model_args'])
        reid_network.load_state_dict(
            torch.load(reid_model, map_location=lambda storage, loc: storage))
        reid_network.eval()
        if torch.cuda.is_available():
            reid_network.cuda()

        reid_networks.append(reid_network)

    # tracktor
    if oracle is not None:
        tracker = OracleTracker(obj_detect, reid_network, tracker, oracle)
    else:
        tracker = Tracker(obj_detect, reid_network, tracker)

    time_total = 0
    num_frames = 0
    mot_accums = []
    dataset = Datasets(dataset)

    # one detector and one reid network per sequence
    for seq, obj_detect, reid_network in zip(dataset, obj_detects,
                                             reid_networks):
        tracker.obj_detect = obj_detect
        tracker.reid_network = reid_network
        tracker.reset()

        _log.info(f"Tracking: {seq}")

        start_frame = int(frame_range['start'] * len(seq))
        end_frame = int(frame_range['end'] * len(seq))

        seq_loader = DataLoader(
            torch.utils.data.Subset(seq, range(start_frame, end_frame)))
        num_frames += len(seq_loader)

        results = {}
        if load_results:
            results = seq.load_results(output_dir)
        if not results:
            start = time.time()

            for frame_data in tqdm(seq_loader):
                with torch.no_grad():
                    tracker.step(frame_data)

            results = tracker.get_results()

            time_total += time.time() - start

            _log.info(f"Tracks found: {len(results)}")
            _log.info(f"Runtime for {seq}: {time.time() - start :.2f} s.")

            if interpolate:
                results = interpolate_tracks(results)

            _log.info(f"Writing predictions to: {output_dir}")
            seq.write_results(results, output_dir)

        if seq.no_gt:
            _log.info("No GT data for evaluation available.")
        else:
            mot_accums.append(get_mot_accum(results, seq_loader))

        if write_images:
            plot_sequence(results, seq,
                          osp.join(output_dir, str(dataset), str(seq)),
                          write_images)

    if time_total:
        _log.info(
            f"Tracking runtime for all sequences (without evaluation or image writing): "
            f"{time_total:.2f} s for {num_frames} frames ({num_frames / time_total:.2f} Hz)"
        )
    if mot_accums:
        _log.info("Evaluation:")
        evaluate_mot_accums(mot_accums,
                            [str(s) for s in dataset if not s.no_gt],
                            generate_overall=True)
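
Example #8 pairs each sequence with its own detector and reid network via zip, which silently stops at the shortest iterable, so the model lists must match the number of sequences. An illustration with assumed names:

seqs = ['MOT17-01', 'MOT17-02', 'MOT17-03']  # assumed sequence names
models = ['det_a.pth', 'det_b.pth']          # one model missing

for seq, model in zip(seqs, models):
    print(seq, '->', model)  # MOT17-03 is silently skipped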
Example #9
def main(tracktor, reid, _config, _log, _run):
    sacred.commands.print_config(_run)

    # set all seeds
    torch.manual_seed(tracktor['seed'])
    torch.cuda.manual_seed(tracktor['seed'])
    np.random.seed(tracktor['seed'])
    torch.backends.cudnn.deterministic = True

    output_dir = osp.join(get_output_dir(tracktor['module_name']),
                          tracktor['name'])
    sacred_config = osp.join(output_dir, 'sacred_config.yaml')

    if not osp.exists(output_dir):
        os.makedirs(output_dir)
    with open(sacred_config, 'w') as outfile:
        yaml.dump(_config, outfile, default_flow_style=False)

    ##########################
    # Initialize the modules #
    ##########################

    # object detection
    _log.info("Initializing object detector.")

    obj_detect = FRCNN_FPN(num_classes=2)
    obj_detect.load_state_dict(
        torch.load(_config['tracktor']['obj_detect_model'],
                   map_location=lambda storage, loc: storage))

    obj_detect.eval()
    obj_detect.cuda()

    # reid
    reid_network = resnet50(pretrained=False, **reid['cnn'])
    reid_network.load_state_dict(
        torch.load(tracktor['reid_weights'],
                   map_location=lambda storage, loc: storage))
    reid_network.eval()
    reid_network.cuda()

    # motion network
    motion_network = None
    if tracktor['tracker']['motion_model_enabled'] and not tracktor['motion'][
            'use_cva_model']:
        # instantiate the motion model class named in the config
        motion_network = eval(
            tracktor['motion']['model'])(**tracktor['motion']['model_args'])
        motion_network.load_state_dict(
            torch.load(tracktor['motion']['network_weights'])['model'])
        motion_network.eval().cuda()

    # tracktor
    if 'oracle' in tracktor:
        tracker = OracleTracker(obj_detect, reid_network, tracktor['tracker'],
                                tracktor['oracle'])
    else:
        tracker = Tracker(obj_detect, reid_network, motion_network,
                          tracktor['tracker'], tracktor['motion'], 2)

    time_total = 0
    num_frames = 0
    mot_accums = []
    dataset = Datasets(tracktor['dataset'])
    for seq in dataset:
        tracker.reset()
        _log.info(f"Tracking: {seq}")
        data_loader = DataLoader(seq, batch_size=1, shuffle=False)

        start = time.time()
        all_mm_times = []
        all_warp_times = []
        for i, frame in enumerate(tqdm(data_loader)):
            if len(seq) * tracktor['frame_split'][0] <= i <= len(
                    seq) * tracktor['frame_split'][1]:
                with torch.no_grad():
                    mm_time, warp_time = tracker.step(frame)
                    if mm_time is not None:
                        all_mm_times.append(mm_time)
                    if warp_time is not None:
                        all_warp_times.append(warp_time)
                num_frames += 1
        results = tracker.get_results()

        time_total += time.time() - start

        _log.info(f"Tracks found: {len(results)}")
        _log.info(f"Runtime for {seq}: {time.time() - start :.1f} s.")
        _log.info(
            f"Average FPS for {seq}: {len(data_loader) / (time.time() - start) :.3f}"
        )
        if all_mm_times:
            _log.info(
                f"Average MM time for {seq}: {float(np.array(all_mm_times).mean()) :.3f} s"
            )
        if all_warp_times:
            _log.info(
                f"Average warp time for {seq}: {float(np.array(all_warp_times).mean()) :.3f} s"
            )

        if tracktor['interpolate']:
            results = interpolate(results)

        if 'semi_online' in tracktor and tracktor['semi_online']:
            # walk each track backwards and drop frames until the first
            # frame whose sixth entry (flag) is 0
            for i, track in results.items():
                for frame in sorted(track, reverse=True):
                    if track[frame][5] == 0:
                        break
                    del track[frame]

        if tracktor['write_images']:
            plot_sequence(results, seq,
                          osp.join(output_dir, tracktor['dataset'], str(seq)),
                          tracktor['tracker']['plot_mm'])

        if seq.no_gt:
            _log.info(f"No GT data for evaluation available.")
        else:
            mot_accums.append(get_mot_accum(results, seq))

        _log.info(f"Writing predictions to: {output_dir}")
        seq.write_results(results, output_dir)

    _log.info(
        f"Tracking runtime for all sequences (without evaluation or image writing): "
        f"{time_total:.2f} s for {num_frames} frames ({num_frames / time_total:.2f} Hz)"
    )
    if mot_accums:
        evaluate_mot_accums(mot_accums,
                            [str(s) for s in dataset if not s.no_gt],
                            generate_overall=True)
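
The semi_online block above walks each track backwards and deletes frames until it reaches one whose sixth field is 0. A toy version of the same loop:

track = {1: [0, 0, 10, 10, 1.0, 0],   # flag 0: kept
         2: [0, 0, 10, 10, 1.0, 1],   # flag 1: trailing, dropped
         3: [0, 0, 10, 10, 1.0, 1]}   # flag 1: trailing, dropped

for frame in sorted(track, reverse=True):
    if track[frame][5] == 0:
        break
    del track[frame]

print(sorted(track))  # [1]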
Example #10
def main(tracktor, reid, _config, _log, _run):
    target = Target()
    targetpath = target.Folder()
    targetname = target.TargetName()

    vottpath = target.GetVottPath()
    vottfile = target.GetVottContent()
    dictid, timelist = target.GetTagTime(vottfile)
    print(f"{len(timelist)} frames were tagged")

    timedict = target.ExtractByTimeList(timelist)
    bbdict = target.GetbbWithTime(vottfile)

    sacred.commands.print_config(_run)

    # set all seeds
    torch.manual_seed(tracktor['seed'])
    torch.cuda.manual_seed(tracktor['seed'])
    np.random.seed(tracktor['seed'])
    torch.backends.cudnn.deterministic = True

    output_dir = osp.join(get_output_dir(tracktor['module_name']),
                          tracktor['name'])
    sacred_config = osp.join(output_dir, 'sacred_config.yaml')

    if not osp.exists(output_dir):
        os.makedirs(output_dir)
    with open(sacred_config, 'w') as outfile:
        yaml.dump(_config, outfile, default_flow_style=False)

    ##########################
    # Initialize the modules #
    ##########################

    # object detection
    _log.info("Initializing object detector.")

    obj_detect = FRCNN_FPN(num_classes=2)
    obj_detect.load_state_dict(
        torch.load(_config['tracktor']['obj_detect_model'],
                   map_location=lambda storage, loc: storage))

    obj_detect.eval()
    obj_detect.cuda()

    # reid
    reid_network = resnet50(pretrained=False, **reid['cnn'])
    reid_network.load_state_dict(
        torch.load(tracktor['reid_weights'],
                   map_location=lambda storage, loc: storage))
    reid_network.eval()
    reid_network.cuda()

    # tracktor
    print("Tracktor初始化完成")
    tracker = Tracker(obj_detect, reid_network, tracktor['tracker'])

    time_total = 0
    num_frames = 0
    mot_accums = []
    dataset = Datasets(tracktor['dataset'])

    for seq in dataset:
        tracker.reset()

        start = time.time()

        _log.info(f"Tracking: {seq}")

        data_loader = DataLoader(seq, batch_size=1, shuffle=False)
        print(f"{seq}加載完成, tracking開始")
        for i, frame in enumerate(tqdm(data_loader)):
            if len(seq) * tracktor['frame_split'][0] <= i <= len(
                    seq) * tracktor['frame_split'][1]:
                id = tracker.step(frame, bbdict[timedict["%06d" % num_frames]])
                target.WriteID2asset(id, dictid[timedict["%06d" % num_frames]])
                num_frames += 1
        results = tracker.get_results()
        ids = list(results.keys())
        target.WriteID2vott(ids, vottfile=vottfile)

        time_total += time.time() - start

        _log.info(f"Tracks found: {len(results)}")
        _log.info(f"Runtime for {seq}: {time.time() - start :.1f} s.")

        target.CleanImg()

        if tracktor['interpolate']:
            results = interpolate(results)

        if seq.no_gt:
            _log.info(f"No GT data for evaluation available.")
        else:
            mot_accums.append(get_mot_accum(results, seq))

        _log.info(f"Writing predictions to: {output_dir}")
        seq.write_results(results, output_dir)

        if tracktor['write_images']:
            plot_sequence(results, seq,
                          osp.join(output_dir, tracktor['dataset'], str(seq)))

        if tracktor['write_videos']:
            plot_sequence_video(
                results, seq,
                osp.join(output_dir, tracktor['dataset'], str(seq)))

    _log.info(
        f"Tracking runtime for all sequences (without evaluation or image writing): "
        f"{time_total:.1f} s ({num_frames / time_total:.1f} Hz)")
    if mot_accums:
        evaluate_mot_accums(mot_accums,
                            [str(s) for s in dataset if not s.no_gt],
                            generate_overall=True)
Example #11
def my_main(tracktor, siamese, _config):
    # set all seeds
    torch.manual_seed(tracktor['seed'])
    torch.cuda.manual_seed(tracktor['seed'])
    np.random.seed(tracktor['seed'])
    torch.backends.cudnn.deterministic = True

    output_dir = osp.join(get_output_dir(tracktor['module_name']),
                          tracktor['name'])
    sacred_config = osp.join(output_dir, 'sacred_config.yaml')

    if not osp.exists(output_dir):
        os.makedirs(output_dir)
    with open(sacred_config, 'w') as outfile:
        yaml.dump(_config, outfile, default_flow_style=False)

    ##########################
    # Initialize the modules #
    ##########################

    # object detection
    print("[*] Building object detector")
    if tracktor['network'].startswith('frcnn'):
        # FRCNN
        from tracktor.frcnn import FRCNN
        from frcnn.model import config

        if _config['frcnn']['cfg_file']:
            config.cfg_from_file(_config['frcnn']['cfg_file'])
        if _config['frcnn']['set_cfgs']:
            config.cfg_from_list(_config['frcnn']['set_cfgs'])

        obj_detect = FRCNN(num_layers=101)
        obj_detect.create_architecture(2,
                                       tag='default',
                                       anchor_scales=config.cfg.ANCHOR_SCALES,
                                       anchor_ratios=config.cfg.ANCHOR_RATIOS)
        obj_detect.load_state_dict(torch.load(tracktor['obj_detect_weights']))
    elif tracktor['network'].startswith('fpn'):
        # FPN
        from tracktor.fpn import FPN
        from fpn.model.utils import config
        config.cfg.TRAIN.USE_FLIPPED = False
        config.cfg.CUDA = True
        checkpoint = torch.load(tracktor['obj_detect_weights'])

        if 'pooling_mode' in checkpoint.keys():
            config.cfg.POOLING_MODE = checkpoint['pooling_mode']

        set_cfgs = [
            'ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]'
        ]
        config.cfg_from_file(_config['tracktor']['obj_detect_config'])
        config.cfg_from_list(set_cfgs)

        obj_detect = FPN(('__background__', 'pedestrian'),
                         101,
                         pretrained=False)
        obj_detect.create_architecture()

        obj_detect.load_state_dict(checkpoint['model'])
    else:
        raise NotImplementedError(
            f"Object detector type not known: {tracktor['network']}")

    obj_detect.eval()
    obj_detect.cuda()

    print("[*] Beginning operation...")

    layers = ['p2', 'p3', 'p4', 'p5']

    f_hdf5 = h5py.File(
        '/usr/stud/beckera/tracking_wo_bnw/data/motion/im_features.hdf5', 'w')
    i_hdf5 = h5py.File(
        '/usr/stud/beckera/tracking_wo_bnw/data/motion/images.hdf5', 'w')

    for sequence in Datasets(tracktor['dataset']):
        print("[*] Storing sequence: {}".format(sequence))
        f_group = f_hdf5.create_group(sequence._seq_name)
        i_group = i_hdf5.create_group(sequence._seq_name)

        data_loader = DataLoader(sequence, batch_size=1, shuffle=False)
        for i, frame in enumerate(data_loader):
            if i == 0:
                i_group.create_dataset('data',
                                       shape=(len(data_loader),
                                              *frame['data'][0].shape[1:]),
                                       dtype='float16')
                i_group.create_dataset('app_data',
                                       shape=(len(data_loader),
                                              *frame['app_data'][0].shape[1:]),
                                       dtype='float16')
                i_group.create_dataset('im_info',
                                       shape=(len(data_loader), 3),
                                       dtype='float16')
            i_group['data'][i] = frame['data'][0].cpu().numpy()
            i_group['app_data'][i] = frame['app_data'][0].cpu().numpy()
            i_group['im_info'][i] = frame['im_info'].cpu().numpy()

            # Variable(volatile=True) is deprecated; run inference under no_grad
            with torch.no_grad():
                image = frame['data'][0].permute(0, 3, 1, 2).cuda()
                features = obj_detect.get_features(image)

            for j, layer in enumerate(layers):
                if i == 0:
                    f_group.create_dataset(layer,
                                           shape=(len(data_loader),
                                                  *features[j].shape[1:]),
                                           dtype='float16')
                f_group[layer][i] = features[j].data.cpu().numpy().astype(
                    'float16')

    f_hdf5.close()
    i_hdf5.close()
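
A minimal sketch of reading the feature file Example #11 writes, assuming the same path and group layout (one group per sequence, one dataset per FPN layer):

import h5py

with h5py.File('im_features.hdf5', 'r') as f:  # path assumed
    for seq_name, group in f.items():
        print(seq_name, {layer: group[layer].shape for layer in group})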
Example #12
def main(seed, module_name, name, db_train, db_val, solver_cfg, model_args,
         dataset_kwargs, _run, _config, _log):
    # set all seeds
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    sacred.commands.print_config(_run)

    output_dir = osp.join(get_output_dir(module_name), name)
    tb_dir = osp.join(get_tb_dir(module_name), name)

    sacred_config = osp.join(output_dir, 'sacred_config.yaml')

    if not osp.exists(output_dir):
        os.makedirs(output_dir)
    with open(sacred_config, 'w') as outfile:
        yaml.dump(copy.deepcopy(_config), outfile, default_flow_style=False)

    ##########################
    # Initialize the modules #
    ##########################
    _log.info("[*] Building CNN")
    model = ReIDNetwork_resnet50(pretrained=True, **model_args)
    model.train()
    model.cuda()

    #########################
    # Initialize dataloader #
    #########################
    _log.info("[*] Initializing Datasets")

    _log.info("[*] Train:")
    dataset_kwargs = copy.deepcopy(dataset_kwargs)
    dataset_kwargs['logger'] = _log.info
    dataset_kwargs['mot_dir'] = db_train['mot_dir']
    dataset_kwargs['transform'] = db_train['transform']
    dataset_kwargs['random_triplets'] = db_train['random_triplets']

    db_train = Datasets(db_train['split'], dataset_kwargs)
    db_train = DataLoader(db_train, batch_size=1, shuffle=True)

    if db_val is not None:
        _log.info("[*] Val:")

        dataset_kwargs['mot_dir'] = db_val['mot_dir']
        dataset_kwargs['transform'] = db_val['transform']
        dataset_kwargs['random_triplets'] = db_val['random_triplets']
        db_val = Datasets(db_val['split'], dataset_kwargs)
        db_val = DataLoader(db_val, batch_size=1, shuffle=False)

    ##################
    # Begin training #
    ##################
    _log.info("[*] Solving ...")

    # build scheduling like in "In Defense of the Triplet Loss
    # for Person Re-Identification" from Hermans et al.
    def lr_scheduler(epoch):
        if epoch < 1 / 2 * solver_cfg['num_epochs']:
            return 1
        return 0.001**(2 * epoch / solver_cfg['num_epochs'] - 1)
        # return 0.1 ** (epoch // 30)
        # return 0.9 ** epoch

    solver = Solver(output_dir,
                    tb_dir,
                    lr_scheduler_lambda=lr_scheduler,
                    logger=_log.info,
                    optim=solver_cfg['optim'],
                    optim_args=solver_cfg['optim_args'])
    solver.train(model, db_train, db_val, solver_cfg['num_epochs'],
                 solver_cfg['log_nth'])
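
A quick check of this epoch-based schedule: the multiplier stays at 1 for the first half of training and decays exponentially to 0.001 at the last epoch. With an assumed num_epochs of 100:

num_epochs = 100  # assumed value for illustration

def lr_scheduler(epoch):
    if epoch < 1 / 2 * num_epochs:
        return 1
    return 0.001**(2 * epoch / num_epochs - 1)

print(lr_scheduler(0), lr_scheduler(50), lr_scheduler(75), lr_scheduler(100))
# 1 1.0 0.0316... 0.001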
Example #13
def my_main(_config):

    print(_config)

    dataset = "mot_train_"
    detections = "FRCNN"

    ##########################
    # Initialize the modules #
    ##########################

    print("[*] Beginning evaluation...")
    module_dir = get_output_dir('MOT17')
    results_dir = module_dir
    module_dir = osp.join(module_dir, 'eval/video_fp')
    #output_dir = osp.join(results_dir, 'plots')
    #if not osp.exists(output_dir):
    #    os.makedirs(output_dir)

    #sequences_raw = ["MOT17-13", "MOT17-11", "MOT17-10", "MOT17-09", "MOT17-05", "MOT17-04", "MOT17-02", ]

    #sequences = ["{}-{}".format(s, detections) for s in sequences_raw]
    #sequences = sequences[:1]

    # tracker = ["FRCNN_Base", "HAM_SADF17", "MOTDT17", "EDMT17", "IOU17", "MHT_bLSTM", "FWT_17", "jCC", "MHT_DAM_17"]
    # tracker = ["Baseline", "BnW", "FWT_17", "jCC", "MOTDT17", "MHT_DAM_17"]
    tracker = ["Tracktor", "FWT", "jCC", "MOTDT17"]
    #tracker = ["Baseline"]

    for t in tracker:
        print("[*] Evaluating {}".format(t))
        if True:
            #for db in Datasets(dataset):
            ################################
            # Make videos for each tracker #
            ################################
            db = Datasets(dataset)[2]

            s = "{}-{}".format(db, detections)

            gt_file = osp.join(cfg.DATA_DIR, "MOT17Labels", "train", s, "gt",
                               "gt.txt")
            res_file = osp.join(results_dir, t, s + ".txt")

            stDB = read_txt_to_struct(res_file)
            gtDB = read_txt_to_struct(gt_file)

            gtDB, distractor_ids = extract_valid_gt_data(gtDB)
            _, M, gtDB, stDB = evaluate_new(stDB, gtDB, distractor_ids)
            #gt_ids_res = np.unique(gtDB[:, 1])

            #gtDB = read_txt_to_struct(gt_file)
            # filter out so that confidence and id = 1
            #gtDB = gtDB[gtDB[:,7] == 1]
            #gtDB = gtDB[gtDB[:,6] == 1]

            st_ids = np.unique(stDB[:, 1])
            #gt_ids = np.unique(gtDB[:, 1])
            gt_frames = np.unique(gtDB[:, 0])
            f_gt = len(gt_frames)

            #gt_inds = [{} for i in range(f_gt)]
            st_inds = [{} for i in range(f_gt)]

            # hash the indices to speed up indexing
            #for i in range(gtDB.shape[0]):
            #    frame = np.where(gt_frames == gtDB[i, 0])[0][0]
            #gid = np.where(gt_ids == gtDB[i, 1])[0][0]
            #    gt_id = int(gtDB[i,1])
            #    gt_inds[frame][gt_id] = i

            gt_frames_list = list(gt_frames)
            for i in range(stDB.shape[0]):
                # detections may be missing in some frames, so align them to the ground-truth frame ids
                frame = gt_frames_list.index(stDB[i, 0])
                sid = np.where(st_ids == stDB[i, 1])[0][0]
                st_inds[frame][sid] = i

            #stDB = read_txt_to_struct(res_file)

            # collect system detections left unmatched by M (false positives)
            results = []
            for frame in range(f_gt):
                # get gt_ids in res
                m = M[frame]
                matched_sids = list(m.values())

                #frame_sids = list(st_inds[frame].keys())

                f = gt_frames_list[frame]
                st_frame = stDB[stDB[:, 0] == f]
                st_uniq_ids = np.unique(st_frame[:, 1])

                for st_id in st_uniq_ids:
                    sid = -1
                    si = np.where(st_ids == st_id)[0]
                    if len(si) > 0:
                        sid = si[0]
                    if sid not in matched_sids:
                        res = np.zeros(6)
                        res[0] = frame + 1
                        st_track = st_frame[st_frame[:, 1] == st_id]
                        res[2:6] = st_track[0, 2:6]
                        results.append(res)
                    else:
                        matched_sids.remove(sid)

            results = np.array(results)
            print("[*] Number of false positive boxes: {}".format(results.shape[0]))

            output_dir = osp.join(module_dir, t, s)
            if not osp.exists(output_dir):
                os.makedirs(output_dir)

            print("[*] Plotting whole sequence to {}".format(output_dir))

            # infinite color loop
            cyl = cy('ec', colors)
            loop_cy_iter = cyl()
            styles = defaultdict(lambda: next(loop_cy_iter))

            for frame, v in enumerate(db, 1):
                im_path = v['im_path']
                im_name = osp.basename(im_path)
                im_output = osp.join(output_dir, im_name)
                im = cv2.imread(im_path)
                im = im[:, :, (2, 1, 0)]

                sizes = np.shape(im)
                height = float(sizes[0])
                width = float(sizes[1])

                fig = plt.figure()
                #fig.set_size_inches(w,h)
                #fig.set_size_inches(width/height, 1, forward=False)
                #fig.set_size_inches(width/100, height/100)
                scale = width / 640
                #fig.set_size_inches(640/100, height*scale/100)
                fig.set_size_inches(width / 100, height / 100)
                ax = plt.Axes(fig, [0., 0., 1., 1.])
                ax.set_axis_off()
                fig.add_axes(ax)
                ax.imshow(im)

                res_frame = results[results[:, 0] == frame]

                for j in range(res_frame.shape[0]):
                    box = res_frame[j, 2:6]
                    gt_id = int(res_frame[j, 1])
                    ax.add_patch(
                        plt.Rectangle((box[0], box[1]),
                                      box[2] - box[0],
                                      box[3] - box[1],
                                      fill=False,
                                      linewidth=1.3 * scale,
                                      color='blue'))

                ax.annotate(t, (width - 250, height - 100),
                            color='white',
                            weight='bold',
                            fontsize=72,
                            ha='center',
                            va='center')

                plt.axis('off')
                plt.draw()
                plt.savefig(im_output, dpi=100)
                plt.close()
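
The plotting loop in Example #13 draws exactly the system detections that the matching M left unmatched in each frame, i.e. the per-frame false positives. Schematically:

matched_sids = [0, 2]      # system ids matched to ground truth this frame
frame_sids = [0, 1, 2, 3]  # all system ids present this frame

false_positives = [s for s in frame_sids if s not in matched_sids]
print(false_positives)  # [1, 3] -> these would be drawn as blue boxes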
Example #14
def main(dataset_names, prepr_w_tracktor, frcnn_prepr_params, tracktor_params,
         frcnn_weights, _config, _log, _run):
    sacred.commands.print_config(_run)

    if prepr_w_tracktor:
        prepr_params = tracktor_params

    else:
        prepr_params = frcnn_prepr_params

    make_deterministic(prepr_params['seed'])
    MOV_CAMERA_DICT = {**MOT15_MOV_CAMERA_DICT, **MOT17_MOV_CAMERA_DICT}

    # object detection
    _log.info("Initializing object detector.")
    obj_detect = FRCNN_FPN(num_classes=2)
    obj_detect.load_state_dict(
        torch.load(osp.join(OUTPUT_PATH, frcnn_weights),
                   map_location=lambda storage, loc: storage))
    obj_detect.eval()
    obj_detect.cuda()

    if prepr_w_tracktor:
        preprocessor = Tracker(obj_detect, None, prepr_params['tracker'])
    else:
        preprocessor = FRCNNPreprocessor(obj_detect, prepr_params)

    _log.info(
        f"Starting  preprocessing of datasets {dataset_names} with {'Tracktor' if prepr_w_tracktor else 'FRCNN'} \n"
    )

    for dataset_name in dataset_names:
        dataset = Datasets(dataset_name)
        _log.info(
            f"Preprocessing {len(dataset)} sequences from dataset {dataset_name} \n"
        )

        time_total = 0
        num_frames = 0
        for seq in dataset:
            preprocessor.reset()

            start = time.time()
            _log.info(f"Preprocessing : {seq}")
            if prepr_w_tracktor:
                preprocessor.do_align = tracktor_params['tracker'][
                    'do_align'] and (MOV_CAMERA_DICT[str(seq)])

            data_loader = DataLoader(seq,
                                     batch_size=1,
                                     shuffle=False,
                                     num_workers=8,
                                     pin_memory=True)
            for i, frame in enumerate(tqdm(data_loader)):
                with torch.no_grad():
                    preprocessor.step(frame)
                num_frames += 1

            time_total += time.time() - start
            _log.info(f"Runtime for {seq}: {time.time() - start :.1f} s.")

            output_file_path = osp.join(seq.seq_path, 'det',
                                        prepr_params['det_file_name'])
            if prepr_w_tracktor:
                results = preprocessor.get_results()
                seq.write_results(results, output_file_path)
            else:
                _log.info(f"Writing predictions in: {output_file_path}")
                preprocessor.save_results(output_file_path)

        _log.info(
            f"Tracking runtime for all sequences (without evaluation or image writing): "
            f"{time_total:.1f} s ({num_frames / time_total:.1f} Hz)")
def main(tracktor, reid, _config, _log, _run):
    sacred.commands.print_config(_run)

    # set all seeds
    torch.manual_seed(tracktor['seed'])
    torch.cuda.manual_seed(tracktor['seed'])
    np.random.seed(tracktor['seed'])
    torch.backends.cudnn.deterministic = True

    output_dir = osp.join(get_output_dir(tracktor['module_name']),
                          tracktor['name'])
    sacred_config = osp.join(output_dir, 'sacred_config.yaml')

    if not osp.exists(output_dir):
        os.makedirs(output_dir)
    with open(sacred_config, 'w') as outfile:
        yaml.dump(_config, outfile, default_flow_style=False)

    ##########################
    # Initialize the modules #
    ##########################

    _log.info("Initializing object detector.")

    # object detection
    obj_detect = FRCNN_FPN(num_classes=2, correlation_head=CorrelationHead())
    obj_detect_model = torch.load(_config['tracktor']['obj_detect_model'],
                                  map_location=lambda storage, loc: storage)
    correlation_weights = torch.load(
        _config['tracktor']['correlation_weights'],
        map_location=lambda storage, loc: storage)
    # merge the correlation head weights into the detector state dict
    for k in correlation_weights:
        obj_detect_model.update(
            {"correlation_head." + k: correlation_weights[k]})
    obj_detect.load_state_dict(obj_detect_model)
    obj_detect.eval()
    obj_detect.cuda()

    # reid
    reid_network = resnet50(pretrained=False, **reid['cnn'])
    reid_network.load_state_dict(
        torch.load(tracktor['reid_weights'],
                   map_location=lambda storage, loc: storage))
    reid_network.eval()
    reid_network.cuda()

    # tracktor
    if 'oracle' in tracktor:
        tracker = OracleTracker(obj_detect, reid_network, tracktor['tracker'],
                                tracktor['oracle'])
    else:
        tracker = Tracker(obj_detect, reid_network, tracktor['tracker'])

    time_total = 0
    num_frames = 0
    mot_accums = []
    dataset = Datasets(tracktor['dataset'])
    for seq in dataset:
        tracker.reset()

        start = time.time()

        _log.info(f"Tracking: {seq}")

        data_loader = DataLoader(seq, batch_size=1, shuffle=False)
        for i, frame in enumerate(tqdm(data_loader)):
            if len(seq) * tracktor['frame_split'][0] <= i <= len(
                    seq) * tracktor['frame_split'][1]:
                with torch.no_grad():
                    tracker.step(frame)
                num_frames += 1
        results = tracker.get_results()

        time_total += time.time() - start

        _log.info(f"Tracks found: {len(results)}")
        _log.info(f"Runtime for {seq}: {time.time() - start :.2f} s.")

        if tracktor['interpolate']:
            results = interpolate(results)

        if seq.no_gt:
            _log.info(f"No GT data for evaluation available.")
        else:
            mot_accums.append(get_mot_accum(results, seq))

        _log.info(f"Writing predictions to: {output_dir}")
        seq.write_results(results, output_dir)

        if tracktor['write_images']:
            plot_sequence(results, seq,
                          osp.join(output_dir, tracktor['dataset'], str(seq)))

        score_killed_tracks = tracker.get_score_killed_tracks()
        _log.info(f"Score Killed Tracks: ({len(score_killed_tracks)})")
        for kill in score_killed_tracks:
            _log.info(
                f"Track [ {kill['id']:3d} ] killed in frame [ {kill['frame']:3d} ]"
            )

        nms_killed_tracks = tracker.get_nms_killed_tracks()
        _log.info(f"NMS Killed Tracks ({len(nms_killed_tracks)}):")
        for kill in nms_killed_tracks:
            _log.info(
                f"Track [ {kill['id']:3d} ] killed in frame [ {kill['frame']:3d} ]"
            )

    _log.info(
        f"Tracking runtime for all sequences (without evaluation or image writing): "
        f"{time_total:.2f} s for {num_frames} frames ({num_frames / time_total:.2f} Hz)"
    )
    if mot_accums:
        evaluate_mot_accums(mot_accums,
                            [str(s) for s in dataset if not s.no_gt],
                            generate_overall=True)
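
Example #15 merges separately trained correlation head weights into the detector state dict by prefixing their keys. The same idea in isolation, with stand-in values instead of tensors:

obj_detect_model = {'backbone.conv1.weight': 0}
correlation_weights = {'fc.weight': 1, 'fc.bias': 2}

for k in correlation_weights:
    obj_detect_model["correlation_head." + k] = correlation_weights[k]

print(sorted(obj_detect_model))
# ['backbone.conv1.weight', 'correlation_head.fc.bias', 'correlation_head.fc.weight']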
Example #16
def main(tracktor, reid, _config, _log, _run):
    sacred.commands.print_config(_run)

    # set all seeds
    torch.manual_seed(tracktor['seed'])
    torch.cuda.manual_seed(tracktor['seed'])
    np.random.seed(tracktor['seed'])
    torch.backends.cudnn.deterministic = True

    output_dir = osp.join(get_output_dir(tracktor['module_name']),
                          tracktor['name'])
    sacred_config = osp.join(output_dir, 'sacred_config.yaml')

    if not osp.exists(output_dir):
        os.makedirs(output_dir)
    with open(sacred_config, 'w') as outfile:
        yaml.dump(_config, outfile, default_flow_style=False)

    ##########################
    # Initialize the modules #
    ##########################

    # object detection
    _log.info("Initializing object detector.")

    obj_detect = FRCNN_FPN(num_classes=2)
    obj_detect.load_state_dict(
        torch.load(_config['tracktor']['obj_detect_model'],
                   map_location=lambda storage, loc: storage))

    obj_detect.eval()
    obj_detect.cuda()

    # reid
    reid_network = resnet50(pretrained=False, **reid['cnn'])
    reid_network.load_state_dict(
        torch.load(tracktor['reid_weights'],
                   map_location=lambda storage, loc: storage))
    reid_network.eval()
    reid_network.cuda()

    # tracktor
    if 'oracle' in tracktor:
        tracker = OracleTracker(obj_detect, reid_network, tracktor['tracker'],
                                tracktor['oracle'])
    else:
        tracker = Tracker(obj_detect, reid_network, tracktor['tracker'])

    time_total = 0
    num_frames = 0
    mot_accums = []
    dataset = Datasets(tracktor['dataset'])

    for seq in dataset:

        tracker.reset()

        start = time.time()

        _log.info(f"Tracking: {seq}")

        data_loader = DataLoader(seq, batch_size=1, shuffle=False)
        for i, frame in enumerate(tqdm(data_loader)):
            if len(seq) * tracktor['frame_split'][0] <= i <= len(
                    seq) * tracktor['frame_split'][1]:
                tracker.step(frame)
                num_frames += 1
        results = tracker.get_results()

        time_total += time.time() - start

        _log.info(f"Tracks found: {len(results)}")
        _log.info(f"Runtime for {seq}: {time.time() - start :.1f} s.")

        if tracktor['interpolate']:
            results = interpolate(results)

        if seq.no_gt:
            _log.info(f"No GT data for evaluation available.")
        else:
            mot_accums.append(get_mot_accum(results, seq))

        _log.info(f"Writing predictions to: {output_dir}")
        seq.write_results(results, output_dir)

        if tracktor['write_images']:
            plot_sequence(results, seq,
                          osp.join(output_dir, tracktor['dataset'], str(seq)))

            img_array = []
            img_dir = osp.join(output_dir, tracktor['dataset'], str(seq),
                               "*.jpg")
            files = glob.glob(img_dir)
            sorted_files = natsorted(files)

            for filename in sorted_files:
                img = cv2.imread(filename)
                height, width, layers = img.shape
                size = (width, height)
                img_array.append(img)

            # only write a video if any frames were plotted
            if img_array:
                out = cv2.VideoWriter(
                    osp.join(output_dir, tracktor['dataset'],
                             str(seq), "result_video.avi"),
                    cv2.VideoWriter_fourcc(*'DIVX'), 10, size)

                for img in img_array:
                    out.write(img)
                out.release()

    _log.info(
        f"Tracking runtime for all sequences (without evaluation or image writing): "
        f"{time_total:.1f} s ({num_frames / time_total:.1f} Hz)")
    if mot_accums:
        evaluate_mot_accums(mot_accums,
                            [str(s) for s in dataset if not s.no_gt],
                            generate_overall=True)
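
Example #16 sorts the plotted frames with natsorted rather than sorted because lexicographic order breaks on unpadded frame numbers:

from natsort import natsorted

files = ['img2.jpg', 'img10.jpg', 'img1.jpg']
print(sorted(files))     # ['img1.jpg', 'img10.jpg', 'img2.jpg'] -> wrong order
print(natsorted(files))  # ['img1.jpg', 'img2.jpg', 'img10.jpg']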
Example #17
import os
import os.path as osp

from PIL import Image

from tracktor.config import get_output_dir
from tracktor.datasets.factory import Datasets

dataset = "mot_train_"
detections = "FRCNN"

module_dir = get_output_dir('MOT17')
results_dir = module_dir
module_dir = osp.join(module_dir, 'eval/video_fp')

tracker = ["Tracktor", "FWT", "jCC", "MOTDT17"]

for db in Datasets(dataset):
    seq_path = osp.join(module_dir, f"{tracker[0]}/{db}-{detections}")
    if not osp.exists(seq_path):
        continue
    for frame, v in enumerate(db, 1):
        file_name = osp.basename(v['im_path'])
        output_dir = osp.join(module_dir, 'combined', f"{db}-{detections}")
        if not osp.exists(output_dir):
            os.makedirs(output_dir)
        im_output = osp.join(output_dir, file_name)

        tracker_frames = []
        for t in tracker:
            im_path = osp.join(module_dir,
                               f"{t}/{db}-{detections}/{file_name}")
            tracker_frames.append(Image.open(im_path))
        # assumed completion: tile the four tracker views into a 2x2 grid and
        # save the combined frame (the original snippet ends before this step)
        w, h = tracker_frames[0].size
        combined = Image.new('RGB', (2 * w, 2 * h))
        for idx, im in enumerate(tracker_frames):
            combined.paste(im, ((idx % 2) * w, (idx // 2) * h))
        combined.save(im_output)
def my_main(_config):

    print(_config)

    dataset = "mot_train_"
    detections = "FRCNN"

    ##########################
    # Initialize the modules #
    ##########################

    print("[*] Beginning evaluation...")
    module_dir = get_output_dir('MOT17')
    results_dir = module_dir
    module_dir = osp.join(module_dir, 'eval/video_red_green')
    #output_dir = osp.join(results_dir, 'plots')
    #if not osp.exists(output_dir):
    #    os.makedirs(output_dir)

    #sequences_raw = ["MOT17-13", "MOT17-11", "MOT17-10", "MOT17-09", "MOT17-05", "MOT17-04", "MOT17-02", ]

    #sequences = ["{}-{}".format(s, detections) for s in sequences_raw]
    #sequences = sequences[:1]

    # tracker = ["FRCNN_Base", "HAM_SADF17", "MOTDT17", "EDMT17", "IOU17", "MHT_bLSTM", "FWT_17", "jCC", "MHT_DAM_17"]
    # tracker = ["Baseline", "BnW", "FWT_17", "jCC", "MOTDT17", "MHT_DAM_17"]
    tracker = ["FWT", "jCC", "MOTDT17"]#, "Tracktor++"]
    baseline = "Tracktor"

    for t in tracker:
        print("[*] Evaluating {}".format(t))
        for db in Datasets(dataset):
            ################################
            # Make videos for each tracker #
            ################################

            s = "{}-{}".format(db, detections)

            gt_file = osp.join(cfg.DATA_DIR, "MOT17Labels", "train", s, "gt", "gt.txt")
            res_file = osp.join(results_dir, t, s+".txt")
            base_file = osp.join(results_dir, baseline, s+".txt")

            stDB = read_txt_to_struct(res_file)
            gtDB = read_txt_to_struct(gt_file)

            gtDB, distractor_ids = extract_valid_gt_data(gtDB)
            _, M_res, gtDB, stDB = evaluate_new(stDB, gtDB, distractor_ids)
            gt_ids_res = np.unique(gtDB[:, 1])

            bsDB = read_txt_to_struct(base_file)
            gtDB = read_txt_to_struct(gt_file)

            gtDB, distractor_ids = extract_valid_gt_data(gtDB)
            _, M_bs, gtDB, stDB = evaluate_new(bsDB, gtDB, distractor_ids)
            gt_ids_base = np.unique(gtDB[:, 1])


            gtDB = read_txt_to_struct(gt_file)
            # keep only rows whose confidence and class flags (columns 6 and 7) equal 1
            gtDB = gtDB[gtDB[:, 7] == 1]
            gtDB = gtDB[gtDB[:, 6] == 1]

            #st_ids = np.unique(stDB[:, 1])
            #gt_ids = np.unique(gtDB[:, 1])
            gt_frames = np.unique(gtDB[:, 0])
            f_gt = len(gt_frames)

            gt_inds = [{} for i in range(f_gt)]
            #st_inds = [{} for i in range(f_gt)]

            # hash the indices to speed up indexing
            for i in range(gtDB.shape[0]):
                frame = np.where(gt_frames == gtDB[i, 0])[0][0]
                #gid = np.where(gt_ids == gtDB[i, 1])[0][0]
                gt_id = int(gtDB[i,1])
                gt_inds[frame][gt_id] = i

            #gt_frames_list = list(gt_frames)
            #for i in range(stDB.shape[0]):
                # sometimes detection missed in certain frames, thus should be assigned to groundtruth frame id for alignment
            #    frame = gt_frames_list.index(stDB[i, 0])
            #    sid = np.where(st_ids == stDB[i, 1])[0][0]
            #    st_inds[frame][sid] = i

            results = []
            for frame in range(f_gt):
                # get gt_ids in res
                m_res = M_res[frame]
                gids = list(m_res.keys())
                res_gt = []
                for gid in gids:
                    res_gt.append(gt_ids_res[gid])

                # get gt_ids in base
                m_bs = M_bs[frame]
                gids = list(m_bs.keys())
                base_gt = []
                for gid in gids:
                    base_gt.append(gt_ids_base[gid])

                # get unique gt ids
                unique_gt = np.unique(res_gt + base_gt)
                #print("res gt: {}".format(res_gt))
                #print("base gt: {}".format(base_gt))

                for gt in unique_gt:
                    gt = int(gt)
                    #print(gt)
                    res = np.zeros(6)
                    res[0] = frame+1
                    res[2:6] = gtDB[gt_inds[frame][gt], 2:6]
                    if gt in res_gt and gt in base_gt:
                        res[1] = 1
                    elif gt in base_gt:
                        res[1] = 2
                    elif gt in res_gt:
                        res[1] = 3
                    results.append(res)

            results = np.array(results)
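            # NOTE: 'colors' is not defined anywhere in this snippet. A plausible
            # mapping for the red/green visualisation (an assumption, not the
            # original): index 1 = matched by both trackers, 2 = baseline only,
            # 3 = evaluated tracker only.
            colors = ['black', 'blue', 'red', 'green']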

            output_dir = osp.join(module_dir, t, s)
            if not osp.exists(output_dir):
                os.makedirs(output_dir)

            print("[*] Plotting whole sequence to {}".format(output_dir))

            # infinite color loop
            cyl = cy('ec', colors)
            loop_cy_iter = cyl()
            styles = defaultdict(lambda: next(loop_cy_iter))

            for frame, v in enumerate(db, 1):
                im_path = v['im_path']
                im_name = osp.basename(im_path)
                im_output = osp.join(output_dir, im_name)
                im = cv2.imread(im_path)
                im = im[:, :, (2, 1, 0)]

                sizes = np.shape(im)
                height = float(sizes[0])
                width = float(sizes[1])

                fig = plt.figure()
                #fig.set_size_inches(w,h)
                #fig.set_size_inches(width/height, 1, forward=False)
                #fig.set_size_inches(width/100, height/100)
                scale = width/640
                #fig.set_size_inches(640/100, height*scale/100)
                fig.set_size_inches(width/100, height/100)
                ax = plt.Axes(fig, [0., 0., 1., 1.])
                ax.set_axis_off()
                fig.add_axes(ax)
                ax.imshow(im)

                res_frame = results[results[:,0]==frame]

                for j in range(res_frame.shape[0]):
                    box = res_frame[j,2:6]
                    gt_id = int(res_frame[j,1])
                    ax.add_patch(
                        plt.Rectangle((box[0], box[1]),
                            box[2] - box[0],
                            box[3] - box[1], fill=False,
                            linewidth=1.3*scale, color=colors[gt_id])
                            #**styles[gt_id])
                    )

                plt.axis('off')
                #plt.tight_layout()
                plt.draw()
                plt.savefig(im_output, dpi=100)
                plt.close()
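The plots written above are only per-frame images; to view them as the "video" the module name suggests, the frames still have to be stitched together. A minimal sketch using OpenCV's VideoWriter (the function name, fps, and .jpg extension are illustrative assumptions, not part of the original script):

import os
import os.path as osp

import cv2

def frames_to_video(frame_dir, video_path, fps=25):
    # Stitch the saved per-frame plots into an mp4 (hedged sketch).
    frames = sorted(f for f in os.listdir(frame_dir) if f.endswith('.jpg'))
    first = cv2.imread(osp.join(frame_dir, frames[0]))
    h, w = first.shape[:2]
    writer = cv2.VideoWriter(video_path, cv2.VideoWriter_fourcc(*'mp4v'),
                             fps, (w, h))
    for name in frames:
        writer.write(cv2.imread(osp.join(frame_dir, name)))
    writer.release()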
Example no. 19
def main(tracktor, reid, _config, _log, _run):
    sacred.commands.print_config(_run)

    # set all seeds
    torch.manual_seed(tracktor['seed'])
    torch.cuda.manual_seed(tracktor['seed'])
    np.random.seed(tracktor['seed'])
    torch.backends.cudnn.deterministic = True

    output_dir = osp.join(get_output_dir(tracktor['module_name']), tracktor['name'])
    sacred_config = osp.join(output_dir, 'sacred_config.yaml')

    if not osp.exists(output_dir):
        os.makedirs(output_dir)
    with open(sacred_config, 'w') as outfile:
        yaml.dump(_config, outfile, default_flow_style=False)

    ##########################
    # Initialize the modules #
    ##########################

    # object detection
    _log.info("Initializing object detector.")

    obj_detect = FRCNN_FPN(num_classes=2)
    obj_detect.load_state_dict(torch.load(_config['tracktor']['obj_detect_model'],
                               map_location=lambda storage, loc: storage))

    obj_detect.eval()
    obj_detect.cuda()

    # reid
    reid_network = resnet50(pretrained=False, **reid['cnn'])
    reid_network.load_state_dict(torch.load(tracktor['reid_weights'],
                                 map_location=lambda storage, loc: storage))
    reid_network.eval()
    reid_network.cuda()

    # neural motion model 

    vis_model = VisSimpleReID()

    motion_model = MotionModelV3(vis_model)
    motion_model.load_state_dict(torch.load('output/motion/finetune_motion_model_v3.pth',
                                 map_location=lambda storage, loc: storage))

    motion_model.eval()
    motion_model.cuda()

    save_vis_results = False

    # tracktor
    if 'oracle' in tracktor:
        tracker = OracleTracker(obj_detect, reid_network, tracktor['tracker'], tracktor['oracle'])
    else:
        # tracker = Tracker(obj_detect, reid_network, tracktor['tracker'])
        tracker = TrackerNeuralMM(obj_detect, reid_network, motion_model, tracktor['tracker'], save_vis_results=save_vis_results, vis_model=None)

    time_total = 0
    num_frames = 0
    mot_accums = []
    dataset = Datasets(tracktor['dataset'], {'use_val_split':True})
    for seq in dataset:
        tracker.reset()

        start = time.time()

        _log.info(f"Tracking: {seq}")

        data_loader = DataLoader(seq, batch_size=1, shuffle=False)
        for i, frame in enumerate(tqdm(data_loader)):
            if len(seq) * tracktor['frame_split'][0] <= i <= len(seq) * tracktor['frame_split'][1]:
                with torch.no_grad():
                    tracker.step(frame)
                num_frames += 1
        results = tracker.get_results()

        time_total += time.time() - start

        _log.info(f"Tracks found: {len(results)}")
        _log.info(f"Runtime for {seq}: {time.time() - start :.1f} s.")

        if tracktor['interpolate']:
            results = interpolate(results)

        if seq.no_gt:
            _log.info(f"No GT data for evaluation available.")
        else:
            mot_accums.append(get_mot_accum(results, seq))

        _log.info(f"Writing predictions to: {output_dir}")
        seq.write_results(results, output_dir)
        if save_vis_results:
            vis_results = tracker.get_vis_results()
            seq.write_vis_results(vis_results, output_dir)

        if tracktor['write_images']:
            plot_sequence(results, seq, osp.join(output_dir, tracktor['dataset'], str(seq)))

    _log.info(f"Tracking runtime for all sequences (without evaluation or image writing): "
              f"{time_total:.1f} s ({num_frames / time_total:.1f} Hz)")
    if mot_accums:
        evaluate_mot_accums(mot_accums, [str(s) for s in dataset if not s.no_gt], generate_overall=True)
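Both mains gate tracker.step on tracktor['frame_split'], a pair of fractions of the sequence length. A toy illustration of how those bounds select frames (the values here are made up):

# Illustrative only: how frame_split selects a frame range.
seq_len = 600
frame_split = [0.0, 0.5]          # e.g. the first half of the sequence
kept = [i for i in range(seq_len)
        if seq_len * frame_split[0] <= i <= seq_len * frame_split[1]]
print(len(kept))                  # 301 -- both bounds are inclusive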
Example no. 20
def main(tracktor, reid, _config, _log, _run):
    sacred.commands.print_config(_run)

    # set all seeds
    torch.manual_seed(tracktor['seed'])
    torch.cuda.manual_seed(tracktor['seed'])
    np.random.seed(tracktor['seed'])
    torch.backends.cudnn.deterministic = True

    output_dir = osp.join(get_output_dir(tracktor['module_name']), tracktor['name'])
    sacred_config = osp.join(output_dir, 'sacred_config.yaml')

    if not osp.exists(output_dir):
        os.makedirs(output_dir)
    with open(sacred_config, 'w') as outfile:
        yaml.dump(_config, outfile, default_flow_style=False)

    ##########################
    # Initialize the modules #
    ##########################

    # object detection
    _log.info("Initializing object detector.")
    use_masks = _config['tracktor']['tracker']['use_masks']
    mask_model = Mask_RCNN(num_classes=2)
    fast_model = FRCNN_FPN(num_classes=2)
    fast_model.load_state_dict(torch.load(_config['tracktor']['fast_rcnn_model'],
                               map_location=lambda storage, loc: storage))
    if use_masks:
        mask_model.load_state_dict(torch.load(_config['tracktor']['mask_rcnn_model'],
                                   map_location=lambda storage, loc: storage)['model_state_dict'])
        mask_model.eval()
        mask_model.cuda()

    fast_model.eval()
    fast_model.cuda()

    # reid
    reid_network = resnet50(pretrained=False, **reid['cnn'])
    reid_network.load_state_dict(torch.load(tracktor['reid_weights'],
                                 map_location=lambda storage, loc: storage))
    reid_network.eval()
    reid_network.cuda()

    # tracktor
    if 'oracle' in tracktor:
        tracker = OracleTracker(fast_model, reid_network, tracktor['tracker'], tracktor['oracle'])
    else:
        tracker = Tracker(fast_model, reid_network, tracktor['tracker'], mask_model)

    time_total = 0
    num_frames = 0
    mot_accums = []
    dataset = Datasets(tracktor['dataset'])
    for seq in dataset:
        # frame_idx counts frames within this sequence (used for plotting);
        # num_frames accumulates across sequences for the overall runtime statistic.
        frame_idx = 0
        tracker.reset()

        start = time.time()

        _log.info(f"Tracking: {seq}")

        data_loader = DataLoader(seq, batch_size=1, shuffle=False)
        if tracktor['write_images'] and use_masks:
            print("[*] Plotting image to {}".format(osp.join(output_dir, tracktor['dataset'])))

        for i, frame in enumerate(tqdm(data_loader)):
            if len(seq) * tracktor['frame_split'][0] <= i <= len(seq) * tracktor['frame_split'][1]:
                tracker.step(frame)
                if tracktor['write_images'] and use_masks:
                    result = tracker.get_results()
                    masks = tracker.get_masks()
                    plot_sequence(result, masks, seq, frame_idx,
                                  osp.join(output_dir, tracktor['dataset'], str(seq)),
                                  plot_masks=True)
                frame_idx += 1
                num_frames += 1

        results = tracker.get_results()

        time_total += time.time() - start

        _log.info(f"Tracks found: {len(results)}")
        _log.info(f"Runtime for {seq}: {time.time() - start :.1f} s.")

        if tracktor['interpolate']:
            results = interpolate(results)

        if seq.no_gt:
            _log.info(f"No GT data for evaluation available.")
        else:
            mot_accums.append(get_mot_accum(results, seq))

        _log.info(f"Writing predictions to: {output_dir}")
        seq.write_results(results, output_dir)


    _log.info(f"Tracking runtime for all sequences (without evaluation or image writing): "
              f"{time_total:.1f} s ({num_frames / time_total:.1f} Hz)")
    if mot_accums:
        evaluate_mot_accums(mot_accums, [str(s) for s in dataset if not s.no_gt], generate_overall=True)
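evaluate_mot_accums itself is not shown in any of these snippets. Assuming it wraps py-motmetrics the way the Tracktor reference code does, a minimal sketch would be:

import motmetrics as mm

def summarize_mot_accums(mot_accums, names):
    # Hedged sketch: compute the MOTChallenge/CLEAR-MOT metrics for the
    # accumulators gathered above and print one table with an OVERALL row.
    mh = mm.metrics.create()
    summary = mh.compute_many(mot_accums, names=names,
                              metrics=mm.metrics.motchallenge_metrics,
                              generate_overall=True)
    print(mm.io.render_summary(summary, formatters=mh.formatters,
                               namemap=mm.io.motchallenge_metric_names))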