Example #1
    def _construct_seq_dict(self, root_dir, subset):
        # load task information
        task_file = osp.join(root_dir, 'tasks/{}.csv'.format(subset))
        task = np.loadtxt(task_file, delimiter=',', dtype=str)

        # load dev annotations
        if subset == 'dev':
            dev_anno_file = osp.join(root_dir, 'annotations/dev.csv')
            dev_anno = np.loadtxt(dev_anno_file, delimiter=',', dtype=str)

        # construct seq_dict
        seq_dict = {}
        for s, line in enumerate(task):
            # parse task information
            vid_id, obj_id = line[:2]
            init_frame, last_frame = line[2:4].astype(int)
            init_anno = line[4:8].astype(np.float32)

            # log information
            seq_name = '_'.join([vid_id, obj_id])
            if s % 50 == 0 or s + 1 == len(task):
                ops.sys_print('Processing sequence [%d/%d]: %s...' %
                              (s + 1, len(task), seq_name))

            # parse annotations
            seq_dir = osp.join(root_dir, 'images', subset, vid_id)
            img0 = ops.read_image(osp.join(seq_dir, '000000.jpeg'))
            h, w = img0.shape[:2]
            meta = {
                'width': img0.shape[1],
                'height': img0.shape[0],
                'target_num': 1
            }

            # parse and rescale initial annotations
            anno = np.expand_dims(init_anno[[0, 2, 1, 3]], axis=0)
            anno[:, [0, 2]] *= w
            anno[:, [1, 3]] *= h

            # image paths
            frames = np.arange(init_frame, last_frame + 1, self.frame_stride)
            img_files = [osp.join(seq_dir, '%06d.jpeg' % f) for f in frames]

            # update meta information
            meta.update({
                'frame_num': len(img_files),
                'total_instances': len(img_files),
                'frames': frames
            })

            # update seq_dict
            seq_dict[seq_name] = {
                'img_files': img_files,
                'target': {
                    'anno': anno,
                    'meta': meta
                }
            }

        return seq_dict
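The reordering and scaling above imply that the raw task annotation is stored as normalized (xmin, xmax, ymin, ymax) fractions of the image size. A standalone sketch of just that rescaling step, with made-up box values and image dimensions:

import numpy as np

# hypothetical normalized annotation in (xmin, xmax, ymin, ymax) order
init_anno = np.array([0.25, 0.75, 0.40, 0.90], dtype=np.float32)
w, h = 1280, 720

# reorder to (x1, y1, x2, y2), then scale x's by width and y's by height
anno = np.expand_dims(init_anno[[0, 2, 1, 3]], axis=0)
anno[:, [0, 2]] *= w
anno[:, [1, 3]] *= h
print(anno)  # [[320. 288. 960. 648.]]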
Example #2
    def _construct_ins_dict(self, seqs, sampling_stride):
        # construct ins_dict
        ins_dict = OrderedDict()
        for s, (img_files, target) in enumerate(seqs):
            seq_name = seqs.seq_names[s]
            if s % 100 == 0 or (s + 1) == len(seqs):
                ops.sys_print('Processing [%d/%d]: %s...' %
                              (s + 1, len(seqs), seq_name))

            # filter out invalid frames (the mask is applied per frame index
            # below so that annotations stay aligned with img_files)
            anno, meta = target['anno'], target['meta']
            mask = self._filter(anno, meta)

            for f, img_file in enumerate(img_files):
                if f % sampling_stride != 0 or not mask[f]:
                    continue
                bbox = anno[f]
                ins_id, cam_id = s + 1, 1
                meta_info = {'width': meta['width'], 'height': meta['height']}

                # update ins_dict
                name = '{}-{}_{}'.format(ins_id, cam_id, f + 1)
                ins_dict[name] = {
                    'img_file': img_file,
                    'target': {
                        'bbox': bbox,
                        'ins_id': ins_id,
                        'cam_id': cam_id,
                        'frame_id': f + 1,
                        'meta': meta_info
                    }
                }

        return ins_dict
Example #3
    def _record(self, record_file, bboxes, times):
        # convert bboxes to string
        lines = []
        for bbox in bboxes:
            if len(bbox) == 1:
                lines.append('%d' % bbox[0])
            else:
                lines.append(','.join(['%.4f' % t for t in bbox]))

        # record bounding boxes
        record_dir = os.path.dirname(record_file)
        if not os.path.isdir(record_dir):
            os.makedirs(record_dir)
        with open(record_file, 'w') as f:
            f.write('\n'.join(lines))
        ops.sys_print('  Results recorded at %s' % record_file)

        # convert times to string
        lines = ['%.4f' % t for t in times]
        lines = [t.replace('nan', 'NaN') for t in lines]

        # record running times
        time_file = record_file[:record_file.rfind('_')] + '_time.txt'
        if os.path.exists(time_file):
            with open(time_file) as f:
                exist_lines = f.read().strip().split('\n')
            lines = [t + ',' + s for t, s in zip(exist_lines, lines)]
        with open(time_file, 'w') as f:
            f.write('\n'.join(lines))
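The time-file handling above accumulates one comma-separated column per repetition: if a previous run's times exist, the new values are joined onto each line. A minimal sketch of that column-append pattern with dummy values:

# per-frame times from a previous run and from the current run
exist_lines = ['0.0123', '0.0118']
lines = ['0.0131', '0.0125']

# each line gains one more comma-separated column
merged = [t + ',' + s for t, s in zip(exist_lines, lines)]
print(merged)  # ['0.0123,0.0131', '0.0118,0.0125']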
Example #4
    def build(self, cfg):
        assert isinstance(cfg, dict) and 'type' in cfg
        cfg = cfg.copy()
        module_name = cfg.pop('type')
        if 'input_type' in cfg:
            ops.sys_print('Warning: "input_type" should be parsed '
                          'before building module')
            cfg.pop('input_type')

        # get module
        if isinstance(module_name, six.string_types):
            module = self.get(module_name)
            if module is None:
                raise KeyError('{} is not in the registry'.format(module_name))
            if module.__name__ == 'dict':
                module = addict.Dict
        else:
            raise TypeError(
                'type must be a string, but got {}'.format(module_name))

        # build submodules
        for k, v in cfg.items():
            if isinstance(v, dict) and 'type' in v:
                cfg[k] = self.build(v)

        return module(**cfg)
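The registry builds modules recursively: any config value that is itself a dict with a 'type' key is built first and passed as a constructor argument. A toy illustration of the same pattern, using a plain dict in place of the real registry class (the classes and keys below are made up):

class ReLU:
    pass

class Linear:
    def __init__(self, dim, act=None):
        self.dim, self.act = dim, act

REGISTRY = {'Linear': Linear, 'ReLU': ReLU}

def build(cfg):
    cfg = cfg.copy()
    cls = REGISTRY[cfg.pop('type')]
    # build nested submodule configs before instantiating the parent
    for k, v in cfg.items():
        if isinstance(v, dict) and 'type' in v:
            cfg[k] = build(v)
    return cls(**cfg)

layer = build({'type': 'Linear', 'dim': 256, 'act': {'type': 'ReLU'}})
print(type(layer.act).__name__)  # ReLU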
Example #5
    def test_loc_losses(self):
        x = torch.rand(64, 4)
        x[:, 2:] += x[:, :2]
        y = x + 0.3 * (torch.rand(x.size()) - 0.5)
        y = y.clamp_(0)
        criterions = [
            nn.SmoothL1Loss(),
            SmoothL1Loss(beta=1. / 9),
            IoULoss(),
            GHMR_Loss(mu=0.02, bins=10, momentum=0.1)
        ]

        # check losses
        for criterion in criterions:
            loss = criterion(x, y)
            ops.sys_print('Loss[{}]: {:.3f}'.format(
                criterion.__class__.__name__, loss.item()))
            self.assertGreaterEqual(loss.item(), 0)

        # check losses on correct predictions
        for criterion in criterions:
            loss = criterion(y, y)
            ops.sys_print('GT Loss[{}]: {:.3f}'.format(
                criterion.__class__.__name__, loss.item()))
            self.assertGreaterEqual(loss.item(), 0)
Example #6
File: vot_eval.py Project: hqucv/dmtrack
    def run(self, tracker, visualize=False):
        ops.sys_print('Running tracker %s on %s...' %
                      (tracker.name, self.dataset.name))

        # run all specified experiments
        if 'supervised' in self.experiments:
            self.run_supervised(tracker, visualize)
Example #7
    def test_pair_wrapper(self):
        dataset = PairWrapper(base_transforms='extra_partial')
        indices = np.random.choice(len(dataset), 10)
        for i in indices:
            item = dataset[i]

            # check keys
            keys = [
                'img_z', 'img_x', 'img_meta_z', 'img_meta_x', 'gt_bboxes_z',
                'gt_bboxes_x'
            ]
            self.assertTrue(all([k in item for k in keys]))

            # check data types
            for _, v in item.items():
                self.assertTrue(isinstance(v, DC))

            # check sizes
            self.assertEqual(len(item['gt_bboxes_z'].data),
                             len(item['gt_bboxes_x'].data))
            if 'gt_labels' in item:
                self.assertEqual(len(item['gt_bboxes_x'].data),
                                 len(item['gt_labels'].data))

            # visualize pair
            if self.visualize:
                ops.sys_print('Item index:', i)
                self._show_image(item['img_z'].data,
                                 item['gt_bboxes_z'].data,
                                 fig=0,
                                 delay=1)
                self._show_image(item['img_x'].data,
                                 item['gt_bboxes_x'].data,
                                 fig=1,
                                 delay=0)
Example #8
    def _construct_seq_dict(self, root_dir, subset):
        # image and annotation paths
        seq_dirs = []
        anno_files = []
        if 'train' in subset:
            _seq_dirs = sorted(
                glob.glob(
                    osp.join(root_dir,
                             'VisDrone2018-VID-train/sequences/*_v')))
            _anno_files = [
                osp.join(root_dir, 'VisDrone2018-VID-train/annotations',
                         osp.basename(s) + '.txt') for s in _seq_dirs
            ]
            seq_dirs += _seq_dirs
            anno_files += _anno_files
        if 'val' in subset:
            _seq_dirs = sorted(
                glob.glob(
                    osp.join(root_dir, 'VisDrone2018-VID-val/sequences/*_v')))
            _anno_files = [
                osp.join(root_dir, 'VisDrone2018-VID-val/annotations',
                         osp.basename(s) + '.txt') for s in _seq_dirs
            ]
            seq_dirs += _seq_dirs
            anno_files += _anno_files
        seq_names = [osp.basename(s) for s in seq_dirs]

        # construct seq_dict
        seq_dict = {}
        for s, seq_name in enumerate(seq_names):
            if s % 10 == 0 or s + 1 == len(seq_names):
                ops.sys_print('Processing [%d/%d]: %s' %
                              (s + 1, len(seq_names), seq_name))

            img_files = sorted(glob.glob(osp.join(seq_dirs[s], '*.jpg')))
            anno_s = np.loadtxt(anno_files[s], delimiter=',', dtype=np.float32)
            anno_s = self._format(anno_s)

            # meta information
            img0 = ops.read_image(img_files[0])
            meta = {
                'width': img0.shape[1],
                'height': img0.shape[0],
                'frame_num': len(img_files),
                'target_num': len(set(anno_s[:, 1])),
                'total_instances': len(anno_s)
            }

            # update seq_dict
            seq_dict[seq_name] = {
                'img_files': img_files,
                'target': {
                    'anno': anno_s,
                    'meta': meta
                }
            }

        return seq_dict
Example #9
    def _calc_metrics(self, bboxes, anno):
        # drop frames whose annotations contain NaN values
        valid = ~np.any(np.isnan(anno), axis=1)
        if not np.any(valid):
            ops.sys_print('Warning: no valid annotations')
            return None, None
        else:
            ious = ops.rect_iou(bboxes[valid, :], anno[valid, :])
            center_errors = ops.center_error(bboxes[valid, :], anno[valid, :])
            return ious, center_errors
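The validity mask drops any frame whose annotation contains a NaN before computing IoU and center error. A standalone sketch of that masking step on a dummy annotation array:

import numpy as np

anno = np.array([[10., 10., 50., 50.],
                 [np.nan, 0., 0., 0.],
                 [20., 20., 60., 60.]])
valid = ~np.any(np.isnan(anno), axis=1)
print(valid)        # [ True False  True]
print(anno[valid])  # only the two fully-valid rows remain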
Example #10
    def _check_xcorr_head(self, backbone, head):
        begin = time.time()
        out = head(backbone(self.z), backbone(self.x))
        end = time.time()

        # print inference information
        ops.sys_print('[{}] z: {} x: {} output: {} time: {:.5f}s'.format(
            head.__class__.__name__, tuple(self.z.shape), tuple(self.x.shape),
            tuple(out.shape), end - begin))
Example #11
def main():
    # parse arguments
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.load_from is not None:
        cfg.load_from = args.load_from
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    if args.fp16:
        cfg.fp16 = {'loss_scale': 512.}
    if args.workers is not None:
        cfg.data.workers_per_gpu = args.workers
    cfg.gpus = args.gpus
    if args.autoscale_lr:
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8.
    ops.sys_print('Args:\n--', args)
    ops.sys_print('Configs:\n--', cfg)

    # init distributed env, logger and random seeds
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    logger = get_root_logger(cfg.log_level)
    logger.info('Distributed training: {}'.format(distributed))
    if args.seed is not None:
        logger.info('Set random seed to {}'.format(args.seed))
        set_random_seed(args.seed)
    
    # build model
    model = build_detector(
        cfg.model,
        train_cfg=cfg.train_cfg,
        test_cfg=cfg.test_cfg)
    
    # build dataset
    train_dataset = build_dataset(cfg.data.train)
    if cfg.checkpoint_config is not None:
        cfg.checkpoint_config.meta = {
            'mmdet_version': mmdet.__version__,
            'config': cfg.text,
            'CLASSES': train_dataset.CLASSES}
    model.CLASSES = train_dataset.CLASSES

    # run training
    train_detector(
        model,
        train_dataset,
        cfg,
        distributed=distributed,
        validate=args.validate,
        logger=logger)
Example #12
    def plot_curves(self, report_files, tracker_names):
        assert isinstance(report_files, list), \
            'Expected "report_files" to be a list, ' \
            'but got %s instead' % type(report_files)

        # assume tracker_names[0] is your tracker
        report_dir = osp.join(self.report_dir, tracker_names[0])
        if not osp.exists(report_dir):
            os.makedirs(report_dir)

        performance = {}
        for report_file in report_files:
            with open(report_file) as f:
                performance.update(json.load(f))

        succ_file = osp.join(report_dir, 'success_plot.pdf')
        key = 'overall'

        # sort trackers by AO
        tracker_names = list(performance.keys())
        aos = [t[key]['ao'] for t in performance.values()]
        inds = np.argsort(aos)[::-1]
        tracker_names = [tracker_names[i] for i in inds]

        # markers
        markers = ['-', '--', '-.']
        markers = [c + m for m in markers for c in [''] * 10]

        # plot success curves
        thr_iou = np.linspace(0, 1, self.nbins_iou)
        fig, ax = plt.subplots()
        lines = []
        legends = []
        for i, name in enumerate(tracker_names):
            line, = ax.plot(thr_iou,
                            performance[name][key]['succ_curve'],
                            markers[i % len(markers)])
            lines.append(line)
            legends.append('%s: [%.3f]' % (
                name, performance[name][key]['ao']))
        matplotlib.rcParams.update({'font.size': 7.4})
        legend = ax.legend(lines, legends, loc='center left',
                           bbox_to_anchor=(1, 0.5))

        matplotlib.rcParams.update({'font.size': 9})
        ax.set(xlabel='Overlap threshold',
               ylabel='Success rate',
               xlim=(0, 1), ylim=(0, 1),
               title='Success plots on GOT-10k')
        ax.grid(True)
        fig.tight_layout()

        ops.sys_print('Saving success plots to %s' % succ_file)
        fig.savefig(succ_file,
                    bbox_extra_artists=(legend,),
                    bbox_inches='tight',
                    dpi=300)
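The legend ordering above relies on sorting trackers by descending AO with argsort. A minimal sketch of that ordering step, using made-up names and scores:

import numpy as np

tracker_names = ['TrackerA', 'TrackerB', 'TrackerC']
aos = [0.41, 0.55, 0.38]

# indices of scores in descending order
inds = np.argsort(aos)[::-1]
print([tracker_names[i] for i in inds])  # ['TrackerB', 'TrackerA', 'TrackerC']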
Example #13
File: otb.py Project: zhangjf2018/LTMU
    def _filter_files(self, filenames):
        filtered_files = []
        for filename in filenames:
            with open(filename, 'r') as f:
                if f.read().strip() == '':
                    ops.sys_print('Warning: %s is empty.' % filename)
                else:
                    filtered_files.append(filename)

        return filtered_files
Example #14
    def test_siamfc_transforms(self):
        transforms = data.ReID_Transforms()
        dataset = data.Seq2Instance(self.seqs, transforms=transforms)

        inds = np.random.choice(len(dataset), 10)
        for i in inds:
            img, target = dataset[i]
            img = 255. * (img - img.min()) / (img.max() - img.min())
            img = img.permute(1, 2, 0).numpy().astype(np.uint8)
            if self.visualize:
                ops.sys_print('Label: %d' % target['label'].item())
                ops.show_image(img, target['bbox'], delay=0)
Example #15
    def test_xcorr(self):
        backbone = models.AlexNetV1()
        heads = [
            models.XCorr(scale=0.001, learnable=False),
            models.XCorr(scale=0.001, learnable=True),
            models.XCorr(scale=[0.001], learnable=False),
            models.XCorr(scale=[0.001], learnable=True)
        ]
        for head in heads:
            ops.sys_print(head.scale.exp())
            self._check_xcorr_head(backbone.to(self.device),
                                   head.to(self.device))
Example #16
    def show(self,
             tracker_names,
             seq_names=None,
             play_speed=1,
             visualize=True,
             save=False,
             save_dir='screenshots'):
        if seq_names is None:
            seq_names = self.dataset.seq_names
        elif isinstance(seq_names, str):
            seq_names = [seq_names]
        assert isinstance(tracker_names, (list, tuple))
        assert isinstance(seq_names, (list, tuple))

        play_speed = int(round(play_speed))
        assert play_speed > 0

        for s, seq_name in enumerate(seq_names):
            ops.sys_print('[%d/%d] Showing results on %s...' %
                          (s + 1, len(seq_names), seq_name))

            # mkdir if required to save screenshots
            if save:
                out_dir = osp.join(save_dir, seq_name)
                if not osp.exists(out_dir):
                    os.makedirs(out_dir)

            # load all tracking results
            records = {}
            for name in tracker_names:
                record_file = osp.join(self.result_dir, name,
                                       '%s.txt' % seq_name)
                records[name] = np.loadtxt(record_file, delimiter=',')

            # loop over the sequence and display results
            img_files, target = self.dataset[seq_name][:2]
            img_files = img_files[::self.frame_stride]
            target['anno'] = target['anno'][::self.frame_stride]
            for f, img_file in enumerate(img_files):
                if f % play_speed != 0:
                    continue
                img = ops.read_image(img_file)
                bboxes = [records[name][f] for name in tracker_names]
                if len(target['anno']) > f:
                    bboxes = [target['anno'][f]] + bboxes
                img = ops.show_image(img, bboxes, visualize=visualize)

                # save screenshot if required
                if save:
                    out_file = osp.join(out_dir, '%08d.jpg' % (f + 1))
                    cv2.imwrite(out_file, img)
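The display loop strides images and annotations with the same [::self.frame_stride] slice, so frame f of one still corresponds to frame f of the other. A quick sketch of that alignment with dummy lists:

# both sequences must be subsampled with the same slice to stay aligned
frame_stride = 10
img_files = ['%06d.jpg' % f for f in range(35)]
anno = list(range(35))

img_files = img_files[::frame_stride]
anno = anno[::frame_stride]
print(len(img_files), len(anno))  # 4 4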
Example #17
File: nfs.py Project: hqucv/dmtrack
    def _construct_seq_dict(self, root_dir, fps):
        # image and annotation paths
        anno_files = sorted(glob.glob(osp.join(root_dir, '*/%d/*.txt' % fps)))
        seq_names = [osp.basename(f)[:-4] for f in anno_files]
        seq_dirs = [
            osp.join(osp.dirname(f), n) for f, n in zip(anno_files, seq_names)
        ]

        # construct seq_dict
        seq_dict = {}
        for s, seq_name in enumerate(seq_names):
            if s % 50 == 0 or s + 1 == len(seq_names):
                ops.sys_print('Processing sequence [%d/%d]: %s...' %
                              (s + 1, len(seq_names), seq_name))
            img_files = sorted(glob.glob(osp.join(seq_dirs[s], '*.jpg')))
            anno = np.loadtxt(anno_files[s], dtype=str)
            anno = anno[:, 1:5].astype(np.float32)

            # handle inconsistent lengths
            if len(img_files) != len(anno):
                if abs(len(anno) / len(img_files) - 8) < 1:
                    anno = anno[0::8, :]
                diff = abs(len(img_files) - len(anno))
                if 0 < diff <= 1:
                    n = min(len(img_files), len(anno))
                    anno = anno[:n]
                    img_files = img_files[:n]
            assert len(img_files) == len(anno)

            # meta information
            seq_len = len(img_files)
            img0 = ops.read_image(img_files[0])
            meta = {
                'width': img0.shape[1],
                'height': img0.shape[0],
                'frame_num': seq_len,
                'target_num': 1,
                'total_instances': seq_len
            }

            # update seq_dict
            seq_dict[seq_name] = {
                'img_files': img_files,
                'target': {
                    'anno': anno,
                    'meta': meta
                }
            }

        return seq_dict
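The length reconciliation above apparently handles sequences whose annotations are roughly 8x denser than the extracted frames (high-frame-rate labels against subsampled images): every 8th row is kept, then an off-by-one tail is trimmed. A numeric sketch of the same heuristic with assumed counts:

# e.g. 801 annotation rows against 100 frames: ratio ~8
n_imgs, n_anno = 100, 801
if abs(n_anno / n_imgs - 8) < 1:
    n_anno = len(range(0, n_anno, 8))  # keep every 8th row -> 101
n = min(n_imgs, n_anno)               # trim the off-by-one tail
print(n)  # 100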
Example #18
    def _construct_seq_dict(self, root_dir, subset):
        # load subset sequence names
        split_file = osp.join(osp.dirname(__file__), 'lasot.json')
        with open(split_file, 'r') as f:
            splits = json.load(f)
        seq_names = splits[subset]

        # image and annotation paths
        seq_dirs = [
            osp.join(root_dir, n[:n.rfind('-')], n, 'img') for n in seq_names
        ]
        anno_files = [
            osp.join(root_dir, n[:n.rfind('-')], n, 'groundtruth.txt')
            for n in seq_names
        ]

        # construct seq_dict
        seq_dict = {}
        for s, seq_name in enumerate(seq_names):
            if s % 100 == 0 or s + 1 == len(seq_names):
                ops.sys_print('Processing sequence [%d/%d]: %s...' %
                              (s + 1, len(seq_names), seq_name))
            img_files = sorted(glob.glob(osp.join(seq_dirs[s], '*.jpg')))
            anno = np.loadtxt(anno_files[s], delimiter=',', dtype=np.float32)
            anno[:, 2:] = anno[:, :2] + anno[:, 2:] - 1

            # meta information
            seq_len = len(img_files)
            img0 = ops.read_image(img_files[0])
            meta = self._fetch_meta(seq_dirs[s])
            meta.update({
                'width': img0.shape[1],
                'height': img0.shape[0],
                'frame_num': seq_len,
                'target_num': 1,
                'total_instances': seq_len
            })

            # update seq_dict
            seq_dict[seq_name] = {
                'img_files': img_files,
                'target': {
                    'anno': anno,
                    'meta': meta
                }
            }

        return seq_dict
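The line anno[:, 2:] = anno[:, :2] + anno[:, 2:] - 1 converts boxes from (x, y, w, h) to corner coordinates (x1, y1, x2, y2); the -1 accounts for the last pixel being part of the box. A one-box sketch:

import numpy as np

anno = np.array([[10., 20., 30., 40.]], dtype=np.float32)  # (x, y, w, h)
anno[:, 2:] = anno[:, :2] + anno[:, 2:] - 1                # -> (x1, y1, x2, y2)
print(anno)  # [[10. 20. 39. 59.]]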
Example #19
File: vot_eval.py Project: hqucv/dmtrack
    def _record_confidence(self, record_file, bboxes):
        # convert conf to string
        lines = []
        for bbox in bboxes:
            if len(bbox) == 1:
                lines.append('')
            else:
                lines.append(str(bbox[-1]))

        # record confidence
        record_dir = os.path.dirname(record_file)
        if not os.path.isdir(record_dir):
            os.makedirs(record_dir)
        with open(record_file, 'w') as f:
            f.write('\n'.join(lines))
        ops.sys_print('  Results recorded at %s' % record_file)
Example #20
    def _record(self, record_file, bboxes, times):
        # record bounding boxes
        record_dir = osp.dirname(record_file)
        if not osp.isdir(record_dir):
            os.makedirs(record_dir)
        np.savetxt(record_file, bboxes, fmt='%.3f', delimiter=',')
        ops.sys_print('  Results recorded at %s' % record_file)

        # record running times
        time_dir = osp.join(record_dir, 'times')
        if not osp.isdir(time_dir):
            os.makedirs(time_dir)
        time_file = osp.join(
            time_dir,
            osp.basename(record_file).replace('.txt', '_time.txt'))
        np.savetxt(time_file, times, fmt='%.8f')
Example #21
    def _construct_seq_dict(self, root_dir, subset_dirs):
        # image and annotation paths
        anno_files = [
            glob.glob(osp.join(root_dir, c, 'anno/*.txt')) for c in subset_dirs
        ]
        anno_files = sorted(sum(anno_files, []))
        seq_dirs = [
            osp.join(osp.dirname(osp.dirname(f)), 'frames',
                     osp.basename(f)[:-4]) for f in anno_files
        ]
        seq_names = [osp.basename(d) for d in seq_dirs]

        # construct seq_dict
        seq_dict = {}
        for s, seq_name in enumerate(seq_names):
            if s % 100 == 0 or s + 1 == len(seq_names):
                ops.sys_print('Processing sequence [%d/%d]: %s...' %
                              (s + 1, len(seq_names), seq_name))
            img_files = glob.glob(osp.join(seq_dirs[s], '*.jpg'))
            img_files = sorted(img_files,
                               key=lambda f: int(osp.basename(f)[:-4]))
            anno = np.loadtxt(anno_files[s], delimiter=',')
            if anno.ndim == 1:
                anno = np.expand_dims(anno, axis=0)
            anno[:, 2:] = anno[:, :2] + anno[:, 2:] - 1

            # meta information
            seq_len = len(img_files)
            img0 = ops.read_image(img_files[0])
            meta = {
                'width': img0.shape[1],
                'height': img0.shape[0],
                'frame_num': seq_len,
                'target_num': 1,
                'total_instances': seq_len
            }

            # update seq_dict
            seq_dict[seq_name] = {
                'img_files': img_files,
                'target': {
                    'anno': anno,
                    'meta': meta
                }
            }

        return seq_dict
Example #22
    def _record(self, record_file, bboxes, times):
        # record bounding boxes
        record_dir = osp.dirname(record_file)
        if not osp.isdir(record_dir):
            os.makedirs(record_dir)
        np.savetxt(record_file, bboxes, fmt='%.3f', delimiter=',')
        ops.sys_print('  Results recorded at %s' % record_file)

        # record running times
        time_file = record_file[:record_file.rfind('_')] + '_time.txt'
        times = times[:, np.newaxis]
        if osp.exists(time_file):
            exist_times = np.loadtxt(time_file, delimiter=',')
            if exist_times.ndim == 1:
                exist_times = exist_times[:, np.newaxis]
            times = np.concatenate((exist_times, times), axis=1)
        np.savetxt(time_file, times, fmt='%.8f', delimiter=',')
Example #23
    def test_metric_losses(self):
        x = torch.rand(16, 2048)
        y = torch.LongTensor([0, 1, 2, 3, 2, 3, 1, 0, 0, 3, 2, 1, 0, 2, 3, 1])
        criterions = [
            TripletLoss(margin=None, normalize_feats=False),
            TripletLoss(margin=0.3, normalize_feats=False),
            TripletLoss(margin=None, normalize_feats=True),
            TripletLoss(margin=0.3, normalize_feats=True),
            CenterLoss(731, 2048)
        ]

        # check losses
        for criterion in criterions:
            loss = criterion(x, y)
            ops.sys_print('Loss[{}]: {:.3f}'.format(
                criterion.__class__.__name__, loss.item()))
            self.assertGreaterEqual(loss.item(), 0)
Example #24
    def _construct_seq_dict(self, root_dir):
        # image and annotation paths
        seq_dirs = sorted(glob.glob(
            osp.join(root_dir, '*/*_*/')))
        seq_dirs = [d[:-1] for d in seq_dirs]
        seq_names = [osp.basename(d) for d in seq_dirs]
        anno_files = [osp.join(
            root_dir, 'annotation/annotation/{}_gt_points.txt'.format(n))
            for n in seq_names]
        
        # construct seq_dict
        seq_dict = {}
        for s, seq_name in enumerate(seq_names):
            if s % 50 == 0 or s + 1 == len(seq_names):
                ops.sys_print('Processing sequence [%d/%d]: %s...' % (
                    s + 1, len(seq_names), seq_name))
            
            img_files = sorted(glob.glob(
                osp.join(seq_dirs[s], '*.jpg')))
            anno = np.loadtxt(anno_files[s])
            
            n = min(len(img_files), len(anno))
            assert n > 0
            img_files = img_files[:n]
            anno = anno[:n]

            # meta information
            seq_len = len(img_files)
            img0 = ops.read_image(img_files[0])
            meta = {
                'width': img0.shape[1],
                'height': img0.shape[0],
                'frame_num': seq_len,
                'target_num': 1,
                'total_instances': seq_len}
            
            # update seq_dict
            seq_dict[seq_name] = {
                'img_files': img_files,
                'target': {
                    'anno': anno,
                    'meta': meta}}
        
        return seq_dict
Example #25
    def _check_net(self, net):
        begin = time.time()
        out = net(self.input)
        end = time.time()

        # preserve the last-layer output
        if isinstance(out, dict):
            key = list(out.keys())[-1]
            out = out[key]
        elif isinstance(out, list):
            out = out[-1]

        # print inference information
        ops.sys_print('[{}] input: {} output: {} stride: {} '
                      'speed: {:.1f} fps'.format(net.__class__.__name__,
                                                 tuple(self.input.shape),
                                                 tuple(out.shape),
                                                 net.out_stride,
                                                 1. / (end - begin)))
Example #26
    def _construct_seq_dict(self, root_dir, subset, list_file):
        # image and annotation paths
        with open(list_file, 'r') as f:
            seq_names = f.read().strip().split('\n')
        seq_dirs = [osp.join(root_dir, subset, s) for s in seq_names]
        anno_files = [osp.join(d, 'groundtruth.txt') for d in seq_dirs]

        # construct seq_dict
        seq_dict = {}
        for s, seq_name in enumerate(seq_names):
            if s % 100 == 0 or s + 1 == len(seq_names):
                ops.sys_print('Processing sequence [%d/%d]: %s...' %
                              (s + 1, len(seq_names), seq_name))
            img_files = sorted(glob.glob(osp.join(seq_dirs[s], '*.jpg')))
            anno = np.loadtxt(anno_files[s], delimiter=',', dtype=np.float32)
            if anno.ndim == 1:
                assert anno.size == 4
                anno = anno[np.newaxis, :]
            anno[:, 2:] = anno[:, :2] + anno[:, 2:] - 1

            # meta information
            seq_len = len(img_files)
            img0 = ops.read_image(img_files[0])
            meta = self._fetch_meta(seq_dirs[s])
            meta.update({
                'width': img0.shape[1],
                'height': img0.shape[0],
                'frame_num': seq_len,
                'target_num': 1,
                'total_instances': seq_len
            })

            # update seq_dict
            seq_dict[seq_name] = {
                'img_files': img_files,
                'target': {
                    'anno': anno,
                    'meta': meta
                }
            }

        return seq_dict
Example #27
    def run(self, tracker, visualize=None):
        if visualize is None:
            visualize = self.visualize
        # sanity check
        if not isinstance(tracker, OxUvA_Tracker):
            raise ValueError(
                'Only supports trackers that implement OxUvA_Tracker.')
        ops.sys_print('Running tracker %s on %s...' %
                      (tracker.name, self.dataset.name))

        # loop over the complete dataset
        for s, (img_files, target) in enumerate(self.dataset):
            seq_name = self.dataset.seq_names[s]
            ops.sys_print('--Sequence %d/%d: %s' %
                          (s + 1, len(self.dataset), seq_name))

            # skip if results exist
            record_file = osp.join(self.result_dir, tracker.name,
                                   '%s.csv' % seq_name)
            if osp.exists(record_file):
                ops.sys_print('  Found results, skipping %s' % seq_name)
                continue

            # tracking loop
            preds, times = tracker.forward_test(img_files,
                                                target['anno'][0, :],
                                                visualize=visualize)
            assert len(preds) == len(img_files)

            # record results
            self._record(record_file, preds, times, seq_name, target['meta'])
Example #28
    def run_unsupervised(self, tracker, visualize=False):
        ops.sys_print('Running unsupervised experiment...')

        # loop over the complete dataset
        for s, (img_files, target) in enumerate(self.dataset):
            seq_name = self.dataset.seq_names[s]
            ops.sys_print('--Sequence %d/%d: %s' %
                          (s + 1, len(self.dataset), seq_name))

            # skip if results exist
            record_file = os.path.join(self.result_dir, tracker.name,
                                       'unsupervised', seq_name,
                                       '%s_001.txt' % seq_name)
            if os.path.exists(record_file):
                ops.sys_print('  Found results, skipping %s' % seq_name)
                continue

            # rectangular bounding boxes
            anno_rects = target['anno'].copy()
            if anno_rects.shape[1] == 8:
                anno_rects = self.dataset._corner2rect(anno_rects)

            # tracking loop
            bboxes, times = tracker.forward_test(img_files,
                                                 anno_rects[0],
                                                 visualize=visualize)
            assert len(bboxes) == len(img_files)

            # re-formatting
            bboxes = list(bboxes)
            bboxes[0] = [1]

            # record results
            self._record(record_file, bboxes, times)
Example #29
    def run(self, tracker, visualize=None):
        if visualize is None:
            visualize = self.visualize
        ops.sys_print('Running tracker %s on %s...' %
                      (tracker.name, self.dataset.name))

        # loop over the complete dataset
        for s, (img_files, target) in enumerate(self.dataset):
            seq_name = self.dataset.seq_names[s]
            ops.sys_print('--Sequence %d/%d: %s' %
                          (s + 1, len(self.dataset), seq_name))

            # skip if results exist
            record_file = osp.join(self.result_dir, tracker.name,
                                   '%s.txt' % seq_name)
            if osp.exists(record_file):
                ops.sys_print('  Found results, skipping %s' % seq_name)
                continue

            # tracking loop
            img_files = img_files[::self.frame_stride]
            init_bbox = target['anno'][0]
            bboxes, times = tracker.forward_test(img_files,
                                                 init_bbox,
                                                 visualize=visualize)
            assert len(bboxes) == len(img_files)

            # record results
            self._record(record_file, bboxes, times)
Example #30
    def _check_dataset(self, dataset):
        seq_num = len(dataset)
        if seq_num == 0:
            ops.sys_print('Warning: {} dataset is empty, '
                          'skip test...'.format(dataset.name))
            return
        self.assertGreater(seq_num, 0)
        ops.sys_print('{} contains {} sequences'.format(dataset.name, seq_num))

        # sanity check
        inds = random.sample(range(seq_num), min(seq_num, 10))
        for i in inds:
            img_files, target = dataset[i]
            anno, meta = target['anno'], target['meta']
            if anno.shape[0] == 1:
                continue  # test sets
            if anno.shape[1] in [4, 8]:
                self.assertEqual(len(img_files), len(anno))
            else:
                self.assertGreaterEqual(len(img_files) - 1, anno[:, 0].max())

        # visualization
        if self.visualize:
            img_files, target = random.choice(dataset)
            anno = target['anno']

            for f, img_file in enumerate(img_files):
                if f >= anno.shape[0]:
                    break  # test sets
                if anno.shape[1] == 9:
                    bboxes = anno[anno[:, 0] == f, 2:6]
                elif anno.shape[1] == 8:
                    break  # TODO: points are not supported yet
                else:
                    bboxes = anno[f, :]
                img = ops.read_image(img_file)
                ops.show_image(img, bboxes, delay=1)