Example no. 1
    def _construct_sequence(self, sequence_info):
        sequence_path = sequence_info['path']
        nz = sequence_info['nz']
        ext = sequence_info['ext']
        start_frame = sequence_info['startFrame']
        end_frame = sequence_info['endFrame']

        init_omit = 0
        if 'initOmit' in sequence_info:
            init_omit = sequence_info['initOmit']

        frames = [
            '{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(
                base_path=self.base_path,
                sequence_path=sequence_path,
                frame=frame_num,
                nz=nz,
                ext=ext)
            for frame_num in range(start_frame + init_omit, end_frame + 1)
        ]

        anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])

        # NOTE: OTB has some malformed annotation files that pandas cannot parse,
        # so use the numpy backend here
        ground_truth_rect = load_text(str(anno_path),
                                      delimiter=(',', None),
                                      dtype=np.float64,
                                      backend='numpy')

        return Sequence(sequence_info['name'],
                        frames,
                        'otb',
                        ground_truth_rect[init_omit:, :],
                        object_class=sequence_info['object_class'])
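
The loader above depends on a load_text helper that accepts either a single delimiter or a tuple of candidate delimiters plus a backend switch. A minimal sketch of a compatible numpy-only reader, assuming the same call pattern as above (an illustration, not the toolkit's actual implementation):

import numpy as np

def load_text_numpy(path, delimiter=',', dtype=np.float64):
    # Try each candidate delimiter in turn; None means "split on any whitespace".
    delimiters = delimiter if isinstance(delimiter, (tuple, list)) else (delimiter,)
    for d in delimiters:
        try:
            return np.loadtxt(path, delimiter=d, dtype=dtype)
        except ValueError:
            continue
    raise ValueError('Could not parse annotation file: {}'.format(path))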
Example no. 2
    def _construct_sequence(self, set, sequence_name):
        # Original path based on self.base_path:
        # anno_path = '{}/{}/anno/{}.txt'.format(self.base_path, set, sequence_name)
        # This variant resolves the annotation file against the current working directory instead.
        anno_path = os.path.join(os.getcwd(), set, 'anno', sequence_name + '.txt')

        ground_truth_rect = load_text(str(anno_path),
                                      delimiter=',',
                                      dtype=np.float64,
                                      backend='numpy')

        # Original path based on self.base_path:
        # frames_path = '{}/{}/frames/{}'.format(self.base_path, set, sequence_name)
        # This variant reads the extracted frames from '<cwd>/<set>/zips/<name>' instead.
        frames_path = os.path.join(os.getcwd(), set, 'zips', sequence_name)
        frame_list = [
            frame for frame in os.listdir(frames_path)
            if frame.endswith(".jpg")
        ]
        frame_list.sort(key=lambda f: int(f[:-4]))
        frames_list = [
            os.path.join(frames_path, frame) for frame in frame_list
        ]

        return Sequence(sequence_name, frames_list, 'trackingnet',
                        ground_truth_rect.reshape(-1, 4))
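
Note that this variant ignores self.base_path and resolves everything against the current working directory, so it only works when the process is started from the folder that contains the split directories. A hypothetical usage sketch (the class name, split name, and sequence name are placeholders):

# Hypothetical usage; assumes '<cwd>/<split>/anno/<name>.txt' and
# '<cwd>/<split>/zips/<name>/' with numbered .jpg frames exist.
dataset = TrackingNetDataset()  # placeholder for the class owning this method
seq = dataset._construct_sequence('TRAIN_0', 'example_sequence')
print(seq.name, seq.ground_truth_rect.shape)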
Example no. 3
    def _construct_sequence(self, sequence_name):
        anno_path = '{}/{}/groundtruth.txt'.format(self.base_path,
                                                   sequence_name)

        ground_truth_rect = load_text(str(anno_path),
                                      delimiter=',',
                                      dtype=np.float64)

        frames_path = '{}/{}/color/'.format(self.base_path, sequence_name)
        depths_path = '{}/{}/depth/'.format(self.base_path, sequence_name)
        frame_list = [
            frame for frame in os.listdir(frames_path)
            if frame.endswith(".jpg")
        ]
        depth_list = [
            depth for depth in os.listdir(depths_path)
            if depth.endswith(".png")
        ]
        frame_list.sort(key=lambda f: int(f[:-4]))
        depth_list.sort(key=lambda f: int(f[:-4]))
        frames_list = [
            os.path.join(frames_path, frame) for frame in frame_list
        ]
        depths_list = [
            os.path.join(depths_path, depth) for depth in depth_list
        ]

        return SequenceDepth(sequence_name, frames_list, depths_list, 'cdtb',
                             ground_truth_rect.reshape(-1, 4))
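
SequenceDepth is not part of the stock Sequence interface used in the other examples; it additionally carries the per-frame depth paths. A hypothetical minimal container matching the positional arguments used above (name, RGB frames, depth frames, dataset tag, ground-truth boxes) might look like this sketch:

class SequenceDepth:
    # Sketch of an RGB-D sequence container; not the project's actual class.
    def __init__(self, name, frames, depth_frames, dataset, ground_truth_rect,
                 object_class=None, target_visible=None):
        self.name = name
        self.frames = frames                 # RGB frame paths
        self.depth_frames = depth_frames     # aligned depth frame paths
        self.dataset = dataset
        self.ground_truth_rect = ground_truth_rect
        self.object_class = object_class
        self.target_visible = target_visible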
Example no. 4
    def _construct_sequence(self, sequence_name):
        class_name = sequence_name.split('-')[0]
        # class_name is overridden with an empty string, so the paths built below
        # skip the per-class directory level of the standard LaSOT layout.
        class_name = ''
        anno_path = '{}/{}/{}/groundtruth.txt'.format(self.base_path,
                                                      class_name,
                                                      sequence_name)

        ground_truth_rect = load_text(str(anno_path),
                                      delimiter=',',
                                      dtype=np.float64)

        occlusion_label_path = '{}/{}/{}/full_occlusion.txt'.format(
            self.base_path, class_name, sequence_name)

        # NOTE: the pandas backend is very slow for loading the occlusion/out-of-view masks,
        # so use the numpy backend here
        full_occlusion = load_text(str(occlusion_label_path),
                                   delimiter=',',
                                   dtype=np.float64,
                                   backend='numpy')

        out_of_view_label_path = '{}/{}/{}/out_of_view.txt'.format(
            self.base_path, class_name, sequence_name)
        out_of_view = load_text(str(out_of_view_label_path),
                                delimiter=',',
                                dtype=np.float64,
                                backend='numpy')

        target_visible = np.logical_and(full_occlusion == 0, out_of_view == 0)

        frames_path = '{}/{}/{}/img'.format(self.base_path, class_name,
                                            sequence_name)

        frames_list = [
            '{}/{:08d}.jpg'.format(frames_path, frame_number)
            for frame_number in range(1, ground_truth_rect.shape[0] + 1)
        ]

        target_class = class_name
        return Sequence(sequence_name,
                        frames_list,
                        'lasot',
                        ground_truth_rect.reshape(-1, 4),
                        object_class=target_class,
                        target_visible=target_visible)
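
The visibility mask simply marks a frame as visible when it is neither fully occluded nor out of view. A tiny self-contained illustration of that logical_and step with made-up label arrays:

import numpy as np

# Made-up per-frame labels: 1 = fully occluded / out of view, 0 = not.
full_occlusion = np.array([0, 0, 1, 0, 1])
out_of_view = np.array([0, 1, 0, 0, 0])

# Visible only when neither flag is set, as in the loader above.
target_visible = np.logical_and(full_occlusion == 0, out_of_view == 0)
print(target_visible)  # [ True False False  True False]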
Example no. 5
    def _construct_sequence(self, sequence_info):
        sequence_path = sequence_info['path']
        nz = sequence_info['nz']
        ext = sequence_info['ext']
        start_frame = sequence_info['startFrame']
        end_frame = sequence_info['endFrame']

        init_omit = 0
        if 'initOmit' in sequence_info:
            init_omit = sequence_info['initOmit']

        frames = [
            '{base_path}/{sequence_path}/{frame:0{nz}}.{ext}'.format(
                base_path=self.base_path,
                sequence_path=sequence_path,
                frame=frame_num,
                nz=nz,
                ext=ext)
            for frame_num in range(start_frame + init_omit, end_frame + 1)
        ]

        anno_path = '{}/{}'.format(self.base_path, sequence_info['anno_path'])

        ground_truth_rect = load_text(str(anno_path), delimiter='\t', dtype=np.float64)

        return Sequence(sequence_info['name'], frames, 'nfs',
                        ground_truth_rect[init_omit:, :],
                        object_class=sequence_info['object_class'])
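
The method expects sequence_info to be a plain dict; the keys it reads are visible in the code above. A hypothetical entry with illustrative values only:

sequence_info = {
    'name': 'nfs_example',                     # illustrative values throughout
    'path': 'sequences/nfs_example/frames',
    'startFrame': 1,
    'endFrame': 300,
    'nz': 5,                                   # zero-padding width of frame file names
    'ext': 'jpg',
    'anno_path': 'sequences/nfs_example/groundtruth.txt',
    'object_class': 'car',
    'initOmit': 0,                             # optional; frames to skip at the start
}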
Example no. 6
    def _construct_sequence(self, set, sequence_name):
        anno_path = '{}/{}/anno/{}.txt'.format(self.base_path, set,
                                               sequence_name)

        ground_truth_rect = load_text(str(anno_path),
                                      delimiter=',',
                                      dtype=np.float64,
                                      backend='numpy')

        frames_path = '{}/{}/frames/{}'.format(self.base_path, set,
                                               sequence_name)
        frame_list = [
            frame for frame in os.listdir(frames_path)
            if frame.endswith(".jpg")
        ]
        frame_list.sort(key=lambda f: int(f[:-4]))
        frames_list = [
            os.path.join(frames_path, frame) for frame in frame_list
        ]

        return Sequence(sequence_name, frames_list, 'trackingnet',
                        ground_truth_rect.reshape(-1, 4))
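
A sequence list for one split can be derived by scanning the annotation directory, since there is one .txt file per sequence under '<base_path>/<split>/anno'. A minimal helper sketch under that layout assumption:

import os

def list_split_sequences(base_path, split):
    # One annotation file per sequence; strip the '.txt' suffix to get the names.
    anno_dir = os.path.join(base_path, split, 'anno')
    return sorted(f[:-4] for f in os.listdir(anno_dir) if f.endswith('.txt'))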
Example no. 7
def extract_results(trackers, dataset, report_name, skip_missing_seq=False, plot_bin_gap=0.05,
                    exclude_invalid_frames=False):
    settings = env_settings()
    eps = 1e-16

    result_plot_path = os.path.join(settings.result_plot_path, report_name)

    if not os.path.exists(result_plot_path):
        os.makedirs(result_plot_path)

    threshold_set_overlap = torch.arange(0.0, 1.0 + plot_bin_gap, plot_bin_gap, dtype=torch.float64)
    threshold_set_center = torch.arange(0, 51, dtype=torch.float64)
    threshold_set_center_norm = torch.arange(0, 51, dtype=torch.float64) / 100.0

    avg_overlap_all = torch.zeros((len(dataset), len(trackers)), dtype=torch.float64)
    ave_success_rate_plot_overlap = torch.zeros((len(dataset), len(trackers), threshold_set_overlap.numel()),
                                                dtype=torch.float32)
    ave_success_rate_plot_center = torch.zeros((len(dataset), len(trackers), threshold_set_center.numel()),
                                               dtype=torch.float32)
    ave_success_rate_plot_center_norm = torch.zeros((len(dataset), len(trackers), threshold_set_center.numel()),
                                                    dtype=torch.float32)

    valid_sequence = torch.ones(len(dataset), dtype=torch.uint8)

    for seq_id, seq in enumerate(tqdm(dataset)):
        # Load anno
        anno_bb = torch.tensor(seq.ground_truth_rect)
        target_visible = torch.tensor(seq.target_visible, dtype=torch.uint8) if seq.target_visible is not None else None
        for trk_id, trk in enumerate(trackers):
            # Load results
            base_results_path = '{}/{}'.format(trk.results_dir, seq.name)
            # These baseline trackers store their NFS results with an 'nfs_' file-name prefix.
            if report_name == 'nfs' and trk.results_dir.split('/')[-2] in (
                    'atom', 'ECO', 'UPDT', 'MDNet', 'CCOT'):
                base_results_path = '{}/nfs_{}'.format(trk.results_dir, seq.name)

            results_path = '{}.txt'.format(base_results_path)

            if os.path.isfile(results_path):
                pred_bb = torch.tensor(load_text(str(results_path), delimiter=('\t', ','), dtype=np.float64))
            else:
                if skip_missing_seq:
                    valid_sequence[seq_id] = 0
                    break
                else:
                    raise Exception('Result not found. {}'.format(results_path))

            # Calculate measures
            err_overlap, err_center, err_center_normalized, valid_frame = calc_seq_err_robust(
                pred_bb, anno_bb, seq.dataset, target_visible)

            avg_overlap_all[seq_id, trk_id] = err_overlap[valid_frame].mean()

            if exclude_invalid_frames:
                seq_length = valid_frame.long().sum()
            else:
                seq_length = anno_bb.shape[0]

            if seq_length <= 0:
                raise Exception('Seq length zero')

            ave_success_rate_plot_overlap[seq_id, trk_id, :] = (
                (err_overlap.view(-1, 1) > threshold_set_overlap.view(1, -1)).sum(0).float() / seq_length)
            ave_success_rate_plot_center[seq_id, trk_id, :] = (
                (err_center.view(-1, 1) <= threshold_set_center.view(1, -1)).sum(0).float() / seq_length)
            ave_success_rate_plot_center_norm[seq_id, trk_id, :] = (
                (err_center_normalized.view(-1, 1) <= threshold_set_center_norm.view(1, -1)).sum(0).float() / seq_length)

    print('\n\nComputed results over {} / {} sequences'.format(valid_sequence.long().sum().item(), valid_sequence.shape[0]))

    # Prepare dictionary for saving data
    seq_names = [s.name for s in dataset]
    tracker_names = [{'name': t.name, 'param': t.parameter_name, 'run_id': t.run_id, 'disp_name': t.display_name}
                     for t in trackers]

    eval_data = {'sequences': seq_names, 'trackers': tracker_names,
                 'valid_sequence': valid_sequence.tolist(),
                 'ave_success_rate_plot_overlap': ave_success_rate_plot_overlap.tolist(),
                 'ave_success_rate_plot_center': ave_success_rate_plot_center.tolist(),
                 'ave_success_rate_plot_center_norm': ave_success_rate_plot_center_norm.tolist(),
                 'avg_overlap_all': avg_overlap_all.tolist(),
                 'threshold_set_overlap': threshold_set_overlap.tolist(),
                 'threshold_set_center': threshold_set_center.tolist(),
                 'threshold_set_center_norm': threshold_set_center_norm.tolist()}

    with open(result_plot_path + '/eval_data.pkl', 'wb') as fh:
        pickle.dump(eval_data, fh)

    return eval_data
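
The returned eval_data dict already contains everything needed for a success plot. As a post-processing sketch, an AUC-style per-tracker score can be approximated by averaging the overlap success curves over the valid sequences and then over the thresholds; this relies only on the keys written above and is not the toolkit's own plotting code:

import torch

def auc_from_eval_data(eval_data):
    # Shape: (num_sequences, num_trackers, num_thresholds).
    success = torch.tensor(eval_data['ave_success_rate_plot_overlap'])
    valid = torch.tensor(eval_data['valid_sequence'], dtype=torch.bool)
    success = success[valid]                 # keep only sequences whose results were loaded
    scores = success.mean(dim=0).mean(dim=-1)
    return {trk['disp_name']: score.item()
            for trk, score in zip(eval_data['trackers'], scores)}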