Example #1
import os

import numpy as np

# DAVIS and save_mask are part of the davis2017 evaluation toolkit; save_mask is
# assumed to be defined in the same module as this helper, so only DAVIS is imported.
from davis2017.davis import DAVIS


def generate_obj_proposals(davis_root, subset, num_proposals, save_path):
    dataset = DAVIS(davis_root, subset=subset, codalab=True)
    for seq in dataset.get_sequences():
        save_dir = os.path.join(save_path, seq)
        if os.path.exists(save_dir):
            continue
        # get_all_masks returns (masks, void_masks, mask_ids); the void masks are not needed here
        all_gt_masks, _, all_masks_id = dataset.get_all_masks(seq, True)
        img_size = all_gt_masks.shape[2:]
        # Tile each frame with a near-square grid of num_proposals rectangular box proposals
        num_rows = int(np.ceil(np.sqrt(num_proposals)))
        proposals = np.zeros((num_proposals, len(all_masks_id), *img_size))
        height_slices = np.floor(
            np.arange(0, img_size[0] + 1,
                      img_size[0] / num_rows)).astype(np.uint).tolist()
        width_slices = np.floor(
            np.arange(0, img_size[1] + 1,
                      img_size[1] / num_rows)).astype(np.uint).tolist()
        ii = 0
        prev_h, prev_w = 0, 0
        for h in height_slices[1:]:
            for w in width_slices[1:]:
                proposals[ii, :, prev_h:h, prev_w:w] = 1
                prev_w = w
                ii += 1
                if ii == num_proposals:
                    break
            prev_h, prev_w = h, 0
            if ii == num_proposals:
                break

        os.makedirs(save_dir, exist_ok=True)
        for i, mask_id in enumerate(all_masks_id):
            # Collapse the stack of binary proposals into one index-labelled mask per frame
            mask = np.sum(proposals[:, i, ...] *
                          np.arange(1, proposals.shape[0] + 1)[:, None, None],
                          axis=0)
            save_mask(mask, os.path.join(save_dir, f'{mask_id}.png'))
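
A minimal usage sketch (the path and the number of proposals are placeholders, not values from the toolkit):

# Hypothetical call: write 20 grid proposals per frame for every sequence in the 'val' split.
generate_obj_proposals('path/to/DAVIS', 'val', 20, 'output/grid_proposals')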
Example #2
def generate_random_permutation_gt_obj_proposals(davis_root, subset,
                                                 save_path):
    dataset = DAVIS(davis_root, subset=subset, codalab=True)
    for seq in dataset.get_sequences():
        # get_all_masks returns (masks, void_masks, mask_ids); the void masks are not needed here
        gt_masks, _, all_masks_id = dataset.get_all_masks(seq, True)
        # Randomly permute the object identities of the ground-truth masks
        obj_swap = np.random.permutation(np.arange(gt_masks.shape[0]))
        gt_masks = gt_masks[obj_swap, ...]
        save_dir = os.path.join(save_path, seq)
        os.makedirs(save_dir, exist_ok=True)
        for i, mask_id in enumerate(all_masks_id):
            # Encode the permuted binary masks as one index-labelled mask per frame
            mask = np.sum(gt_masks[:, i, ...] *
                          np.arange(1, gt_masks.shape[0] + 1)[:, None, None],
                          axis=0)
            save_mask(mask, os.path.join(save_dir, f'{mask_id}.png'))
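
The index-encoding step shared by both helpers can be checked on a toy stack of binary masks (a standalone sketch, not part of the toolkit):

import numpy as np

# Two 2x2 binary object masks stacked along the object axis.
masks = np.array([[[1, 0], [0, 0]],
                  [[0, 0], [0, 1]]])
# Multiply mask k by label k + 1 and sum over the object axis: one index-labelled mask.
labelled = np.sum(masks * np.arange(1, masks.shape[0] + 1)[:, None, None], axis=0)
# labelled is now [[1, 0], [0, 2]]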
Example #3
        # Scatter the boundary pixels of b (h x w) onto the (height x width) output grid
        bmap = np.zeros((height, width))
        for x in range(w):
            for y in range(h):
                if b[y, x]:
                    j = 1 + math.floor((y - 1) + height / h)
                    i = 1 + math.floor((x - 1) + width / h)
                    bmap[j, i] = 1

    return bmap


if __name__ == '__main__':
    from davis2017.davis import DAVIS
    from davis2017.results import Results

    dataset = DAVIS(root='input_dir/ref', subset='val', sequences='aerobatics')
    results = Results(root_dir='examples/osvos')
    # Test the timing of the F measure
    for seq in dataset.get_sequences():
        all_gt_masks, _, all_masks_id = dataset.get_all_masks(seq, True)
        # Exclude the first and last frames, as in the semi-supervised evaluation
        all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1]
        all_res_masks = results.read_masks(seq, all_masks_id)
        f_metrics_res = np.zeros(all_gt_masks.shape[:2])
        for ii in range(all_gt_masks.shape[0]):
            f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...],
                                                    all_res_masks[ii, ...])

    # To profile this script: python -m cProfile -o f_measure.prof metrics.py
    #                         snakeviz f_measure.prof
Example #4
import sys

import numpy as np
from scipy.optimize import linear_sum_assignment
from tqdm import tqdm

# The imports below assume the davis2017 toolkit layout (davis.py, metrics.py,
# results.py and utils.py inside the davis2017 package).
from davis2017 import utils
from davis2017.davis import DAVIS
from davis2017.metrics import db_eval_boundary, db_eval_iou
from davis2017.results import Results


class DAVISEvaluation(object):
    def __init__(self,
                 davis_root,
                 task,
                 gt_set,
                 sequences='all',
                 codalab=False,
                 year='2017'):
        """
        Class to evaluate DAVIS sequences from a certain set and for a certain task
        :param davis_root: Path to the DAVIS folder that contains JPEGImages, Annotations, etc. folders.
        :param task: Task to compute the evaluation, chose between semi-supervised or unsupervised.
        :param gt_set: Set to compute the evaluation
        :param sequences: Sequences to consider for the evaluation, 'all' to use all the sequences in a set.
        """
        self.davis_root = davis_root
        self.task = task
        self.year = year
        self.dataset = DAVIS(root=davis_root,
                             task=task,
                             subset=gt_set,
                             sequences=sequences,
                             codalab=codalab,
                             year=self.year)

    @staticmethod
    def _evaluate_semisupervised(all_gt_masks, all_res_masks, all_void_masks,
                                 metric):
        if all_res_masks.shape[0] > all_gt_masks.shape[0]:
            sys.stdout.write(
                "\nIn your PNG files there is an index higher than the number of objects in the sequence!"
            )
            sys.exit()
        elif all_res_masks.shape[0] < all_gt_masks.shape[0]:
            # Pad missing objects with empty masks so results and ground truth align
            zero_padding = np.zeros((all_gt_masks.shape[0] - all_res_masks.shape[0],
                                     *all_res_masks.shape[1:]))
            all_res_masks = np.concatenate([all_res_masks, zero_padding], axis=0)
        # One row per object, one column per evaluated frame
        j_metrics_res = np.zeros(all_gt_masks.shape[:2])
        f_metrics_res = np.zeros(all_gt_masks.shape[:2])
        for ii in range(all_gt_masks.shape[0]):
            if 'J' in metric:
                j_metrics_res[ii, :] = db_eval_iou(all_gt_masks[ii, ...],
                                                   all_res_masks[ii, ...],
                                                   all_void_masks)
            if 'F' in metric:
                f_metrics_res[ii, :] = db_eval_boundary(
                    all_gt_masks[ii, ...], all_res_masks[ii, ...],
                    all_void_masks)
        return j_metrics_res, f_metrics_res

    @staticmethod
    def _evaluate_unsupervised(all_gt_masks,
                               all_res_masks,
                               all_void_masks,
                               metric,
                               max_n_proposals=20):
        if all_res_masks.shape[0] > max_n_proposals:
            sys.stdout.write(
                f"\nIn your PNG files there is an index higher than the maximum number ({max_n_proposals}) of proposals allowed!"
            )
            sys.exit()
        elif all_res_masks.shape[0] < all_gt_masks.shape[0]:
            # Pad with empty proposals so there are at least as many proposals as objects
            zero_padding = np.zeros((all_gt_masks.shape[0] - all_res_masks.shape[0],
                                     *all_res_masks.shape[1:]))
            all_res_masks = np.concatenate([all_res_masks, zero_padding], axis=0)
        # Per-frame metrics for every (proposal, ground-truth object) pair
        j_metrics_res = np.zeros((all_res_masks.shape[0], all_gt_masks.shape[0],
                                  all_gt_masks.shape[1]))
        f_metrics_res = np.zeros((all_res_masks.shape[0], all_gt_masks.shape[0],
                                  all_gt_masks.shape[1]))
        for ii in range(all_gt_masks.shape[0]):
            for jj in range(all_res_masks.shape[0]):
                if 'J' in metric:
                    j_metrics_res[jj, ii, :] = db_eval_iou(
                        all_gt_masks[ii, ...], all_res_masks[jj, ...], all_void_masks)
                if 'F' in metric:
                    f_metrics_res[jj, ii, :] = db_eval_boundary(
                        all_gt_masks[ii, ...], all_res_masks[jj, ...], all_void_masks)
        # Score every (proposal, object) pair by its mean J, mean F or mean J&F over the frames
        if 'J' in metric and 'F' in metric:
            all_metrics = (np.mean(j_metrics_res, axis=2) +
                           np.mean(f_metrics_res, axis=2)) / 2
        else:
            all_metrics = (np.mean(j_metrics_res, axis=2)
                           if 'J' in metric else np.mean(f_metrics_res, axis=2))
        # Hungarian matching: choose the proposal-object assignment with the highest total score
        row_ind, col_ind = linear_sum_assignment(-all_metrics)
        return j_metrics_res[row_ind, col_ind, :], f_metrics_res[row_ind, col_ind, :]
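
    # Matching sketch (illustrative numbers, not from the toolkit): with the score matrix
    #   all_metrics = [[0.9, 0.2],
    #                  [0.1, 0.8],
    #                  [0.4, 0.3]]   (rows = proposals, columns = ground-truth objects)
    # linear_sum_assignment(-all_metrics) returns row_ind = [0, 1] and col_ind = [0, 1],
    # i.e. proposal 0 is matched to object 0 and proposal 1 to object 1, which is the
    # assignment with the largest summed score.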

    def evaluate(self, res_path, metric=('J', 'F'), debug=False):
        metric = metric if isinstance(metric, (tuple, list)) else [metric]
        if 'T' in metric:
            raise ValueError('Temporal metric not supported!')
        if 'J' not in metric and 'F' not in metric:
            raise ValueError(
                'Metric possible values are J for IoU or F for Boundary')

        # Containers
        metrics_res = {}
        if 'J' in metric:
            metrics_res['J'] = {"M": [], "R": [], "D": [], "M_per_object": {}}
        if 'F' in metric:
            metrics_res['F'] = {"M": [], "R": [], "D": [], "M_per_object": {}}

        # DAVIS 2016 provides a single foreground mask; later years separate per-object masks
        separate_objects_masks = self.year != '2016'

        # Sweep all sequences
        results = Results(root_dir=res_path)
        for seq in tqdm(list(self.dataset.get_sequences())):
            all_gt_masks, all_void_masks, all_masks_id = self.dataset.get_all_masks(
                seq, separate_objects_masks)
            if self.task == 'semi-supervised':
                # The first and last frames are excluded from the semi-supervised evaluation
                all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1]
            all_res_masks = results.read_masks(seq, all_masks_id)
            if self.task == 'unsupervised':
                j_metrics_res, f_metrics_res = self._evaluate_unsupervised(
                    all_gt_masks, all_res_masks, all_void_masks, metric)
            elif self.task == 'semi-supervised':
                j_metrics_res, f_metrics_res = self._evaluate_semisupervised(
                    all_gt_masks, all_res_masks, None, metric)
            for ii in range(all_gt_masks.shape[0]):
                seq_name = f'{seq}_{ii+1}'
                if 'J' in metric:
                    [JM, JR, JD] = utils.db_statistics(j_metrics_res[ii])
                    metrics_res['J']["M"].append(JM)
                    metrics_res['J']["R"].append(JR)
                    metrics_res['J']["D"].append(JD)
                    metrics_res['J']["M_per_object"][seq_name] = JM
                if 'F' in metric:
                    [FM, FR, FD] = utils.db_statistics(f_metrics_res[ii])
                    metrics_res['F']["M"].append(FM)
                    metrics_res['F']["R"].append(FR)
                    metrics_res['F']["D"].append(FD)
                    metrics_res['F']["M_per_object"][seq_name] = FM

            # Show progress
            if debug:
                sys.stdout.write(seq + '\n')
                sys.stdout.flush()
        return metrics_res
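
A minimal driver sketch for the evaluator above (the paths are placeholders and the task/split values are only examples):

# Hypothetical usage of DAVISEvaluation; 'path/to/DAVIS' and 'path/to/method_results' are placeholders.
dataset_eval = DAVISEvaluation(davis_root='path/to/DAVIS', task='semi-supervised', gt_set='val')
metrics_res = dataset_eval.evaluate('path/to/method_results')
# Mean J over all objects in the set
print(sum(metrics_res['J']['M']) / len(metrics_res['J']['M']))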