Example #1
    def label_next_batch(self):
        """Pick the next set of images to label and return the active set."""
        uind_list = list(self.unlabeled_indices)
        if len(self.labeled_indices) == 0:
            ind_list = self.rng.choice(uind_list,
                                       min(self.init_sample_size,
                                           len(uind_list)),
                                       replace=False)
        else:
            if self.heuristic == 'random':
                ind_list = self.rng.choice(uind_list,
                                           min(self.sample_size,
                                               len(uind_list)),
                                           replace=False)

            elif self.heuristic == 'entropy':
                ind_list = []
                print('%s Scoring' % self.heuristic)
                for ind in tqdm.tqdm(self.unlabeled_indices):
                    batch = ut.collate_fn([self.train_set[ind]])
                    probs_mcmc = self.mcmc_on_batch(batch['images'],
                                                    replicate=True,
                                                    scale_factor=1)
                    # predictive entropy: H = -sum_c p_c log p_c, averaged
                    # over the MCMC samples (xlogy is assumed to be a local
                    # helper computing p * log(p) elementwise)
                    entropy = -xlogy(probs_mcmc).mean(dim=0).sum(dim=1)
                    score_map = entropy

                    ind_list += [{
                        'score': float(score_map.mean()),
                        'index': ind
                    }]

                # sort ind_list and pick top k
                ind_list = [
                    idict['index']
                    for idict in sorted(ind_list, key=lambda x: -x['score'])
                ]
                ind_list = ind_list[:self.sample_size]
            else:
                raise ValueError('%s heuristic not available' % self.heuristic)

        # update labeled indices
        for ind in ind_list:
            assert ind not in self.labeled_indices, 'index already exists'
            self.labeled_indices.add(ind)

        # update unlabeled indices
        for ind in ind_list:
            self.unlabeled_indices.remove(ind)

        # return active dataset
        return DatasetWrapper(self.train_set, self.labeled_indices)
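A minimal driver for the routine above might look as follows; this is a sketch, with `learner`, `train_model`, and `n_cycles` as hypothetical names, while `ut.collate_fn` is the project helper used throughout these examples:

import torch

# Hypothetical active-learning loop (a sketch, not the project's trainer):
# each cycle labels a new batch, then retrains on the enlarged active set.
for cycle in range(n_cycles):
    active_set = learner.label_next_batch()   # grow the labeled pool
    loader = torch.utils.data.DataLoader(active_set,
                                         batch_size=8,
                                         shuffle=True,
                                         collate_fn=ut.collate_fn)
    train_model(model, loader)                # hypothetical trainer
    if len(learner.unlabeled_indices) == 0:
        break                                 # pool exhausted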
Example #2
        def label_next_batch(self):
            """Pick the next image regions to label and update the cost map."""
            pool_ind = self.get_pool_ind()

            if self.label_map.sum() == 0:
                with hu.random_seed(1):
                    # pick init_sample_size images for the initial pool
                    # (the infected-region filter below is left disabled)
                    n_batches = 0
                    ind_list = []
                    for ind in np.random.permutation(len(self.train_set)):
                        if n_batches == self.init_sample_size:
                            break

                        # batch = ut.collate_fn([self.train_set[ind]])

                        # if batch['masks'].sum() == 0:
                        #     continue

                        n_batches += 1
                        ind_list += [{
                            'bbox_yxyx': np.arange(self.n_regions),
                            'index': ind
                        }]
            else:
                if self.heuristic == 'random':
                    with hu.random_seed(1):
                        img_ind_list = self.rng.choice(pool_ind,
                                                       min(
                                                           self.sample_size,
                                                           len(pool_ind)),
                                                       replace=False)
                        ind_list = []
                        for ind in img_ind_list:
                            bbox_ind = np.random.choice(
                                np.where(self.label_map[ind] == 0)[0])
                            ind_list += [{
                                'bbox_yxyx': [bbox_ind],
                                'index': ind
                            }]
                else:
                    ind_list = []
                    print('%s Scoring' % self.heuristic)
                    arange = np.arange(self.n_regions)
                    for ind in tqdm.tqdm(pool_ind):
                        batch = ut.collate_fn([self.train_set[ind]])
                        score_map = self.compute_uncertainty(
                            batch['images'],
                            replicate=True,
                            scale_factor=1,
                            method=self.heuristic).squeeze()
                        h, w = score_map.shape
                        bbox_yxyx = get_rect_bbox(h,
                                                  w,
                                                  n_regions=self.n_regions)

                        unlabeled = self.label_map[ind] == 0

                        bbox_yxyx = bbox_yxyx[unlabeled]
                        bbox_ind = arange[unlabeled]

                        assert len(bbox_yxyx) > 0

                        s_list = np.zeros(len(bbox_yxyx))
                        for i, (y1, x1, y2, x2) in enumerate(bbox_yxyx):
                            s_list[i] = score_map[y1:y2, x1:x2].mean()

                        ind_list += [{
                            'score': float(score_map.mean()),
                            'bbox_yxyx': [bbox_ind[s_list.argmax()]],
                            'index': ind
                        }]

                    # sort ind_list and pick top k
                    ind_list = sorted(ind_list, key=lambda x: -x['score'])
                    ind_list = ind_list[:self.sample_size]

            # update labeled indices
            for ind_dict in ind_list:
                assert self.label_map[ind_dict['index'],
                                      ind_dict['bbox_yxyx']].sum() == 0
                self.label_map[ind_dict['index'], ind_dict['bbox_yxyx']] = 1
                assert self.label_map[ind_dict['index'],
                                      ind_dict['bbox_yxyx']].mean() == 1
                if not self.exp_dict['active_learning'].get('effort', None):
                    for i in ind_dict['bbox_yxyx']:
                        self.cost_map[ind_dict['index'], i] = 1

                elif self.exp_dict['model']['loss'] in [
                        'const_point_level', 'point_level'
                ]:
                    self.cost_map[ind_dict['index'], ind_dict['bbox_yxyx']] = 3

                elif self.exp_dict['model']['loss'] in ['point_level_2']:
                    self.cost_map[ind_dict['index'], ind_dict['bbox_yxyx']] = 4

                elif self.exp_dict['model']['loss'] in [
                        'joint_cross_entropy', 'cross_entropy'
                ]:
                    batch = self.train_set[ind_dict['index']]
                    mask = batch['masks'].squeeze()
                    h, w = mask.shape
                    bbox_yxyx = get_rect_bbox(h, w, n_regions=self.n_regions)
                    for i in ind_dict['bbox_yxyx']:
                        (y1, x1, y2, x2) = bbox_yxyx[i]

                        patch = mask[y1:y2, x1:x2]
                        u_list = patch.unique()
                        if 1 in u_list:
                            # annotation cost grows with the number of hull
                            # edges needed to trace the region
                            from scipy.spatial import ConvexHull
                            pts = np.stack(np.where(patch)).transpose()
                            if len(pts) <= 2:
                                cost = len(pts) * 2
                            else:
                                hull = ConvexHull(pts, qhull_options='QJ')
                                cost = len(hull.simplices) * 3
                            self.cost_map[ind_dict['index'], i] = cost
                        elif 0 in u_list:
                            self.cost_map[ind_dict['index'], i] = 4

                else:
                    raise ValueError

            # return active dataset
            train_list = sorted(self.get_train_ind())
            return torch.utils.data.Subset(self.train_set, train_list)
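Example #2 relies on a `get_rect_bbox(h, w, n_regions)` helper whose source is not shown. A minimal sketch consistent with the call sites, assuming the helper tiles the image into a near-square grid and returns `n_regions` rows of `(y1, x1, y2, x2)` coordinates:

import numpy as np

def get_rect_bbox(h, w, n_regions):
    # Assumed implementation: split an h x w image into a grid of n_regions
    # rectangles; the project's real helper may differ, this only matches
    # how the example above indexes the result.
    n_rows = int(np.sqrt(n_regions))
    n_cols = int(np.ceil(n_regions / n_rows))
    ys = np.linspace(0, h, n_rows + 1, dtype=int)
    xs = np.linspace(0, w, n_cols + 1, dtype=int)
    boxes = [(ys[r], xs[c], ys[r + 1], xs[c + 1])
             for r in range(n_rows) for c in range(n_cols)]
    return np.array(boxes[:n_regions])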
Example #3
    for exp_group in [
            'weakly_covid19_v2_sep_c2', 'weakly_covid19_v2_mixed_c3',
            'weakly_covid19_v2_sep_c3', 'weakly_covid19_v3_mixed_c2'
    ]:
        exp_dict = exp_configs.EXP_GROUPS[exp_group][0]
        dataset_name = exp_dict['dataset']['name']
        n_classes = exp_dict['dataset']['n_classes']
        train_set = datasets.get_dataset(dataset_dict={'name': dataset_name},
                                         datadir=None,
                                         split="test",
                                         exp_dict=exp_dict)
        # pick the first test sample with a non-empty mask
        for i, b in enumerate(train_set):
            if b['masks'].sum() == 0:
                print(i)
                continue
            break
        batch = ut.collate_fn([b])

        image = batch['images']
        gt = np.asarray(batch['masks'], np.float32)
        gt /= (gt.max() + 1e-8)

        image = F.interpolate(image,
                              size=gt.shape[-2:],
                              mode='bilinear',
                              align_corners=False)

        # the call below is truncated in the source; it is completed here to
        # mirror the ground-truth overlay call used in Example #4
        img_gt = hu.save_image('',
                               hu.denormalize(image, mode='rgb')[0],
                               mask=gt[0],
                               return_image=True)
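All four examples batch single samples through `ut.collate_fn`, which is not included here. A minimal stand-in, assuming each sample is a dict whose tensor values are stacked along a new batch dimension:

import torch

def collate_fn(samples):
    # Assumed behavior: turn a list of sample dicts into one dict of
    # batched tensors; non-tensor values are kept as plain lists.
    batch = {}
    for key in samples[0]:
        values = [s[key] for s in samples]
        if torch.is_tensor(values[0]):
            batch[key] = torch.stack(values, dim=0)
        else:
            batch[key] = values
    return batch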
Example #4
    model = models.get_model(model_dict=exp_dict['model'],
                             exp_dict=exp_dict,
                             train_set=train_set).cuda()
    exp_id = hu.hash_dict(exp_dict)
    fname = os.path.join('/mnt/public/results/toolkit/weak_supervision',
                         exp_id, 'model.pth')
    model.model_base.load_state_dict(torch.load(fname)['model'], strict=False)

    for k in range(5):
        batch_id = np.where(train_set.labels)[0][k]
        batch = ut.collate_fn([train_set[batch_id]])
        logits = F.softmax(model.model_base.forward(batch['images'].cuda()),
                           dim=1)

        img = batch['images'].cuda()
        # model_aff (an affinity-refinement model) is assumed to be defined
        # elsewhere in the script
        logits_new = model_aff.apply_affinity(batch['images'], logits, crf=0)

        i1 = hu.save_image('old.png',
                           img=hu.denormalize(img, mode='rgb'),
                           mask=logits.argmax(dim=1).cpu().numpy(),
                           return_image=True)

        i2 = hu.save_image('new.png',
                           img=hu.denormalize(img, mode='rgb'),
                           mask=logits_new.argmax(dim=1).cpu().numpy(),
                           return_image=True)
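
        # Optional sketch: save both overlays side by side for comparison
        # (assumes i1 and i2 convert to equally-shaped arrays, which holds
        # when both come from the same input image).
        i12 = np.concatenate([np.array(i1), np.array(i2)], axis=1)
        hu.save_image('old_vs_new_%d.png' % k, i12)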
def save_images(exp_dict):
    """Save qualitative rgb / points / mask / prediction images for one run."""
    dataset_name = exp_dict['dataset']['name']
    n_classes = exp_dict['dataset']['n_classes']
    model = models.get_model(model_dict=exp_dict['model'],
                             exp_dict=exp_dict,
                             train_set=None).cuda()
    state_dict = hc.load_checkpoint(exp_dict,
                                    savedir_base,
                                    fname='model_best.pth')
    model.load_state_dict(state_dict)
    model.eval()
    np.random.seed(1)

    train_set = datasets.get_dataset(dataset_dict={'name': dataset_name},
                                     datadir=None,
                                     split="test",
                                     exp_dict=exp_dict)
    n_images = 0
    for _ in range(len(train_set)):
        i = np.random.choice(len(train_set))
        b = train_set[i]

        if n_images > 5:
            break

        if b['masks'].sum() == 0:
            print(i)
            continue
        n_images += 1
        batch = ut.collate_fn([b])

        image = batch['images']
        gt = np.asarray(batch['masks'], np.float32)
        gt /= (gt.max() + 1e-8)

        image = F.interpolate(image,
                              size=gt.shape[-2:],
                              mode='bilinear',
                              align_corners=False)
        img_rgb = hu.f2l(hu.denormalize(image, mode='rgb')[0])
        img_rgb = (np.array(img_rgb) * 255.).astype('uint8')

        # save rgb
        fname_rgb = '.tmp/covid_qualitative/%s/%s/%d_rgb.png' % (exp_group,
                                                                 'gt', i)
        hu.save_image(fname_rgb, img_rgb)

        # save pts
        fname_pts = '.tmp/covid_qualitative/%s/%s/%d_pts.png' % (exp_group,
                                                                 'gt', i)
        img_gt = np.array(hu.save_image('', img_rgb, return_image=True))

        if 'points' in batch:
            pts = batch['points'][0].numpy()
            # remap point label values before drawing: 1 -> 2, 0 -> 1,
            # 255 -> 0 (255 presumably marks ignore pixels)
            pts[pts == 1] = 2
            pts[pts == 0] = 1
            pts[pts == 255] = 0
            img_gt = np.array(
                hu.save_image('',
                              img_gt / 255.,
                              points=pts,
                              radius=2,
                              return_image=True))
        hu.save_image(fname_pts, img_gt)

        # save mask
        fname_mask = '.tmp/covid_qualitative/%s/%s/%d_mask.png' % (exp_group,
                                                                   'gt', i)

        img_mask = np.array(
            hu.save_image('', img_rgb, mask=gt[0], return_image=True))
        hu.save_image(fname_mask, img_mask)

        # pred
        fname_pred = '.tmp/covid_qualitative/%s/%s/%d_%s.png' % (
            exp_group, 'preds', i, exp_dict['model']['loss'])
        res = model.predict_on_batch(batch)

        img_res = hu.save_image('', img_rgb, mask=res[0], return_image=True)
        hu.save_image(fname_pred, np.array(img_res))
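`save_images` reads the module-level `savedir_base` and `exp_group` variables defined elsewhere in the script. A hedged sketch of how it might be driven, reusing names that appear in the examples above:

# Hypothetical driver (savedir_base and the group names are taken from the
# snippets above; exp_configs.EXP_GROUPS is the same registry Example #3 uses).
savedir_base = '/mnt/public/results/toolkit/weak_supervision'
for exp_group in ['weakly_covid19_v2_sep_c2', 'weakly_covid19_v2_sep_c3']:
    for exp_dict in exp_configs.EXP_GROUPS[exp_group]:
        save_images(exp_dict)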