Example #1
0
    def vis_on_batch(self, batch, savedir_image):
        """Save a side-by-side visualization: ground truth (with point
        annotations if available) next to the model's prediction."""
        raw_image = batch['images']
        # Normalize the ground-truth mask to [0, 1]; epsilon guards an
        # all-zero mask against division by zero.
        mask_gt = np.asarray(batch['masks'], np.float32)
        mask_gt /= (mask_gt.max() + 1e-8)
        prediction = self.predict_on_batch(batch)

        # Resize the network input back to the ground-truth resolution.
        raw_image = F.interpolate(raw_image, size=mask_gt.shape[-2:],
                                  mode='bilinear', align_corners=False)
        rgb = hu.denormalize(raw_image, mode='rgb')[0]

        img_res = hu.save_image(savedir_image, rgb,
                                mask=prediction[0], return_image=True)
        img_gt = hu.save_image(savedir_image, rgb,
                               mask=mask_gt[0], return_image=True)

        img_gt = models.text_on_image('Groundtruth', np.array(img_gt),
                                      color=(0, 0, 0))
        img_res = models.text_on_image('Prediction', np.array(img_res),
                                       color=(0, 0, 0))

        if 'points' in batch:
            # Remap point labels for drawing: 1 -> 2, 0 -> 1, 255 (ignore) -> 0.
            pts = batch['points'][0].numpy().copy()
            pts[pts == 1] = 2
            pts[pts == 0] = 1
            pts[pts == 255] = 0
            img_gt = np.array(hu.save_image(savedir_image, img_gt / 255.,
                                            points=pts, radius=2,
                                            return_image=True))

        # Final image: ground truth on the left, prediction on the right.
        hu.save_image(savedir_image,
                      np.hstack([np.array(img_gt), np.array(img_res)]))
Example #2
0
def save_tmp(fname, images, logits, points):
    """Save a debug visualization: the argmax mask of `logits` overlaid on
    the denormalized image, with the annotation `points` drawn on top.

    Args:
        fname: destination path for the final visualization.
        images: normalized input image tensor (as fed to the network).
        logits: raw network output of shape (N, C, H, W).
        points: point annotations to draw over the mask overlay.
    """
    from haven import haven_utils as hu
    # Class probabilities -> hard per-pixel labels; scale {0,1} to {0,255}
    # so the mask is visible when rendered.
    probs = F.softmax(logits, 1)
    mask = probs.argmax(dim=1).cpu().numpy().astype('uint8').squeeze() * 255
    img_mask = hu.save_image('tmp2.png',
                             hu.denormalize(images, mode='rgb'),
                             mask=mask,
                             return_image=True)
    # BUG FIX: the original referenced an undefined `att_dict`; use the
    # `points` argument that callers actually pass in.
    hu.save_image(fname, np.array(img_mask) / 255., points=points)
Example #3
0
    def vis_on_batch(self, batch, savedir_image, save_preds=False):
        """Visualize LCFCN blobs and attention-derived points for one batch.

        Writes a side-by-side figure (blob boundaries over the input on the
        left, ROI-mask overlay with attention points on the right) to
        ``savedir_image`` with a .png extension. Nothing is saved when the
        attention model returns no ROI mask.

        Args:
            batch: collated batch with 'images' and 'counts' entries.
            savedir_image: output path; a '.jpg' suffix is replaced by '.png'.
            save_preds: unused here — kept for interface compatibility.
        """
        self.eval()
        images = batch["images"].cuda()
        counts = float(batch["counts"][0])  # NOTE(review): unused below

        logits = self.model_base(images)

        # Per-pixel foreground probabilities.
        probs = logits.sigmoid()

        # get points from attention
        att_dict = self.att_model.get_attention_dict(
            images_original=torch.FloatTensor(
                hu.denormalize(batch['images'], mode='rgb')),
            counts=batch['counts'][0],
            probs=probs.squeeze(),
            return_roi=True)

        # Connected blobs from the probability map, colored and outlined
        # over the denormalized input image.
        blobs = lcfcn.get_blobs(probs.squeeze().detach().cpu().numpy())
        org_img = hu.denormalize(images.squeeze(), mode='rgb')
        rgb_labels = label2rgb(
            blobs,
            hu.f2l(org_img),
            bg_label=0,
            bg_color=None,
        )
        res1 = mark_boundaries(rgb_labels, blobs)

        if att_dict['roi_mask'] is not None:
            # ROI-mask overlay first, then the attention points on top of it
            # (intermediate renders go to temp files; only the final hstack
            # lands at savedir_image).
            img_mask = hu.save_image('tmp2.png',
                                     org_img,
                                     mask=att_dict['roi_mask'] == 1,
                                     return_image=True)
            res2 = hu.save_image('tmp.png',
                                 np.array(img_mask) / 255.,
                                 points=att_dict['points'],
                                 radius=1,
                                 return_image=True)

            os.makedirs(os.path.dirname(savedir_image), exist_ok=True)
            # plt.savefig(savedir_image.replace('.jpg', '.png')
            hu.save_image(savedir_image.replace('.jpg', '.png'),
                          np.hstack([res1, np.array(res2) / 255.]))
    def vis_on_batch(self, batch, savedir_image):
        """Write ground-truth points and the predicted mask side by side."""
        prediction = self.predict_on_batch(batch)
        rgb = hu.denormalize(batch['images'], mode='rgb')[0]

        # Ground-truth annotation points drawn over the input image.
        img_gt = hu.save_image('tmp.png',
                               rgb,
                               points=batch['points'][0],
                               radius=1,
                               return_image=True)

        # Predicted mask drawn over the same image.
        img_res = hu.save_image(savedir_image,
                                rgb,
                                mask=prediction.cpu().numpy(),
                                return_image=True)

        combined = np.hstack([np.array(img_gt), np.array(img_res)])
        hu.save_image(savedir_image, combined)
Example #5
0
    def vis_on_batch(self, batch, savedir_image):
        """Save a side-by-side figure: the input with point annotations and
        the ground-truth count, next to the predicted segmentation with the
        predicted count."""
        image = batch['images']
        original = hu.denormalize(image, mode='rgb')[0]
        # Predicted segmentation overlaid on the input.
        img_pred = hu.save_image(savedir_image,
                    original,
                      mask=self.predict_on_batch(batch, method='semseg'), return_image=True)

        # Plain input image; annotations are drawn on it below.
        img_gt = hu.save_image(savedir_image,
                     original,
                     return_image=True)
                     
        gt_counts = float(batch['points'].sum())
        pred_counts = self.predict_on_batch(batch, method='counts')
        img_gt = models.text_on_image( 'Groundtruth: %d' % gt_counts, np.array(img_gt), color=(0,0,0))
        img_pred = models.text_on_image( 'Prediction: %d' % pred_counts, np.array(img_pred), color=(0,0,0))
        
        if 'points' in batch:
            # Binary point map: any non-zero label is drawn as a point.
            pts = (batch['points'][0].numpy().copy() != 0).astype('uint8')

            img_gt = np.array(hu.save_image(savedir_image, img_gt/255.,
                                points=pts.squeeze(), radius=2, return_image=True))

        img_list = [np.array(img_gt), np.array(img_pred)]
        hu.save_image("%s" %(savedir_image), np.hstack(img_list))
Example #6
0
        batch = ut.collate_fn([b])

        image = batch['images']
        gt = np.asarray(batch['masks'], np.float32)
        gt /= (gt.max() + 1e-8)

        image = F.interpolate(image,
                              size=gt.shape[-2:],
                              mode='bilinear',
                              align_corners=False)
        # img_res = hu.save_image('',
        #              hu.denormalize(image, mode='rgb')[0],
        #               mask=res[0], return_image=True)

        img_gt = hu.save_image('',
                               hu.denormalize(image, mode='rgb')[0],
                               mask=gt[0],
                               return_image=True)
        img_gt = models.text_on_image('Groundtruth',
                                      np.array(img_gt),
                                      color=(0, 0, 0))
        # img_res = models.text_on_image( 'Prediction', np.array(img_res), color=(0,0,0))

        if 'points' in batch:
            img_gt = np.array(
                hu.save_image('',
                              img_gt / 255.,
                              points=batch['points'][0].numpy() != 255,
                              radius=2,
                              return_image=True))
        img_list = [np.array(img_gt)]
Example #7
0
                                    datadir=datadir,
                                    exp_dict=exp_dict,
                                    dataset_size=exp_dict['dataset_size'])
    test_loader = DataLoader(
        test_set,
        # sampler=val_sampler,
        batch_size=1,
        collate_fn=ut.collate_fn,
        num_workers=0)

    for i, batch in enumerate(test_loader):
        points = (batch['points'].squeeze() == 1).numpy()
        if points.sum() == 0:
            continue
        savedir_image = os.path.join('.tmp/qualitative/%d.png' % (i))
        img = hu.denormalize(batch['images'], mode='rgb')
        img_org = np.array(
            hu.save_image(savedir_image,
                          img,
                          mask=batch['masks'].numpy(),
                          return_image=True))

        img_list = [img_org]
        with torch.no_grad():
            for hash_id in hash_list:
                score_path = os.path.join(savedir_base, hash_id,
                                          'score_list_best.pkl')
                score_list = hu.load_pkl(score_path)

                exp_dict = hu.load_json(
                    os.path.join(savedir_base, hash_id, 'exp_dict.json'))
Example #8
0
                             exp_dict=exp_dict,
                             train_set=test_set).cuda()

    model_path = '/mnt/public/results/toolkit/weak_supervision/%s/model_best.pth' % hash_dir

    # load best model
    model.load_state_dict(hu.torch_load(model_path))

    # loop over the val_loader and saves image
    for i, batch in enumerate(test_loader):
        savedir_image = os.path.join("%s" % savedir, "save_preds",
                                     "%s" % hash_dir, "%s" % split,
                                     "%d.png" % i)

        image = batch['images']
        original = hu.denormalize(image, mode='rgb')[0]
        gt = np.asarray(batch['masks'])

        image = F.interpolate(image,
                              size=gt.shape[-2:],
                              mode='bilinear',
                              align_corners=False)
        img_pred = hu.save_image(savedir_image,
                                 original,
                                 mask=model.predict_on_batch(batch),
                                 return_image=True)

        img_gt = hu.save_image(savedir_image,
                               original,
                               mask=gt,
                               return_image=True)
Example #9
0
    # stop
    model = models.get_model(model_dict=exp_dict['model'],
                             exp_dict=exp_dict,
                             train_set=train_set).cuda()
    exp_id = hu.hash_dict(exp_dict)
    fname = os.path.join('/mnt/public/results/toolkit/weak_supervision',
                         exp_id, 'model.pth')
    model.model_base.load_state_dict(torch.load(fname)['model'], strict=False)

    for k in range(5):
        batch_id = np.where(train_set.labels)[0][k]
        batch = ut.collate_fn([train_set[batch_id]])
        logits = F.softmax(model.model_base.forward(batch['images'].cuda()),
                           dim=1)

        img = batch['images'].cuda()
        logits_new = model_aff.apply_affinity(batch['images'], logits, crf=0)

        i1 = hu.save_image('old.png',
                           img=hu.denormalize(img, mode='rgb'),
                           mask=logits.argmax(dim=1).cpu().numpy(),
                           return_image=True)

        i2 = hu.save_image('new.png',
                           img=hu.denormalize(img, mode='rgb'),
                           mask=logits_new.argmax(dim=1).cpu().numpy(),
                           return_image=True)
        hu.save_image('tmp/tmp%d.png' % k,
                      np.concatenate([np.array(i1), np.array(i2)], axis=1))
        print('saved %d' % k)
Example #10
0
    def train_on_batch(self, batch, **extras):
        """Run one optimization step on a single batch.

        The loss is selected by exp_dict['model']['loss']:
          - 'lcfcn': LCFCN loss on the annotated points.
          - 'glance': MSE regression of the predicted count.
          - 'att_lcfcn': LCFCN loss on attention-derived points and ROI.

        Returns:
            dict with the scalar 'train_loss'.
        """
        self.train()

        images = batch["images"].cuda()
        counts = float(batch["counts"][0])

        logits = self.model_base(images)
        if self.exp_dict['model'].get('loss') == 'lcfcn':
            loss = lcfcn.compute_lcfcn_loss(logits, batch["points"].cuda(),
                                            None)
            probs = F.softmax(logits, 1)
            # Hard labels scaled to {0, 255} for the (disabled) debug dump.
            mask = probs.argmax(
                dim=1).cpu().numpy().astype('uint8').squeeze() * 255

            # img_mask=hu.save_image('tmp2.png',
            #             hu.denormalize(images, mode='rgb'), mask=mask, return_image=True)
            # hu.save_image('tmp2.png',np.array(img_mask)/255. , radius=3,
            #                 points=batch["points"])

        elif self.exp_dict['model'].get('loss') == 'glance':
            pred_counts = logits[:, 1].mean()
            # BUG FIX: `counts` is a plain Python float, which has neither
            # .float() nor .squeeze(); wrap it in a tensor matching the
            # prediction's dtype/device so F.mse_loss gets a valid target.
            target = torch.as_tensor(counts,
                                     dtype=pred_counts.dtype,
                                     device=pred_counts.device)
            loss = F.mse_loss(pred_counts.squeeze(), target)

        elif self.exp_dict['model'].get('loss') == 'att_lcfcn':
            probs = logits.sigmoid()

            # get points from attention
            att_dict = self.att_model.get_attention_dict(
                images_original=torch.FloatTensor(
                    hu.denormalize(batch['images'], mode='rgb')),
                counts=batch['counts'][0],
                probs=probs.squeeze(),
                return_roi=True)
            if 1:
                # Debug visualization of blobs and attention points.
                blobs = lcfcn.get_blobs(probs.squeeze().detach().cpu().numpy())
                org_img = hu.denormalize(images.squeeze(), mode='rgb')
                rgb_labels = label2rgb(
                    blobs,
                    hu.f2l(org_img),
                    bg_label=0,
                    bg_color=None,
                )
                res1 = mark_boundaries(rgb_labels, blobs)
                img_mask = hu.save_image('tmp2.png',
                                         org_img,
                                         return_image=True)
                res2 = hu.save_image('tmp.png',
                                     np.array(img_mask) / 255.,
                                     points=att_dict['points'],
                                     radius=1,
                                     return_image=True)

                hu.save_image('tmp_blobs.png',
                              np.hstack([res1, np.array(res2) / 255.]))

            loss = lcfcn.compute_loss(
                probs=probs,
                # batch["points"].cuda(),
                points=att_dict['points'].cuda(),
                roi_mask=att_dict['roi_mask'])
            # loss += .5 * F.cross_entropy(logits,
            #             torch.from_numpy(1 -
            #                 att_dict['mask_bg']).long().cuda()[None],
            #             ignore_index=1)

        self.opt.zero_grad()
        loss.backward()
        # The SPS optimizer requires the loss value at step time.
        if self.exp_dict['optimizer'] == 'sps':
            self.opt.step(loss=loss)
        else:
            self.opt.step()

        return {"train_loss": float(loss)}
Example #11
0
def bbox_on_image(batch, bbox_norm, scores, ind_fg):
    """Draw the foreground bounding boxes (labeled with scores) on the
    batch image and return the result as an array."""
    # Recover the RGB image and rescale it to [0, 1] by its maximum.
    rgb = hu.denormalize(batch['images'], mode='rgb')
    rgb = rgb / rgb.max()
    boxed = bbox_on_image_utils(bbox_norm[ind_fg], rgb.squeeze(),
                                text_list=scores)
    return np.hstack([boxed])
Example #12
0
        def vis_on_batch(self, batch, savedir_image):
            """Save ground truth, prediction, and — for non-random
            heuristics — an uncertainty heatmap with the highest-scoring
            region's box, stacked horizontally into one image."""
            image = batch['images']
            index = batch['meta'][0]['index']  # NOTE(review): unused below
            # Normalize the ground-truth mask to [0, 1].
            gt = np.asarray(batch['masks'], np.float32)
            gt /= (gt.max() + 1e-8)
            res = self.predict_on_batch(batch)

            # Resize the input to the ground-truth resolution.
            image = F.interpolate(image,
                                  size=gt.shape[-2:],
                                  mode='bilinear',
                                  align_corners=False)
            original = hu.denormalize(image, mode='rgb')[0]
            img_res = hu.save_image(savedir_image,
                                    original,
                                    mask=res[0],
                                    return_image=True)

            img_gt = hu.save_image(savedir_image,
                                   original,
                                   mask=gt[0],
                                   return_image=True)
            img_gt = models.text_on_image('Groundtruth',
                                          np.array(img_gt),
                                          color=(0, 0, 0))
            img_res = models.text_on_image('Prediction',
                                           np.array(img_res),
                                           color=(0, 0, 0))

            if 'points' in batch:
                # Remap point labels for drawing: 1 -> 2, 0 -> 1,
                # 255 (ignore) -> 0.
                pts = batch['points'][0].numpy().copy()
                pts[pts == 1] = 2
                pts[pts == 0] = 1
                pts[pts == 255] = 0
                img_gt = np.array(
                    hu.save_image(savedir_image,
                                  img_gt / 255.,
                                  points=pts,
                                  radius=2,
                                  return_image=True))

            # score map
            if self.heuristic != 'random':
                score_map = self.compute_uncertainty(batch['images'],
                                                     replicate=True,
                                                     scale_factor=1,
                                                     method=self.heuristic)
                score_map = F.interpolate(score_map[None],
                                          size=gt.shape[-2:],
                                          mode='bilinear',
                                          align_corners=False).squeeze()
                h, w = score_map.shape
                bbox_yxyx = get_rect_bbox(h, w, n_regions=self.n_regions)

                # Colormap rendering of the normalized uncertainty map.
                heatmap = hu.f2l(
                    hi.gray2cmap(
                        (score_map / score_map.max()).cpu().numpy().squeeze()))

                # Mean uncertainty per rectangular region.
                s_list = np.zeros(len(bbox_yxyx))
                for i, (y1, x1, y2, x2) in enumerate(bbox_yxyx):
                    s_list[i] = score_map[y1:y2, x1:x2].mean()

                # Draw only the highest-scoring region's box, then blend
                # it 50/50 with the heatmap.
                img_bbox = bbox_yxyx_on_image(bbox_yxyx[[s_list.argmax()]],
                                              original)
                img_score_map = img_bbox * 0.5 + heatmap * 0.5

                img_list = [
                    np.array(img_gt),
                    np.array(img_res), (img_score_map * 255).astype('uint8')
                ]
            else:
                img_list = [np.array(img_gt), np.array(img_res)]
            hu.save_image(savedir_image, np.hstack(img_list))
def save_images(exp_dict):
    """Save qualitative figures (rgb, point annotations, gt mask, prediction)
    for up to six randomly sampled test images that have a non-empty mask.

    NOTE(review): relies on module-level globals `savedir_base` and
    `exp_group` defined elsewhere in this file — confirm they are set
    before calling.
    """
    dataset_name = exp_dict['dataset']['name']
    n_classes = exp_dict['dataset']['n_classes']  # NOTE(review): unused here
    model = models.get_model(model_dict=exp_dict['model'],
                             exp_dict=exp_dict,
                             train_set=None).cuda()
    state_dict = hc.load_checkpoint(exp_dict,
                                    savedir_base,
                                    fname='model_best.pth')
    model.load_state_dict(state_dict)
    model.eval()
    np.random.seed(1)  # fixed seed so the sampled images are reproducible

    train_set = datasets.get_dataset(dataset_dict={'name': dataset_name},
                                     datadir=None,
                                     split="test",
                                     exp_dict=exp_dict)
    n_images = 0
    for _ in range(len(train_set)):
        # Sampling with replacement; duplicates are possible but harmless.
        i = np.random.choice(len(train_set))
        b = train_set[i]

        if n_images > 5:
            break

        # Skip images without any annotated mask.
        if b['masks'].sum() == 0:
            print(i)
            continue
        n_images += 1
        batch = ut.collate_fn([b])

        image = batch['images']
        # Normalize the ground-truth mask to [0, 1] (epsilon avoids /0).
        gt = np.asarray(batch['masks'], np.float32)
        gt /= (gt.max() + 1e-8)

        image = F.interpolate(image,
                              size=gt.shape[-2:],
                              mode='bilinear',
                              align_corners=False)
        img_rgb = hu.f2l(hu.denormalize(image, mode='rgb')[0])
        img_rgb = (np.array(img_rgb) * 255.).astype('uint8')

        # save rgb
        fname_rgb = '.tmp/covid_qualitative/%s/%s/%d_rgb.png' % (exp_group,
                                                                 'gt', i)
        hu.save_image(fname_rgb, img_rgb)

        # save pts
        fname_pts = '.tmp/covid_qualitative/%s/%s/%d_pts.png' % (exp_group,
                                                                 'gt', i)
        img_gt = np.array(hu.save_image('', img_rgb, return_image=True))

        if 'points' in batch:
            # BUG FIX: copy before remapping labels — `.numpy()` shares
            # memory with the underlying tensor, so writing in place would
            # corrupt the batch's point annotations (sibling code elsewhere
            # in this file copies here too).
            pts = batch['points'][0].numpy().copy()
            pts[pts == 1] = 2
            pts[pts == 0] = 1
            pts[pts == 255] = 0
            img_gt = np.array(
                hu.save_image('',
                              img_gt / 255.,
                              points=pts,
                              radius=2,
                              return_image=True))
        hu.save_image(fname_pts, img_gt)

        # save mask
        fname_mask = '.tmp/covid_qualitative/%s/%s/%d_mask.png' % (exp_group,
                                                                   'gt', i)

        img_mask = np.array(
            hu.save_image('', img_rgb, mask=gt[0], return_image=True))
        hu.save_image(fname_mask, img_mask)

        # pred
        fname_pred = '.tmp/covid_qualitative/%s/%s/%d_%s.png' % (
            exp_group, 'preds', i, exp_dict['model']['loss'])
        res = model.predict_on_batch(batch)

        img_res = hu.save_image('', img_rgb, mask=res[0], return_image=True)
        hu.save_image(fname_pred, np.array(img_res))