Example #1
    def __getitem__(self, idx):
        name = self.img_name_list[idx]
        name_str = decode_int_filename(name)

        img = imageio.imread(get_img_path(name_str, self.voc12_root))

        ms_img_list = []
        for s in self.scales:
            if s == 1:
                s_img = img
            else:
                s_img = imutils.pil_rescale(img, s, order=3)
            s_img = self.img_normal(s_img)
            s_img = imutils.HWC_to_CHW(s_img)
            ms_img_list.append(np.stack([s_img, np.flip(s_img, -1)], axis=0))
        if len(self.scales) == 1:
            ms_img_list = ms_img_list[0]

        out = {
            "name": name_str,
            "img": ms_img_list,
            "size": (img.shape[0], img.shape[1]),
            "label": torch.from_numpy(self.label_list[idx])
        }
        return out
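
Several helpers referenced in Example #1 and the later excerpts (get_img_path, imutils.pil_rescale, imutils.HWC_to_CHW, and the TorchvisionNormalize object used as img_normal) are not shown in these snippets. Below is a minimal sketch of what such helpers typically look like in a VOC-style pipeline; the names match the calls above, but the bodies are assumptions, not the original implementations.

import os

import numpy as np
from PIL import Image


def get_img_path(img_name, voc12_root):
    # Assumed layout: JPEG images live under <voc12_root>/JPEGImages/<name>.jpg
    return os.path.join(voc12_root, 'JPEGImages', img_name + '.jpg')


def pil_rescale(img, scale, order):
    # Rescale an HxW or HxWxC array by `scale` via PIL.
    # order=0 -> nearest neighbour (label maps), order=3 -> bicubic (images).
    height, width = img.shape[:2]
    target = (int(np.round(width * scale)), int(np.round(height * scale)))  # PIL wants (W, H)
    resample = Image.NEAREST if order == 0 else Image.BICUBIC
    return np.asarray(Image.fromarray(img).resize(target, resample))


def HWC_to_CHW(img):
    # Move the channel axis to the front so the array matches PyTorch's CHW layout.
    return np.transpose(img, (2, 0, 1))


class TorchvisionNormalize:
    # Assumed behaviour: scale uint8 RGB to [0, 1] and normalise with ImageNet statistics.
    def __init__(self, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
        self.mean = np.asarray(mean, dtype=np.float32)
        self.std = np.asarray(std, dtype=np.float32)

    def __call__(self, img):
        return (img.astype(np.float32) / 255.0 - self.mean) / self.std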
Example #2
    def __getitem__(self, idx):
        # name = self.img_name_list[idx]
        name = self.sample_list_ct[idx][:-4]
        # name_str = decode_int_filename(name)

        # img = imageio.imread(get_img_path(name_str, self.voc12_root))
        img = np.load(self.data_dir + 'ct/' + self.sample_list_ct[idx])
        label = np.load(self.data_dir + 'seg/' + self.sample_list_seg[idx])
        img = ndimage.zoom(img, (0.5, 0.5), order=3)
        label = ndimage.zoom(label, (0.5, 0.5), order=0)

        ms_img_list = []
        for s in self.scales:
            if s == 1:
                s_img = img
            else:
                s_img = imutils.pil_rescale(img, s, order=3)
            # s_img = self.img_normal(s_img)
            # s_img = imutils.HWC_to_CHW(s_img)
            ms_img_list.append(np.stack([s_img, np.flip(s_img, -1)], axis=0))
        if len(self.scales) == 1:
            ms_img_list = ms_img_list[0]

        out = {
            "name": name,
            "img": ms_img_list,
            "size": (img.shape[0], img.shape[1]),
            "label": torch.from_numpy(np.array([label.max()]))
        }
        return out
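
Example #2 swaps the VOC JPEG reader for 2D CT slices stored as .npy files, which is why the normalisation and HWC_to_CHW steps are commented out; np.flip(s_img, -1) then flips the bare (H, W) slice along its width axis. If the downstream network expects an explicit channel dimension, one possible adaptation (an assumption, not part of the original code) is to normalise per slice and add the axis before stacking:

import numpy as np

slice_2d = np.random.rand(256, 256).astype(np.float32)  # stand-in for a rescaled CT slice

s_img = (slice_2d - slice_2d.mean()) / (slice_2d.std() + 1e-5)  # simple per-slice normalisation
s_img = s_img[np.newaxis, ...]                                  # (H, W) -> (1, H, W)

# np.flip(..., -1) still flips the last (width) axis, so the original/flipped
# stack built in the scale loop keeps working and has shape (2, 1, H, W)
pair = np.stack([s_img, np.flip(s_img, -1)], axis=0)
print(pair.shape)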
Example #3
    def __getitem__(self, idx):
        name = self.img_name_list[idx]
        name_str = name  # decode_int_filename(name)

        img_path = get_img_path(name_str, self.voc12_root)
        try:
            img = imageio.imread(img_path)
        except Exception:
            # Fail loudly: without re-raising, `img` would be undefined below and
            # the method would crash later with a NameError instead.
            print("failed to read image: {}".format(img_path))
            raise

        ms_img_list = []
        for s in self.scales:
            if s == 1:
                s_img = img
            else:
                s_img = imutils.pil_rescale(img, s, order=3)
            s_img = self.img_normal(s_img)
            s_img = imutils.HWC_to_CHW(s_img)
            ms_img_list.append(np.stack([s_img, np.flip(s_img, -1)], axis=0))
        if len(self.scales) == 1:
            ms_img_list = ms_img_list[0]

        out = {
            "name": name_str,
            "img": ms_img_list,
            "size": (img.shape[0], img.shape[1]),
            "label": torch.from_numpy(self.label_list[idx])
        }
        return out
Example #4
 def __getitem__(self, index):
     datafiles = self.imgs[index]
     name = datafiles['name']
     x_path = datafiles['img']
     img_y = datafiles['label']
     img = imageio.imread(x_path)
     if img.shape != (128, 128, 3):
         img = np.expand_dims(img, axis=2)
         img = np.concatenate((img, img, img), axis=-1)
     ms_img_list = []
     for s in self.scales:
         if s == 1:
             s_img = img
         else:
             s_img = imutils.pil_rescale(img, s, order=3)
         s_img = self.img_normal(s_img)
         #print(s_img.shape)
         s_img = imutils.HWC_to_CHW(s_img)
         ms_img_list.append(
             np.stack([s_img, np.flip(s_img, -1)],
                      axis=0))  # np.flip(s_img, -1) flips left-right; stack the original image together with its flipped copy
     if len(self.scales) == 1:
         ms_img_list = ms_img_list[0]
     out = {
         "name": name,
         "img": ms_img_list,
         "size": (img.shape[0], img.shape[1]),
         "label": img_y
     }
     return out
Example #5
    def __getitem__(self, idx):
        name_str = self.img_name_list[idx]
        #name_str = decode_int_filename(name)
        #if self.image_folder.split('/')[1] == 'kaggle':
        paths_imgs = glob.glob('{}/{}*.png'.format(self.image_folder,
                                                   name_str))
        img = combine_imgs(paths_imgs)
        #else:
        #   img = np.asarray(imageio.imread(get_img_path_2(name_str,
        #                                                 self.voc12_root,self.image_folder)))

        #img = imageio.imread(get_img_path_2(name_str, self.voc12_root,self.image_folder))

        ms_img_list = []
        for s in self.scales:
            if s == 1:
                s_img = img
            else:
                s_img = imutils.pil_rescale(img, s, order=3)
            s_img = self.img_normal(s_img)
            s_img = imutils.HWC_to_CHW(s_img)
            ms_img_list.append(np.stack([s_img, np.flip(s_img, -1)], axis=0))
        if len(self.scales) == 1:
            ms_img_list = ms_img_list[0]

        out = {
            "name": name_str,
            "img": ms_img_list,
            "size": (img.shape[0], img.shape[1]),
            "label": torch.from_numpy(self.label_list[idx])
        }
        return out
Example #6
def mywork(model, args):
    img_dir = "/home/pfc/code/object_detect/irn/voc12/data/VOC12/infer/JPEGImages"
    img_normal = voc12.dataloader.TorchvisionNormalize()
    with torch.no_grad():
        for img_name in os.listdir(img_dir):
            curimg_path = os.path.join(img_dir, img_name)
            img = imageio.imread(curimg_path)
            size = (img.shape[0], img.shape[1])

            ms_img_list = []
            for s in args.cam_scales:
                if s == 1:
                    s_img = img
                else:
                    s_img = imutils.pil_rescale(img, s, order=3)
                s_img = img_normal(s_img)
                s_img = imutils.HWC_to_CHW(s_img)
                ms_img_list.append(
                    np.stack([s_img, np.flip(s_img, -1)], axis=0))
            if len(args.cam_scales) == 1:
                ms_img_list = ms_img_list[0]

            strided_size = imutils.get_strided_size(size, 4)
            strided_up_size = imutils.get_strided_up_size(size, 16)

            # with multiple scales, each entry of ms_img_list is a (2, C, H, W)
            # stack of the image and its horizontal flip
            outputs = [model(torch.Tensor(msf_img)) for msf_img in ms_img_list]

            strided_cam = torch.sum(
                torch.stack([
                    F.interpolate(torch.unsqueeze(o, 0),
                                  strided_size,
                                  mode='bilinear',
                                  align_corners=False)[0] for o in outputs
                ]), 0)
            highres_cam = [
                F.interpolate(torch.unsqueeze(o, 1),
                              strided_up_size,
                              mode='bilinear',
                              align_corners=False) for o in outputs
            ]
            highres_cam = torch.sum(torch.stack(highres_cam, 0),
                                    0)[:, 0, :size[0], :size[1]]

            valid_cat = torch.zeros(1, dtype=torch.long)  # keep only the class-0 channel of the CAM

            strided_cam = strided_cam[valid_cat]
            strided_cam /= F.adaptive_max_pool2d(strided_cam, (1, 1)) + 1e-5

            highres_cam = highres_cam[valid_cat]
            highres_cam /= F.adaptive_max_pool2d(highres_cam, (1, 1)) + 1e-5

            # save cams
            np.save(
                os.path.join(args.cam_out_dir, img_name + '.npy'), {
                    "keys": valid_cat,
                    "cam": strided_cam.cpu(),
                    "high_res": highres_cam.cpu().numpy()
                })
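
Because mywork() passes a plain Python dict to np.save, NumPy pickles it into a 0-d object array, so reading the CAMs back requires allow_pickle=True followed by .item(). A minimal loading sketch (the path is only illustrative):

import numpy as np

# illustrative path; mywork() writes to os.path.join(args.cam_out_dir, img_name + '.npy')
cam_dict = np.load('cam_out/2007_000032.jpg.npy', allow_pickle=True).item()

keys = cam_dict['keys']              # the valid_cat tensor saved above
strided_cam = cam_dict['cam']        # still a torch tensor: it was saved via .cpu() without .numpy()
highres_cam = cam_dict['high_res']   # numpy array, upsampled to the original image size
print(keys, strided_cam.shape, highres_cam.shape)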
Example #7
    def __getitem__(self, idx):
        out = super().__getitem__(idx)

        reduced_label = imutils.pil_rescale(out['label'], 0.25, 0)

        out['aff_bg_pos_label'], out['aff_fg_pos_label'], out['aff_neg_label'] = self.extract_aff_lab_func(reduced_label)

        return out
Example #8
    def __getitem__(self, idx):
        name, _ = self.images_df.iloc[idx]

        ds = pydicom.dcmread(os.path.join(self.images_dir, f'{name}.dcm'))
        img = np.tile(ds.pixel_array[..., None], 3)
        mask = cv2.imread(os.path.join(self.masks_path, name + '.png'), 0)
        img, mask = self.transform(img, mask)

        out = {
            'img': img.permute(2, 0, 1),
            'label': mask.squeeze(-1),
            'name': name
        }

        reduced_label = imutils.pil_rescale(out['label'].numpy(), 0.25, 0)

        out['aff_bg_pos_label'], out['aff_fg_pos_label'], out['aff_neg_label'] = self.extract_aff_lab_func(reduced_label)
        return out