Example no. 1
0
    def __getitem__(self, index):
        """Load one train/validation sample: image, mask and boundary map.

        Returns a dict with the normalized image tensor (with depth
        channels added), the mask tensor, a has-mask flag, the boundary
        tensor and the sample id (filename without the '.png' suffix).
        """
        # Re-seed from OS entropy so each DataLoader worker process gets an
        # independent RNG stream for the augmentations below.
        random.seed()

        # Keep only the first three (RGB) channels; img_as_float scales
        # pixel values to [0, 1].
        img = img_as_float(imread(self.img_paths +
                                  self.img_ids[index]))[:, :, :3]
        # np.bool was deprecated in NumPy 1.20 and removed in 1.24 — the
        # builtin bool is the documented replacement and is equivalent here.
        msk = imread(self.mask_paths + self.img_ids[index]).astype(bool)
        msk = np.expand_dims(msk, axis=-1)
        # Mask-boundary map as an auxiliary target; * 1 converts bool -> int.
        boundry = find_boundaries(msk) * 1

        if not self.valid:
            # Training: random augmentation also produces the final imsize
            # and returns arrays already in CHW layout (per usage below).
            img_np, msk_np, boundry_np = augment_img([img, msk, boundry],
                                                     imsize=self.imsize)
        else:
            # Validation: deterministic reflect-padding up to self.imsize
            # (assumes imsize >= input size — TODO confirm with caller).
            img_np = reflect_pad(img, int((self.imsize - img.shape[0]) / 2))
            msk_np = reflect_pad(msk, int((self.imsize - msk.shape[0]) / 2))
            boundry_np = reflect_pad(boundry,
                                     int((self.imsize - boundry.shape[0]) / 2))

            # HWC -> CHW, float32, ready for torch.from_numpy.
            img_np = img_np.transpose((2, 0, 1)).astype(np.float32)
            msk_np = msk_np.transpose((2, 0, 1)).astype(np.float32)
            boundry_np = boundry_np.transpose((2, 0, 1)).astype(np.float32)

        # Convert to tensors; only the image is mean/std-normalized.
        img_tch = self.normalize(torch.from_numpy(img_np.astype(np.float32)))
        msk_tch = torch.from_numpy(msk_np.astype(np.float32))
        boundry_tch = torch.from_numpy(boundry_np.astype(np.float32))

        img_tch = add_depth_channels(img_tch)

        out_dict = {
            'img': img_tch,
            'msk': msk_tch,
            'has_msk': msk_tch.sum() > 0,
            'boundry': boundry_tch,
            'id': self.img_ids[index].replace('.png', '')
        }

        return out_dict
Example no. 2
0
    def __getitem__(self, index):
        """Load one test image plus its horizontal flip for TTA inference.

        Returns a dict with the normalized image, its left-right flipped
        twin, the sample id, and a 'blank' flag derived from file size.
        """
        path = '../data/test/images/' + self.img_ids[index]
        img = img_as_float(imread(path))[:, :, :3]

        # scale up image to 202 or keep at 101, reflect pad to get network sizes
        if self.imsize == 256:
            img = resize(img, (202, 202), preserve_range=True, mode='reflect')
            img = reflect_pad(img, 27)
        else:
            img = reflect_pad(img, 13)

        flipped = np.fliplr(img)

        def _prep(arr):
            # HWC -> CHW float32 copy, then network normalization.
            chw = arr.transpose((2, 0, 1)).astype(np.float32)
            return self.normalize(torch.from_numpy(chw))

        img_tch = _prep(img)
        img_lr_tch = _prep(flipped)

        # Flag is True when the file is NOT 107 bytes — presumably 107 bytes
        # is the size of an all-blank placeholder image; verify against data.
        not_blank = torch.tensor(os.stat(path).st_size != 107)

        return {
            'img': img_tch,
            'img_lr': img_lr_tch,
            'id': self.img_ids[index].replace('.png', ''),
            'blank': not_blank
        }
Example no. 3
0
    def __getitem__(self, index):
        """Load a train/validation sample with the mask at multiple scales.

        Returns a dict with the normalized image, a 5-element list of mask
        tensors at full / 64 / 32 / 16 / 8 resolution (for deep
        supervision), a has-mask flag, and the sample id.
        """
        # Re-seed from OS entropy so each DataLoader worker process gets an
        # independent RNG stream for the augmentations below.
        random.seed()

        # Keep only the first three (RGB) channels; img_as_float scales
        # pixel values to [0, 1].
        img = img_as_float(imread(self.img_paths +
                                  self.img_ids[index]))[:, :, :3]
        # np.bool was deprecated in NumPy 1.20 and removed in 1.24 — the
        # builtin bool is the documented replacement and is equivalent here.
        msk = imread(self.mask_paths + self.img_ids[index]).astype(bool)
        msk = np.expand_dims(msk, axis=-1)

        if not self.valid:
            # Training: random augmentation also produces the final imsize
            # and returns arrays already in CHW layout (per usage below).
            img_np, msk_np = augment_img([img, msk], imsize=self.imsize)
        else:
            # Validation: deterministic reflect-padding up to self.imsize
            # (assumes imsize >= input size — TODO confirm with caller).
            img_np = reflect_pad(img, int((self.imsize - img.shape[0]) / 2))
            msk_np = reflect_pad(msk, int((self.imsize - msk.shape[0]) / 2))
            img_np = img_np.transpose((2, 0, 1)).astype(np.float32)
            msk_np = msk_np.transpose((2, 0, 1)).astype(np.float32)

        # Convert to tensors; only the image is mean/std-normalized.
        img_tch = self.normalize(torch.from_numpy(img_np.astype(np.float32)))
        msk_tch = torch.from_numpy(msk_np.astype(np.float32))

        def _downsampled(size):
            # Resize mask channel 0 to (size, size); return a 1xHxW tensor.
            small = resize(msk_np[0].astype(np.float32), (size, size),
                           preserve_range=True)
            small = np.expand_dims(small, axis=-1).transpose(2, 0, 1)
            return torch.from_numpy(small.astype(np.float32))

        # Full-resolution mask plus downsampled targets for each decoder
        # stage (replaces four copy-pasted resize blocks).
        msk_scales = [msk_tch] + [_downsampled(s) for s in (64, 32, 16, 8)]

        out_dict = {
            'img': img_tch,
            'msk': msk_scales,
            'has_msk': msk_tch.sum() > 0,
            'id': self.img_ids[index].replace('.png', '')
        }

        return out_dict