Example #1
0
    def __getitem__(self, index):
        """Return one mean-teacher training sample.

        Draws either a labeled image/mask pair (selected by ``index``) or the
        next unlabeled image (tracked by ``self.unlabeled_idx``) according to
        ``self.unlabeled_ratio``, then produces two independently
        noise-perturbed views of the same geometrically-augmented image for
        consistency training.

        Returns:
            dict with normalized float32 tensors ``img_a``/``img_b`` (two noisy
            views), mask tensors at full/half/quarter resolution, a ``has_msk``
            flag, and an ``is_labeled`` long tensor (unlabeled masks are filled
            with the sentinel value -1).
        """
        # Re-seed from OS entropy so DataLoader worker processes do not all
        # inherit (and replay) the same RNG state.
        random.seed()

        # Choose whether to pull a labeled or an unlabeled sample.
        labeled = 1 if random.random() > self.unlabeled_ratio else 0
        if labeled == 1:
            img = img_as_float(
                imread(self.labeled_img_paths +
                       self.labeled_ids[index]))[:, :, :3]
            # Builtin bool, not np.bool: the np.bool alias was removed in
            # NumPy 1.24.
            msk = imread(self.mask_paths + self.labeled_ids[index]).astype(
                bool)
            msk = np.expand_dims(msk, axis=-1)
        else:
            img = img_as_float(
                imread(self.unlabeled_img_paths +
                       self.unlabeled_ids[self.unlabeled_idx]))[:, :, :3]
            # Unlabeled samples carry a -1 sentinel mask so the supervised
            # loss can ignore them.  NOTE(review): assumes 101x101 images
            # (TGS Salt size) — confirm against the dataset.
            msk = np.ones((101, 101, 1)) * -1.
            self.unlabeled_idx += 1
            # Once every unlabeled image has been used, reshuffle the pool
            # and restart the counter.
            if self.unlabeled_idx >= len(self.unlabeled_ids):
                self.unlabeled_ids = shuffle(self.unlabeled_ids)
                self.unlabeled_idx = 0

        # The geometric augmentations have to be identical for image and mask.
        img, msk = augment_img([img, msk], imsize=self.imsize, mt=True)
        # Brightness, gamma, and gaussian noise may differ between the views.
        img_a = mt_noise(img)
        img_b = mt_noise(img)

        # Convert to float32 once; downsampled masks feed deep supervision
        # at intermediate decoder scales.
        msk_f32 = msk.astype(np.float32)
        msk_tch = torch.from_numpy(msk_f32)
        msk_half_tch = torch.from_numpy(
            resize(msk_f32, (64, 64), preserve_range=True))
        msk_qrtr_tch = torch.from_numpy(
            resize(msk_f32, (32, 32), preserve_range=True))

        out_dict = {
            'img_a':
            self.normalize(torch.from_numpy(img_a.astype(np.float32))),
            'img_b':
            self.normalize(torch.from_numpy(img_b.astype(np.float32))),
            'msk': msk_tch,
            'msk_half': msk_half_tch,
            'msk_qrtr': msk_qrtr_tch,
            'has_msk': msk_tch.sum() > 0,
            'is_labeled': torch.tensor(labeled).long()
        }

        return out_dict
Example #2
0
    def __getitem__(self, index):
        """Return one image / mask / boundary-map training sample.

        Loads the RGB image and binary mask for ``self.img_ids[index]``,
        computes the mask's boundary map, then either applies random
        augmentation (training) or deterministic reflect-padding to
        ``self.imsize`` (validation).

        Returns:
            dict with the normalized, depth-channel-augmented image tensor,
            float32 mask and boundary tensors, a ``has_msk`` flag, and the
            sample ``id`` (filename without the ``.png`` extension).
        """
        # Re-seed from OS entropy so DataLoader worker processes do not all
        # inherit the same RNG state.
        random.seed()

        img = img_as_float(imread(self.img_paths +
                                  self.img_ids[index]))[:, :, :3]
        # Builtin bool, not np.bool: the np.bool alias was removed in
        # NumPy 1.24.
        msk = imread(self.mask_paths + self.img_ids[index]).astype(bool)
        msk = np.expand_dims(msk, axis=-1)
        # Boundary pixels of the mask, as 0/1 integers.
        boundry = find_boundaries(msk) * 1

        if not self.valid:
            # Training: identical random geometric augmentation for all three
            # arrays; augment_img is assumed to return CHW float arrays.
            img_np, msk_np, boundry_np = augment_img([img, msk, boundry],
                                                     imsize=self.imsize)
        else:
            # Validation: deterministic reflect-pad up to self.imsize, then
            # HWC -> CHW for torch.
            img_np = reflect_pad(img, int((self.imsize - img.shape[0]) / 2))
            msk_np = reflect_pad(msk, int((self.imsize - msk.shape[0]) / 2))
            boundry_np = reflect_pad(boundry,
                                     int((self.imsize - boundry.shape[0]) / 2))

            img_np = img_np.transpose((2, 0, 1)).astype(np.float32)
            msk_np = msk_np.transpose((2, 0, 1)).astype(np.float32)
            boundry_np = boundry_np.transpose((2, 0, 1)).astype(np.float32)

        # Get the arrays ready for torch.
        img_tch = self.normalize(torch.from_numpy(img_np.astype(np.float32)))
        msk_tch = torch.from_numpy(msk_np.astype(np.float32))
        boundry_tch = torch.from_numpy(boundry_np.astype(np.float32))

        img_tch = add_depth_channels(img_tch)

        out_dict = {
            'img': img_tch,
            'msk': msk_tch,
            'has_msk': msk_tch.sum() > 0,
            'boundry': boundry_tch,
            'id': self.img_ids[index].replace('.png', '')
        }

        return out_dict
Example #3
0
    def __getitem__(self, index):
        """Return one training sample with masks at five scales.

        Loads the RGB image and binary mask for ``self.img_ids[index]``,
        applies random augmentation (training) or deterministic
        reflect-padding to ``self.imsize`` (validation), and builds
        downsampled copies of the mask (64/32/16/8 px) for deep supervision.

        Returns:
            dict with the normalized image tensor, a ``msk`` list of five
            float32 mask tensors ordered full -> 1/16 resolution, a
            ``has_msk`` flag, and the sample ``id`` (filename without the
            ``.png`` extension).
        """
        # Re-seed from OS entropy so DataLoader worker processes do not all
        # inherit the same RNG state.
        random.seed()

        img = img_as_float(imread(self.img_paths +
                                  self.img_ids[index]))[:, :, :3]
        # Builtin bool, not np.bool: the np.bool alias was removed in
        # NumPy 1.24.
        msk = imread(self.mask_paths + self.img_ids[index]).astype(bool)
        msk = np.expand_dims(msk, axis=-1)

        if not self.valid:
            # Training: identical random geometric augmentation for image and
            # mask; augment_img is assumed to return CHW float arrays.
            img_np, msk_np = augment_img([img, msk], imsize=self.imsize)
        else:
            # Validation: deterministic reflect-pad up to self.imsize, then
            # HWC -> CHW for torch.
            img_np = reflect_pad(img, int((self.imsize - img.shape[0]) / 2))
            msk_np = reflect_pad(msk, int((self.imsize - msk.shape[0]) / 2))
            img_np = img_np.transpose((2, 0, 1)).astype(np.float32)
            msk_np = msk_np.transpose((2, 0, 1)).astype(np.float32)

        # Get the image ready for torch.
        img_tch = self.normalize(torch.from_numpy(img_np.astype(np.float32)))

        msk_tch = torch.from_numpy(msk_np.astype(np.float32))

        def _downsample(size):
            # Resize channel 0 of the mask to (size, size), restore a leading
            # channel axis (1, size, size), and return a float32 tensor.
            small = resize(msk_np[0].astype(np.float32), (size, size),
                           preserve_range=True)
            small = np.expand_dims(small, axis=-1).transpose(2, 0, 1)
            return torch.from_numpy(small.astype(np.float32))

        # Downsampled masks for deep supervision at intermediate scales.
        msk_half_tch = _downsample(64)
        msk_qrtr_tch = _downsample(32)
        msk_eigt_tch = _downsample(16)
        msk_sixteen_tch = _downsample(8)

        out_dict = {
            'img':
            img_tch,
            'msk': [
                msk_tch, msk_half_tch, msk_qrtr_tch, msk_eigt_tch,
                msk_sixteen_tch
            ],
            'has_msk':
            msk_tch.sum() > 0,
            'id':
            self.img_ids[index].replace('.png', '')
        }

        return out_dict