Code example #1
File: datasets.py Project: yeyuhang/ferattention
    def __getitem__(self, idx):

        idx1, idx2, idx3 = self.triplets[idx]
        img1, lab1 = self.data[idx1]
        img2, lab2 = self.data[idx2]
        img3, lab3 = self.data[idx3]

        img1 = np.array(img1)
        img2 = np.array(img2)
        img3 = np.array(img3)

        img1 = utility.to_channels(img1, self.num_channels)
        img2 = utility.to_channels(img2, self.num_channels)
        img3 = utility.to_channels(img3, self.num_channels)

        lab1 = utility.to_one_hot(lab1, self.numclass)
        lab2 = utility.to_one_hot(lab2, self.numclass)
        lab3 = utility.to_one_hot(lab3, self.numclass)

        a = ObjectImageAndLabelTransform(img1, lab1)
        b = ObjectImageAndLabelTransform(img2, lab2)
        c = ObjectImageAndLabelTransform(img3, lab3)
        if self.transform is not None:
            a = self.transform(a)
            b = self.transform(b)
            c = self.transform(c)

        return {'a': a.to_dict(), 'b': b.to_dict(), 'c': c.to_dict()}
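The dict-of-dicts returned above collates cleanly with PyTorch's default DataLoader collate function. A minimal, self-contained sketch of that behaviour with a stand-in dataset (the ToyTripletDataset class, its sizes, and the 'image'/'label' keys are illustrative assumptions, not taken from the project):

    import torch
    from torch.utils.data import DataLoader, Dataset

    class ToyTripletDataset(Dataset):
        """Stand-in mimicking the dict-of-dicts contract of __getitem__ above."""

        def __len__(self):
            return 16

        def _sample(self):
            return {'image': torch.rand(3, 32, 32), 'label': torch.zeros(8)}

        def __getitem__(self, idx):
            return {'a': self._sample(), 'b': self._sample(), 'c': self._sample()}

    loader = DataLoader(ToyTripletDataset(), batch_size=4)
    batch = next(iter(loader))
    # default_collate recurses into nested dicts, so the anchors arrive stacked
    print(batch['a']['image'].shape)  # torch.Size([4, 3, 32, 32])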
Code example #2
    def __getitem__(self, idx):

        # read image
        image, label = self.data[(idx) % len(self.data)]
        #A,A_inv = F.compute_norm_mat( image.shape[1], image.shape[0] )
        #image = F.equalization(image,A,A_inv)
        image = utility.to_channels(image, self.num_channels)

        # read background
        if self.bbackimage:
            idxk = random.randint(1, len(self.databack) - 1)
            back = self.databack[idxk]
            back = F.resize_image(back,
                                  640,
                                  1024,
                                  resize_mode='crop',
                                  interpolate_mode=cv2.INTER_LINEAR)
            back = utility.to_channels(back, self.num_channels)
        else:
            back = np.ones((640, 1024, 3), dtype=np.uint8) * 255

        if self.generate == 'image':
            obj = ObjectImageTransform(image)

        elif self.generate == 'image_and_mask':

            image_org, image_ilu, mask, h = self.ren.generate(image, back)

            image_org = utility.to_gray(image_org.astype(np.uint8))
            image_org = utility.to_channels(image_org, self.num_channels)
            image_org = image_org.astype(np.uint8)

            image_ilu = utility.to_gray(image_ilu.astype(np.uint8))
            image_ilu = utility.to_channels(image_ilu, self.num_channels)
            image_ilu = image_ilu.astype(np.uint8)

            mask = mask[:, :, 0]
            mask_t = np.zeros((mask.shape[0], mask.shape[1], 2))
            mask_t[:, :, 0] = (mask == 0).astype(np.uint8)  # 0 - background
            mask_t[:, :, 1] = (mask == 1).astype(np.uint8)

            obj_image = ObjectImageTransform(image_org.copy())
            obj_data = ObjectImageAndMaskMetadataTransform(
                image_ilu.copy(), mask_t, np.concatenate(
                    ([label], h), axis=0))  #np.array([label])

        else:
            assert (False)

        if self.transform_image:
            obj_image = self.transform_image(obj_image)

        if self.transform_data:
            obj_data = self.transform_data(obj_data)

        x_img, y_mask, y_lab = obj_data.to_value()
        x_org = obj_image.to_value()

        return x_org, x_img, y_mask, y_lab
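The mask produced above is split into two mutually exclusive channels (background and foreground). A small stand-alone illustration of that encoding on a toy mask:

    import numpy as np

    # toy 3x3 binary mask: 1 marks foreground pixels
    mask = np.array([[0, 1, 1],
                     [0, 1, 0],
                     [0, 0, 0]])

    mask_t = np.zeros((mask.shape[0], mask.shape[1], 2))
    mask_t[:, :, 0] = (mask == 0).astype(np.uint8)  # channel 0: background
    mask_t[:, :, 1] = (mask == 1).astype(np.uint8)  # channel 1: foreground

    # every pixel activates exactly one channel, so the channels sum to 1
    assert np.all(mask_t.sum(axis=2) == 1)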
Code example #3
    def __getitem__(self, idx):

        # read image
        image, label = self.data[(idx) % len(self.data)]
        image = utility.to_channels(image, self.num_channels)

        # read background
        if self.bbackimage:
            idxk = random.randint(1, len(self.databack) - 1)
            back = self.databack[idxk]  #(idx)%len(self.databack)
            back = F.resize_image(back,
                                  640,
                                  1024,
                                  resize_mode='crop',
                                  interpolate_mode=cv2.INTER_LINEAR)
            back = utility.to_channels(back, self.num_channels)
        else:
            back = np.ones((640, 1024, 3), dtype=np.uint8) * 255

        if self.generate == 'image':
            obj = ObjectImageTransform(image)

        elif self.generate == 'image_and_label':
            _, image, _ = self.ren.generate(image, back)
            image = utility.to_gray(image.astype(np.uint8))
            image_t = utility.to_channels(image, self.num_channels)
            image_t = image_t.astype(np.uint8)
            label = utility.to_one_hot(int(label), self.data.numclass)
            obj = ObjectImageAndLabelTransform(image_t, label)

        elif self.generate == 'image_and_mask':
            _, image, mask = self.ren.generate(image, back)
            image = utility.to_gray(image.astype(np.uint8))
            image_t = utility.to_channels(image, self.num_channels)
            image_t = image_t.astype(np.uint8)
            #print( image_t.shape, image_t.min(), image_t.max(), flush=True )
            #assert(False)
            mask = mask[:, :, 0]
            mask_t = np.zeros((mask.shape[0], mask.shape[1], 2))
            mask_t[:, :, 0] = (mask == 0).astype(np.uint8)  # background
            mask_t[:, :, 1] = (mask == 1).astype(np.uint8)
            obj = ObjectImageAndMaskMetadataTransform(image_t, mask_t,
                                                      np.array([label]))

        else:
            assert (False)

        if self.transform:
            obj = self.transform(obj)

        return obj.to_dict()
Code example #4
File: dsxbdata.py Project: CarlosPena00/pytorch-unet
    def __getitem__(self, idx):
        image, label = self.data[idx]
        image_t = utility.to_channels(image, ch=self.num_channels)
        label_t = np.zeros((label.shape[0], label.shape[1], 2))
        label_t[:, :, 0] = (label < 1)
        label_t[:, :, 1] = (label >= 1)

        obj = ObjectImageAndMaskTransform(image_t, label_t)
        if self.transform:
            obj = self.transform(obj)
        return obj.to_dict()
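Every example on this page relies on utility.to_channels to coerce an image to a fixed channel count. Its implementation is not shown here; the following is only a rough sketch of the assumed behaviour (replicate grayscale, drop alpha), not the project's actual helper:

    import numpy as np

    def to_channels_sketch(image, ch):
        """Assumed behaviour: return `image` with exactly `ch` channels."""
        if image.ndim == 2:                    # H x W -> H x W x 1
            image = image[:, :, np.newaxis]
        if ch == 3 and image.shape[2] == 1:    # replicate grayscale to 3 channels
            image = np.repeat(image, 3, axis=2)
        elif ch == 3 and image.shape[2] == 4:  # drop an alpha channel
            image = image[:, :, :3]
        elif ch == 1 and image.shape[2] == 3:  # naive RGB -> gray by averaging
            image = image.mean(axis=2, keepdims=True).astype(image.dtype)
        return image

    gray = np.zeros((64, 64), dtype=np.uint8)
    print(to_channels_sketch(gray, 3).shape)   # (64, 64, 3)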
Code example #5
    def __getitem__(self, idx):

        image, label = self.data[idx]
        image = np.array(image)
        image = utility.to_channels(image, self.num_channels)
        label = utility.to_one_hot(label, self.numclass)

        obj = ObjectImageAndLabelTransform(image, label)
        if self.transform:
            obj = self.transform(obj)
        return obj.to_dict()
Code example #6
File: learner.py Project: HelenGuohx/cv-ferattn-code
def transform_image(image, image_size, num_channels):
    image = np.array(image, dtype=np.uint8)
    image = utility.to_channels(image, num_channels)
    obj = ObjectImageTransform(image)
    obj = get_transforms_det(image_size)(obj)
    # tensor is channel x height x width
    # image = image.transpose((2, 0, 1))
    # obj.to_tensor()
    # value = torch.from_numpy(obj.image).float()
    obj.image = obj.image.unsqueeze(0)
    return obj.to_dict()
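The unsqueeze(0) at the end of transform_image adds the batch dimension that PyTorch modules expect before a single image can be fed through a network. A minimal illustration:

    import torch
    import torch.nn as nn

    conv = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=3, padding=1)
    single = torch.rand(3, 64, 64)   # one transformed image, (C, H, W), no batch dim
    batched = single.unsqueeze(0)    # (1, 3, 64, 64), as transform_image does
    print(conv(batched).shape)       # torch.Size([1, 8, 64, 64])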
Code example #7
File: dsxbdata.py Project: CarlosPena00/pytorch-unet
    def __getitem__(self, idx):

        idx = idx % len(self.data)
        image, label = self.data[idx]
        image_t = utility.to_channels(image, ch=self.num_channels)

        label_t = (label > 127).astype(np.uint8)

        obj = ObjectImageAndMaskTransform(image_t, label_t)
        if self.transform:
            obj = self.transform(obj)
        return obj.to_dict()
Code example #8
    def __getitem__(self, idx):

        idx = idx % len(self.data)
        if self.data.train:
            name, image, mask, depth = self.data[idx]
            image_t = utility.to_channels(image, ch=self.num_channels)
            weight_t = getweightmap(mask)
            weight_t = weight_t[:, :, np.newaxis]
            mask_t = np.zeros((mask.shape[0], mask.shape[1], 2))
            mask_t[:, :, 0] = (mask <= 0)  #bg
            mask_t[:, :, 1] = (mask > 0)
            obj = ObjectImageMaskMetadataAndWeightTransform(
                image_t, mask_t, weight_t, np.array([depth]))
        else:
            name, image, depth = self.data[idx]
            image_t = utility.to_channels(image, ch=self.num_channels)
            obj = ObjectImageMetadataTransform(image_t, np.array((idx, depth)))

        if self.transform:
            obj = self.transform(obj)

        return obj.to_dict()
Code example #9
File: imaterialist.py Project: yeyuhang/ferattention
    def __getitem__(self, idx):
        image = self.data[idx]
        Id = idx

        image = np.array(image)
        image = utility.to_channels(image, self.num_channels)

        obj = ObjectImageTransform(image)
        if self.transform:
            obj = self.transform(obj)

        Id = torch.from_numpy(np.array([Id])).float()
        return Id, obj.to_value()
Code example #10
File: dsxbdata.py Project: CarlosPena00/pytorch-unet
    def __getitem__(self, idx):

        idx = idx % len(self.data)
        data = self.data[idx]

        if self.use_weight:
            image, label, weight = data
        elif self.load_segments:
            image, label, segs = data
            if self.shuffle_segments:
                segs = segs[..., np.random.permutation(segs.shape[-1])]
        else:
            image, label = data

        image_t = utility.to_channels(image, ch=self.num_channels)

        label = to_one_hot(label, self.num_classes)

        if self.use_weight:
            obj = ObjectImageMaskAndWeightTransform(image_t, label, weight)
        elif self.load_segments:
            obj = ObjectImageMaskAndSegmentationsTransform(
                image_t, label, segs)
        else:
            obj = ObjectImageAndMaskTransform(image_t, label)

        if self.transform:
            obj = self.transform(obj)

        obj = obj.to_dict()
        obj['segment'] = preprocessing.apply_preprocessing(
            obj['segment'], self.middle_proc)

        if self.load_segments:  ## Warning!
            axis = np.argmin(obj['segment'].shape)

            if self.use_ori:
                if self.transform:
                    inputs = torch.cat((obj['image'], obj['segment']),
                                       dim=axis)
                else:
                    inputs = np.concatenate((obj['image'], obj['segment']),
                                            axis=axis)
                obj['image'] = inputs

            else:
                obj['image'] = obj['segment']

            obj.pop('segment')

        return obj
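Example #10 locates the channel axis with np.argmin over the shape, which works because the channel count is far smaller than the spatial dimensions in both HWC arrays and CHW tensors. A short illustration with made-up shapes:

    import numpy as np
    import torch

    hwc = np.zeros((256, 256, 3))    # numpy array before the transform
    chw = torch.zeros(3, 256, 256)   # tensor after the transform

    print(np.argmin(hwc.shape))  # 2 -> concatenate along the last axis
    print(np.argmin(chw.shape))  # 0 -> concatenate along the first axis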
Code example #11
    def __getitem__(self, idx):

        image, label, contours, weight = self.data[idx]
        image_t = utility.to_channels(image, ch=self.num_channels)
        label_t = np.zeros((label.shape[0], label.shape[1], 3))
        label_t[:, :, 0] = (label < 128)
        label_t[:, :, 1] = (label > 128)
        label_t[:, :, 2] = (contours > 128)
        weight_t = weight[:, :, np.newaxis]

        obj = ObjectImageMaskAndWeightTransform(image_t, label_t, weight_t)
        if self.transform:
            obj = self.transform(obj)
        return obj.to_dict()
Code example #12
File: dsxbdata.py Project: CarlosPena00/pytorch-unet
    def __getitem__(self, idx):

        idx = idx % len(self.data)
        image, label = self.data[idx]
        image_t = utility.to_channels(image, ch=self.num_channels)
        label_t = np.zeros_like(label)
        label_t[:, :, 0] = (label[..., 0] == 0)
        label_t[:, :, 1] = (label[..., 0] == 1)
        label_t[:, :, 2] = (label[..., 0] >= 2)

        obj = ObjectImageAndMaskTransform(image_t, label_t)
        if self.transform:
            obj = self.transform(obj)
        return obj.to_dict()
Code example #13
    def __getitem__(self, idx):

        idx = idx % len(self.data)
        image, label = self.data[idx]
        image = np.array(image)
        image = utility.to_channels(image, self.num_channels)
        label = utility.to_one_hot(label, self.numclass)

        # parse image and label to tensor
        obj = ObjectImageAndLabelTransform(image, label)
        # transform data
        if self.transform:
            obj = self.transform(obj)
        return obj.to_dict()
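Several examples convert an integer class label with utility.to_one_hot. The helper itself is not shown on this page; a minimal sketch under the assumption that it maps a class index to a one-hot vector:

    import numpy as np

    def to_one_hot_sketch(label, num_classes):
        """Assumed behaviour; the project's utility.to_one_hot may differ."""
        one_hot = np.zeros(num_classes, dtype=np.float32)
        one_hot[int(label)] = 1.0
        return one_hot

    print(to_one_hot_sketch(2, 5))   # [0. 0. 1. 0. 0.]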
Code example #14
    def __getitem__(self, idx):

        idx = idx % len(self.data)
        image, mask = self.data[idx]

        image_t = utility.to_channels(image, ch=self.num_channels)
        label_t = np.zeros((mask.shape[0], mask.shape[1], 2))
        label_t[:, :, 0] = (mask < 0)
        label_t[:, :, 1] = (mask > 0)

        obj = ObjectImageAndMaskTransform(image_t, label_t)
        if self.transform:
            obj = self.transform(obj)

        return obj.to_dict()
Code example #15
File: dsxbdata.py Project: CarlosPena00/pytorch-unet
    def __getitem__(self, idx):

        idx = idx % len(self.data)
        image, label, contours = self.data[idx]
        image_t = utility.to_channels(image, ch=self.num_channels)

        label_t = np.zeros((label.shape[0], label.shape[1], 3))
        label_t[:, :, 0] = (label < 128)
        label_t[:, :, 1] = (label > 128)
        label_t[:, :, 2] = (contours > 128)

        obj = ObjectImageAndMaskTransform(image_t, label_t)
        if self.transform:
            obj = self.transform(obj)
        return obj.to_dict()
Code example #16
File: datasets.py Project: yeyuhang/ferattention
    def __getitem__(self, idx):

        idx = idx % self.numclass
        class_index = self.labels_index[idx]
        n = len(class_index)
        idx = class_index[random.randint(0, n - 1)]
        image, label = self.data[idx]

        image = np.array(image)
        image = utility.to_channels(image, self.num_channels)
        label = utility.to_one_hot(label, self.numclass)

        obj = ObjectImageAndLabelTransform(image, label)
        if self.transform:
            obj = self.transform(obj)
        return obj.to_dict()
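Example #16 draws a class first and then a random sample of that class, which balances classes regardless of how frequent each one is. A hedged sketch of how the labels_index lookup could be built (the variable names and toy labels are assumptions):

    import random
    from collections import defaultdict

    labels = [0, 2, 1, 0, 2, 2, 1]        # stand-in per-sample class labels
    labels_index = defaultdict(list)      # class id -> indices of its samples
    for i, lab in enumerate(labels):
        labels_index[lab].append(i)

    cls = random.randint(0, len(labels_index) - 1)   # pick a class uniformly
    idx = random.choice(labels_index[cls])           # then one of its samples
    print(cls, idx)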
Code example #17
File: dsxbdata.py Project: CarlosPena00/pytorch-unet
    def __getitem__(self, idx):

        idx = idx % len(self.data)
        data = self.data[idx]
        if self.use_weight:
            image, label, weight = data
        else:
            image, label = data

        label = (label == 255).astype(float)  # 1024, 1024, 3, max= 1

        # image: 1024, 1024, 3, max = 255
        image_t = utility.to_channels(image, ch=self.num_channels)
        # image_t: 1024, 1024, 3, max = 255

        if self.use_weight:
            obj = ObjectImageMaskAndWeightTransform(image_t, label, weight)
        else:
            obj = ObjectImageAndMaskTransform(image_t, label)

        if self.transform:
            obj = self.transform(obj)
        return obj.to_dict()