Example #1
    def __getitem__(self, index):
        # Load the input image and convert OpenCV's BGR output to RGB.
        img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])
        img = utils.bgr2rgb(cv2.imread(str(img_path)))
        try:
            # The second CSV column holds the target image path; fall back to
            # the input image itself if it is missing or unreadable.
            img_path_2 = os.path.join(self.data_dir, self.data.iloc[index, 1])
            target = utils.bgr2rgb(cv2.imread(str(img_path_2)))
        except Exception:
            target = utils.bgr2rgb(cv2.imread(str(img_path)))
        # Albumentations-style transforms return a dict keyed by 'image'.
        if self.input_transforms:
            img = self.input_transforms(image=img)['image']
        if self.target_transforms:
            target = self.target_transforms(image=target)['image']
        return img, target
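
A minimal sketch of how a paired-image dataset with a __getitem__ like Example #1 could be wired into a DataLoader. The class name PairedImageDataset, the CSV layout, and the transform pipelines below are assumptions for illustration; they are not shown in the example above.

import albumentations as albu
from albumentations.pytorch import ToTensorV2
from torch.utils.data import DataLoader

# Input and target pipelines; Example #1 expects callables that accept
# image=... and return a dict with an 'image' key.
input_tfms = albu.Compose([albu.Resize(256, 256), albu.Normalize(), ToTensorV2()])
target_tfms = albu.Compose([albu.Resize(256, 256), ToTensorV2()])

# PairedImageDataset is a hypothetical wrapper class whose __getitem__
# matches Example #1; 'pairs.csv' is assumed to hold input/target path columns.
dataset = PairedImageDataset(data_dir='data/', csv_file='pairs.csv',
                             input_transforms=input_tfms,
                             target_transforms=target_tfms)
loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=2)

for img, target in loader:
    break  # img and target are batched tensors ready for a model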
Example #2
    def __getitem__(self, index):
        img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])
        if self.channels == 3:
            img = utils.bgr2rgb(cv2.imread(str(img_path)))
        else:
            # Flag 0 == cv2.IMREAD_GRAYSCALE: read a single-channel image.
            img = cv2.imread(str(img_path), 0)

        # img = Image.open(img_path)
        # if self.channels == 3:
        #     img = img.convert('RGB')
        # else:
        #     img = img.convert('L')

        y = self.data.iloc[index, 1]
        # Oversample minority classes: raise the probability of the balancing
        # transforms for under-represented labels before composing the pipeline.
        if self.minorities and self.bal_tfms:
            if y in self.minorities:
                if hasattr(self.bal_tfms, 'transforms'):
                    for tr in self.bal_tfms.transforms:
                        tr.p = self.diffs[y]
                    l = [self.bal_tfms]
                    l.extend(self.transforms_)
                    self.tfms = albu.Compose(l)
                else:
                    for t in self.bal_tfms:
                        t.p = self.diffs[y]
                    # Insert the balancing transforms into a copy so that
                    # self.transforms_ does not grow on every call.
                    l = self.transforms_[:]
                    l[1:1] = self.bal_tfms
                    self.tfms = albu.Compose(l)
            else:
                self.tfms = albu.Compose(self.transforms_)
        else:
            self.tfms = albu.Compose(self.transforms_)
        x = self.tfms(image=img)['image']
        if self.channels == 1:
            x = x.unsqueeze(0)
        if self.seg:
            # Segmentation target: apply only the first (geometric) transform
            # to the mask; albumentations expects a numpy array via 'image'.
            mask = Image.open(self.data.iloc[index, 1])
            seg_tfms = albu.Compose([self.tfms.transforms[0]])
            y = torch.from_numpy(
                seg_tfms(image=np.array(mask))['image']).long().squeeze(0)

        # if self.obj:
        #     s = x.size()[1]
        #     if isinstance(s,tuple):
        #         s = s[0]
        #     row_scale = s/img.size[0]
        #     col_scale = s/img.size[1]
        #     y = rescale_bbox(y,row_scale,col_scale)
        #     y.squeeze_()
        #     y2 = self.data.iloc[index, 2]
        #     y = (y,y2)
        return (x, y, self.data.iloc[index, 0])
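
Example #2 reads per-class augmentation probabilities from self.diffs but does not show how they are derived. A plausible sketch, assuming they come from class frequencies so that rarer classes receive stronger augmentation; the function name, threshold, and formula below are illustrative assumptions, not part of the example.

import pandas as pd

def minority_diffs(labels, threshold=0.8):
    # Assumption: classes whose count falls below `threshold` times the
    # majority count are minorities, and their augmentation probability is
    # the relative gap to the majority class.
    counts = pd.Series(labels).value_counts()
    majority = counts.max()
    minorities = [c for c, n in counts.items() if n < threshold * majority]
    diffs = {c: 1.0 - counts[c] / majority for c in minorities}
    return minorities, diffs

minorities, diffs = minority_diffs(['dog', 'dog', 'dog', 'dog', 'cat', 'cat', 'bird'])
# minorities == ['cat', 'bird'], diffs == {'cat': 0.5, 'bird': 0.75}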
Example #3
    def __getitem__(self, index):

        frames = utils.path_list(os.path.join(self.data_dir,
                                              self.data.iloc[index, 0]),
                                 sort=True)
        # The last frame of the clip is the reference frame.
        img_path = frames[self.n_frames - 1]
        try:
            img_ = utils.bgr2rgb(cv2.imread(str(img_path)))
        except Exception:
            # Report the offending path before propagating the error.
            print(img_path)
            raise
        if len(self.pre_transforms.transforms.transforms) > 0:
            img_ = self.pre_transforms(image=img_)['image']
        # The target is taken before any input-degradation transforms.
        target = self.target_transforms(image=img_)['image']
        if len(self.pre_input_transforms.transforms.transforms) > 0:
            img_ = self.pre_input_transforms(image=img_)['image']
        # img_ = self.downscale_transforms(image=img_)['image']
        img = self.input_transforms(image=img_)['image']
        resized_target = self.resized_target_transforms(image=img_)['image']

        # Remaining frames, nearest first, become the neighbouring inputs.
        seq = list(reversed(frames[:-1]))
        neighbours = []
        for img_path in seq:
            try:
                img_ = utils.bgr2rgb(cv2.imread(str(img_path)))
            except Exception:
                print(img_path)
                raise
            if len(self.pre_transforms.transforms.transforms) > 0:
                img_ = self.pre_transforms(image=img_)['image']
            if len(self.pre_input_transforms.transforms.transforms) > 0:
                img_ = self.pre_input_transforms(image=img_)['image']
            neighbours.append(self.input_transforms(image=img_)['image'])

        # Optical flow between the reference frame and each neighbour,
        # converted back to tensors for the model.
        flow = [
            utils.to_tensor(
                self.get_flow(utils.tensor_to_img(img),
                              utils.tensor_to_img(j))) for j in neighbours
        ]

        return img, target, neighbours, flow, resized_target
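
Example #3 relies on a self.get_flow helper that is not shown above. A minimal sketch of such a helper using OpenCV's Farneback dense optical flow; it assumes both frames are equal-sized RGB uint8 arrays, and the actual project may use a different flow estimator.

import cv2

def get_flow(img, neighbour):
    # Dense flow from the neighbouring frame to the reference frame,
    # returned as an HxWx2 float32 array (dx, dy per pixel).
    prev_gray = cv2.cvtColor(neighbour, cv2.COLOR_RGB2GRAY)
    next_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Positional parameters: pyr_scale, levels, winsize, iterations, poly_n,
    # poly_sigma, flags (typical values from OpenCV's optical flow tutorial).
    return cv2.calcOpticalFlowFarneback(prev_gray, next_gray, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)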
Example #4
    def __getitem__(self, index):
        img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])
        try:
            img_ = utils.bgr2rgb(cv2.imread(str(img_path)))
        except Exception:
            # Report the offending path before propagating the error.
            print(img_path)
            raise
        if len(self.pre_transforms.transforms.transforms) > 0:
            img_ = self.pre_transforms(image=img_)['image']
        # The target is taken before any input-degradation transforms.
        target = self.target_transforms(image=img_)['image']
        if len(self.pre_input_transforms.transforms.transforms) > 0:
            img_ = self.pre_input_transforms(image=img_)['image']
        # img_ = self.downscale_transforms(image=img_)['image']
        img = self.input_transforms(image=img_)['image']
        resized_target = self.resized_target_transforms(image=img_)['image']
        return img, target, resized_target
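
Examples #3 and #4 apply separate target and input pipelines to the same source image, so the target can keep full resolution while the input is degraded before tensor conversion. A sketch of how such pipelines could be composed with albumentations; the sizes and the choice of a plain downscale are assumptions for illustration, not taken from the examples.

import albumentations as albu
from albumentations.pytorch import ToTensorV2

# Target: the frame at its original resolution, as a tensor.
target_transforms = albu.Compose([ToTensorV2()])

# Input: the same frame downscaled, simulating a low-resolution observation.
input_transforms = albu.Compose([albu.Resize(64, 64), ToTensorV2()])

# Resized target: the target brought to the input resolution, e.g. for an
# auxiliary loss computed at the lower scale.
resized_target_transforms = albu.Compose([albu.Resize(64, 64), ToTensorV2()])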
Example #5
    def __getitem__(self, index):
        img_path = os.path.join(self.data_dir, self.data.iloc[index, 0])
        if self.channels == 3:
            img = utils.bgr2rgb(cv2.imread(str(img_path)))
        else:
            img = cv2.imread(str(img_path), 0)

        # img = Image.open(img_path)
        # if self.channels == 3:
        #     img = img.convert('RGB')
        # else:
        #     img = img.convert('L')

        # Two label columns per sample, e.g. for a multi-task model.
        y1, y2 = self.data.iloc[index, 1], self.data.iloc[index, 2]
        self.tfms = albu.Compose(self.transforms_)
        x = self.tfms(image=img)['image'].unsqueeze(0)
        return (x, y1, y2)
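
Example #5 returns two labels per image, which fits a multi-task setup. A self-contained sketch of a model that could consume such samples: a shared backbone with one head per label column. Layer sizes and class counts are assumptions for illustration.

import torch.nn as nn

class TwoHeadNet(nn.Module):
    # Shared convolutional backbone feeding two classification heads,
    # one for each of the label columns y1 and y2.
    def __init__(self, n_classes_1=10, n_classes_2=5):
        super().__init__()
        self.backbone = nn.Sequential(
            nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
        )
        self.head1 = nn.Linear(16, n_classes_1)
        self.head2 = nn.Linear(16, n_classes_2)

    def forward(self, x):
        feats = self.backbone(x)
        return self.head1(feats), self.head2(feats)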