def __getitem__(self, idx):
    #if index % 2 == 0:
    img0 = cv2.imread(self._image_paths[idx][0], 0)
    img1 = cv2.imread(self._image_paths[idx][1], 0)
    img0 = Image.fromarray(img0)
    img1 = Image.fromarray(img1)
    if self._mode == 'train':
        img0 = GetTransforms(img0, type=self.cfg.use_transforms_type)
        img1 = GetTransforms(img1, type=self.cfg.use_transforms_type)
    img0 = np.array(img0)
    img1 = np.array(img1)
    img0 = transform(img0, self.cfg)
    img1 = transform(img1, self.cfg)
    labels = np.array(self._labels[idx]).astype(np.float32)
    #print(self._image_paths[index][0],self._image_paths[index][1],labels)
    img0 = torch.from_numpy(img0).float()
    img1 = torch.from_numpy(img1).float()
    labels = torch.from_numpy(labels).float()
    if self._mode == 'train' or self._mode == 'dev':
        return (img0, img1, labels)
    else:
        raise Exception('Unknown mode : {}'.format(self._mode))
    #return img0, img1 , torch.from_numpy(np.array([int(self.training_df.iat[index,2])],dtype=np.float32))
    return img0, img1, labels
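
# --- Illustration (not part of the original code): a minimal sketch of how the
# paired (img0, img1, labels) tuples returned above are typically consumed.
# DummyPairDataset is a hypothetical stand-in that only mimics the output format
# of the method above; shapes and the batch size are assumptions.
import torch
from torch.utils.data import Dataset, DataLoader


class DummyPairDataset(Dataset):
    """Hypothetical stand-in emitting (img0, img1, label) like the method above."""

    def __len__(self):
        return 8

    def __getitem__(self, idx):
        img0 = torch.randn(3, 224, 224)       # C x H x W, as produced after transform/transpose
        img1 = torch.randn(3, 224, 224)
        label = torch.tensor(float(idx % 2))  # e.g. 1 = dissimilar pair, 0 = similar pair
        return img0, img1, label


loader = DataLoader(DummyPairDataset(), batch_size=4, shuffle=True)
for img0, img1, labels in loader:
    # a Siamese-style model would embed img0 and img1 and compare the embeddings
    print(img0.shape, img1.shape, labels.shape)
    break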
def __getitem__(self, idx):
    # print(os.getcwd())
    # print(path_test)
    path_test = os.path.join(os.getcwd(), 'Chexpert/data/nas',
                             self._image_paths[idx])
    # print(path_test)
    # print(self._image_paths[idx])
    # testing = cv2.imread()
    # image = cv2.imread(self._image_paths[idx], 0)
    image = cv2.imread(path_test, 0)
    # print(image)
    image = Image.fromarray(image)
    if self._mode == 'train':
        image = GetTransforms(image, type=self.cfg.use_transforms_type)
    image = np.array(image)
    if self.cfg.use_equalizeHist:
        image = cv2.equalizeHist(image)
    image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB).astype(np.float32)
    # changed the resolution of the img from 512 to 640 all around to see if it
    # produces better results
    # changed the interpolation method from INTER_LINEAR to INTER_AREA, which
    # resamples using pixel area relation; possibly better results with
    # INTER_AREA if no zooming is applied
    if self.cfg.fix_ratio:
        image = self._fix_ratio(image)
    else:
        image = cv2.resize(image, dsize=(self.cfg.width, self.cfg.height),
                           interpolation=cv2.INTER_AREA)
    # changed the smoothing of the image: replaced gaussian_blur with
    # bilateralFilter, a filter that simplifies the image while preserving edges;
    # it can run a little slower than GaussianBlur since it is a more complex
    # operation, but it might improve results.
    # left the if statement unchanged, so the same images are filtered as before.
    if self.cfg.gaussian_blur > 0:
        image = cv2.bilateralFilter(image, 9, 75, 75)
    # normalization
    image -= self.cfg.pixel_mean
    # vgg and resnet do not use pixel_std; densenet and inception do.
    if self.cfg.use_pixel_std:
        image /= self.cfg.pixel_std
    # numpy image tensor : H x W x C
    # torch image tensor : C x H x W
    image = image.transpose((2, 0, 1))
    labels = np.array(self._labels[idx]).astype(np.float32)
    path = self._image_paths[idx]
    if self._mode == 'train' or self._mode == 'dev':
        return (image, labels)
    elif self._mode == 'test':
        return (image, path)
    elif self._mode == 'heatmap':
        return (image, path, labels)
    else:
        raise Exception('Unknown mode : {}'.format(self._mode))
def __getitem__(self, idx):
    # print(os.getcwd())
    # print(path_test)
    path_test = os.path.join(os.getcwd(), 'Chexpert/data/nas',
                             self._image_paths[idx])
    # print(path_test)
    # print(self._image_paths[idx])
    # testing = cv2.imread()
    # image = cv2.imread(self._image_paths[idx], 0)
    image = cv2.imread(path_test, 0)
    # print(image)
    image = Image.fromarray(image)
    # print('working,----------------------------------')
    if self._mode == 'train':
        image = GetTransforms(image, type=self.cfg.use_transforms_type)
    image = np.array(image)
    if self.cfg.use_equalizeHist:
        image = cv2.equalizeHist(image)
    image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB).astype(np.float32)
    if self.cfg.fix_ratio:
        image = self._fix_ratio(image)
    else:
        image = cv2.resize(image, dsize=(self.cfg.width, self.cfg.height),
                           interpolation=cv2.INTER_LINEAR)
    if self.cfg.gaussian_blur > 0:
        image = cv2.GaussianBlur(
            image, (self.cfg.gaussian_blur, self.cfg.gaussian_blur), 0)
    # normalization
    image -= self.cfg.pixel_mean
    # vgg and resnet do not use pixel_std, densenet and inception use.
    if self.cfg.use_pixel_std:
        image /= self.cfg.pixel_std
    # normal image tensor : H x W x C
    # torch image tensor : C x H x W
    image = image.transpose((2, 0, 1))
    labels = np.array(self._labels[idx]).astype(np.float32)
    path = self._image_paths[idx]
    if self._mode == 'train' or self._mode == 'dev':
        return (image, labels)
    elif self._mode == 'test':
        return (image, path)
    elif self._mode == 'heatmap':
        return (image, path, labels)
    else:
        raise Exception('Unknown mode : {}'.format(self._mode))
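
# --- Illustration (not part of the original code): the snippets below call a
# helper named `transform(image, cfg)` that is not defined in this section. The
# sketch below is a plausible reconstruction from the inline pipeline above
# (equalizeHist -> gray-to-RGB -> resize -> blur -> mean/std normalization ->
# HWC-to-CHW). The cfg attribute names mirror the ones used above; the fix_ratio
# branch is omitted because _fix_ratio is a dataset method. Everything else is
# an assumption, not the project's actual implementation.
import cv2
import numpy as np


def transform(image, cfg):
    """Hedged sketch of the per-image preprocessing pipeline."""
    if cfg.use_equalizeHist:
        image = cv2.equalizeHist(image)              # expects 8-bit grayscale input
    image = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB).astype(np.float32)
    image = cv2.resize(image, dsize=(cfg.width, cfg.height),
                       interpolation=cv2.INTER_LINEAR)
    if cfg.gaussian_blur > 0:                        # kernel size assumed odd, e.g. 3
        image = cv2.GaussianBlur(
            image, (cfg.gaussian_blur, cfg.gaussian_blur), 0)
    image -= cfg.pixel_mean
    if cfg.use_pixel_std:                            # densenet/inception-style scaling
        image /= cfg.pixel_std
    return image.transpose((2, 0, 1))                # H x W x C -> C x H x W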
def __getitem__(self, idx):
    image = cv2.imread(self._image_paths[idx], 0)
    image = Image.fromarray(image)
    if self._mode == 'train':
        image = GetTransforms(image, type=self.cfg.use_transforms_type)
    image = np.array(image)
    image = transform(image, self.cfg)
    labels = np.array(self._labels[idx]).astype(np.float32)
    path = self._image_paths[idx]
    if self._mode == 'train' or self._mode == 'dev':
        return (image, labels)
    elif self._mode == 'test':
        return (image, path)
    elif self._mode == 'heatmap':
        return (image, path, labels)
    else:
        raise Exception('Unknown mode : {}'.format(self._mode))
def __getitem__(self, idx):
    image = cv2.imread(self._image_paths[idx], 0)  # reading images in grayscale
    image = Image.fromarray(image)  # converting it to a PIL image
    if self._mode == 'train':  # no TTA used
        image = GetTransforms(image, type=self.cfg.use_transforms_type)
    image = np.array(image)
    image = transform(image, self.cfg)  # this is happening for test time as well
    labels = np.array(self._labels[idx]).astype(np.float32)
    path = self._image_paths[idx]
    if self._mode == 'train' or self._mode == 'dev':
        return (image, labels)
    elif self._mode == 'test':
        return (image, path)
    elif self._mode == 'heatmap':
        return (image, path, labels)
    else:
        raise Exception('Unknown mode : {}'.format(self._mode))
def __getitem__(self, idx):
    image = cv2.imread(self._image_paths[idx], 0)
    image = Image.fromarray(image)
    if self._mode == 'train':
        image = GetTransforms(image, type=self.cfg.use_transforms_type)
    image = np.array(image)
    image = user_transform(image, self.cfg)
    # replace uncertain labels (-1) with a random value drawn from smooth_range
    labels = [random.uniform(self.smooth_range[0], self.smooth_range[1])
              if x == -1.0 else x for x in self._labels[idx]]
    labels = np.array(labels).astype(np.float32)
    path = self._image_paths[idx]
    if self._mode == 'train' or self._mode == 'dev':
        return (image, labels)
    elif self._mode == 'test':
        return (image, path)
    elif self._mode == 'heatmap':
        return (image, path, labels)
    else:
        raise Exception('Unknown mode : {}'.format(self._mode))
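
# --- Illustration (not part of the original code): the list comprehension above
# apparently applies a label-smoothing treatment to uncertain labels, replacing
# each -1.0 with a random value from self.smooth_range. A minimal self-contained
# demo of that mapping; smooth_uncertain and the (0.55, 0.85) range are
# hypothetical names/values used only for illustration.
import random

import numpy as np


def smooth_uncertain(labels, smooth_range=(0.55, 0.85)):
    """Replace each -1.0 (uncertain) entry with a random value in smooth_range."""
    return np.array(
        [random.uniform(*smooth_range) if x == -1.0 else x for x in labels],
        dtype=np.float32)


# e.g. [-1, 1, 0, -1, 1] -> [0.71..., 1.0, 0.0, 0.62..., 1.0] (random draws vary)
print(smooth_uncertain([-1.0, 1.0, 0.0, -1.0, 1.0]))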
def __getitem__(self, index):
    #if index % 2 == 0:
    #print(index)
    if (index % 4 == 0):
        # draw one negative and one positive study -> dissimilar pair (label 1)
        while True:
            line0 = random.choice(self.lines)
            fields0 = line0.strip('\n').split(',')
            if self.dict[0].get(fields0[15]) == '0':
                break
        while True:
            line1 = random.choice(self.lines)
            fields1 = line1.strip('\n').split(',')
            if self.dict[0].get(fields1[15]) == '1':
                break
        self._labels.append(1)
    elif (index % 4 == 1):
        # draw one positive and one negative study -> dissimilar pair (label 1)
        while True:
            line0 = random.choice(self.lines)
            fields0 = line0.strip('\n').split(',')
            if self.dict[0].get(fields0[15]) == '1':
                break
        while True:
            line1 = random.choice(self.lines)
            fields1 = line1.strip('\n').split(',')
            if self.dict[0].get(fields1[15]) == '0':
                break
        self._labels.append(1)
    elif (index % 4 == 2 or index % 4 == 3):
        # draw two positive studies -> similar pair (label 0)
        while True:
            line0 = random.choice(self.lines)
            fields0 = line0.strip('\n').split(',')
            if self.dict[0].get(fields0[15]) == '1':
                break
        while True:
            line1 = random.choice(self.lines)
            fields1 = line1.strip('\n').split(',')
            if self.dict[0].get(fields1[15]) == '1':
                break
        self._labels.append(0)
    '''
    if should_get_same_class:
        while True:
            line1 = random.choice(self.lines)
            fields1 = line1.strip('\n').split(',')
            if self.dict[0].get(fields0[7]) == self.dict[0].get(fields1[7]):
                break
    else:
        line1 = random.choice(self.lines)
        fields1 = line1.strip('\n').split(',')
    '''
    image_path = fields0[0]
    image_path = "/kaggle/input/chexpert/" + image_path[21:]
    img0 = cv2.imread(image_path, 0)
    image_path = fields1[0]
    image_path = "/kaggle/input/chexpert/" + image_path[21:]
    img1 = cv2.imread(image_path, 0)
    img0 = Image.fromarray(img0)
    img1 = Image.fromarray(img1)
    if self._mode == 'train':
        img0 = GetTransforms(img0, type=self.cfg.use_transforms_type)
        img1 = GetTransforms(img1, type=self.cfg.use_transforms_type)
    img0 = np.array(img0)
    img1 = np.array(img1)
    img0 = transform(img0, self.cfg)
    img1 = transform(img1, self.cfg)
    # use the label just appended for this pair (the original indexed [0],
    # which always returned the first pair's label)
    labels = np.array(self._labels[-1]).astype(np.float32)
    #print(self._image_paths[index][0],self._image_paths[index][1],labels)
    img0 = torch.from_numpy(img0).float()
    img1 = torch.from_numpy(img1).float()
    labels = torch.from_numpy(labels).float()
    if self._mode == 'train' or self._mode == 'dev':
        return (img0, img1, labels)
    else:
        raise Exception('Unknown mode : {}'.format(self._mode))
    #return img0, img1 , torch.from_numpy(np.array([int(self.training_df.iat[index,2])],dtype=np.float32))
    return img0, img1, labels
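
# --- Illustration (not part of the original code): the index % 4 logic above
# builds pairs so that roughly half are label-1 (one positive plus one negative
# study) and half are label-0 (two positive studies). The hedged, self-contained
# sketch below isolates that sampling scheme; sample_pair is a hypothetical
# helper, it compares the CSV column directly rather than through self.dict,
# and the synthetic rows exist only for the demo.
import random


def sample_pair(rows, index, col=15):
    """Pick (row_a, row_b, pair_label) according to index % 4, like the code above."""

    def pick(target):
        # rejection-sample a CSV row whose column `col` equals `target`
        while True:
            fields = random.choice(rows).strip('\n').split(',')
            if fields[col] == target:
                return fields

    bucket = index % 4
    if bucket == 0:        # negative + positive -> dissimilar pair
        return pick('0'), pick('1'), 1
    elif bucket == 1:      # positive + negative -> dissimilar pair
        return pick('1'), pick('0'), 1
    else:                  # positive + positive -> similar pair
        return pick('1'), pick('1'), 0


# Tiny demo with synthetic rows: column 15 holds the binary target.
rows = [','.join(['x'] * 15 + [str(i % 2)]) + '\n' for i in range(20)]
for i in range(4):
    a, b, lbl = sample_pair(rows, i)
    print(i % 4, a[15], b[15], lbl)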