def __getitem__(self, index):
    """Return (sample, target), plus the bounding box as a tensor when
    ``self.return_bounding_box`` is set.

    Parameters
    ----------
    index : int
    """
    if self.return_bounding_box:
        path, target, bbox = self.samples[index]
        img = pil_loader(path)
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target, torch.from_numpy(bbox)

    # No bounding box: samples are either raw in-memory arrays or
    # (path, target) pairs on disk.
    if self.store_as_numpy:
        img = Image.fromarray(self.samples[index])
        target = self.targets[index]
    else:
        path, target = self.samples[index]
        img = pil_loader(path)
    if self.transform is not None:
        img = self.transform(img)
    if self.target_transform is not None:
        target = self.target_transform(target)
    return img, target
def __getitem__(self, idx):
    """Return one training tuple (or an inference pair in non-train splits).

    Train mode returns:
        source_img_227 : image used to extract the face feature
        source_img_128 : fed to the generator; its output is resized to
                         (227, 227) and compared against source_img_227
        true_label_img : image from the target age group (trains the
                         discriminator)
        true_label_128 : condition map used for generation
        true_label_64 / fake_label_64 : condition maps used for
                         discrimination
        true_label     : the age-group label
    """
    # FIX: was `self.split is "train"` — `is` tests object identity, which
    # is not guaranteed for equal strings; use equality instead.
    if self.split == "train":
        pair_idx = idx // self.batch_size  # a whole batch trains the same pair
        true_label = int(self.label_pairs[pair_idx][0])
        fake_label = int(self.label_pairs[pair_idx][1])
        true_label_128 = self.condition128[true_label]
        true_label_64 = self.condition64[true_label]
        fake_label_64 = self.condition64[fake_label]
        true_label_img = pil_loader(
            self.label_group_images[true_label][
                self.train_group_pointer[true_label]]).resize((128, 128))
        source_img = pil_loader(self.source_images[self.source_pointer])
        source_img_227 = source_img.resize((227, 227))
        source_img_128 = source_img.resize((128, 128))
        # Advance the per-group and source cursors, wrapping to 0 at the end.
        if self.train_group_pointer[true_label] < len(
                self.label_group_images[true_label]) - 1:
            self.train_group_pointer[true_label] += 1
        else:
            self.train_group_pointer[true_label] = 0
        if self.source_pointer < len(self.source_images) - 1:
            self.source_pointer += 1
        else:
            self.source_pointer = 0
        if self.transforms is not None:
            true_label_img = self.transforms(true_label_img)
            source_img_227 = self.transforms(source_img_227)
            source_img_128 = self.transforms(source_img_128)
        if self.label_transforms is not None:
            true_label_128 = self.label_transforms(true_label_128)
            true_label_64 = self.label_transforms(true_label_64)
            fake_label_64 = self.label_transforms(fake_label_64)
        return (source_img_227, source_img_128, true_label_img,
                true_label_128, true_label_64, fake_label_64, true_label)
    else:
        source_img_128 = pil_loader(self.source_images[idx]).resize((128, 128))
        if self.transforms is not None:
            source_img_128 = self.transforms(source_img_128)
        condition_128_tensor_li = []
        if self.label_transforms is not None:
            for condition in self.condition128:
                condition_128_tensor_li.append(
                    self.label_transforms(condition).cuda())
        return source_img_128.cuda(), condition_128_tensor_li
def __getitem__(self, index):
    """Load an image and the same-named reference image from ``self.tgtpath``,
    resize both to ``self.resizetarget`` if needed, and return the pair run
    through the joint transform.
    """
    image_path = self.images[index]
    image = pil_loader(image_path)
    ref_image_path = os.path.join(self.tgtpath, os.path.basename(image_path))
    refimage = pil_loader(ref_image_path)
    if self.resizetarget is not None and (
            image.height, image.width) != self.resizetarget:
        image = F.resize(image, self.resizetarget)
        # FIX: was `sef.resizetarget` — a typo that raised NameError
        # whenever the resize branch was taken.
        refimage = F.resize(refimage, self.resizetarget)
    return self.transform(image, refimage)
def __getitem__(self, index):
    """Retrieve a sample by index.

    Parameters
    ----------
    index : int

    Returns
    -------
    Train mode: (img_a, img_p, img_n) — anchor, positive (same class as
    anchor), negative (different class).
    Eval mode: (img_a, label).
    """
    if not self.train:
        label = self.labels[index]
        if self.in_memory:
            anchor = Image.fromarray(self.data[index])
        else:
            anchor = pil_loader(self.file_names[index])
        if self.transform is not None:
            anchor = self.transform(anchor)
        return anchor, label

    a_idx, p_idx, n_idx = self.triplets[index]
    # Return PIL images so this stays consistent with the other datasets.
    if self.in_memory:
        triplet = [Image.fromarray(self.data[i]) for i in (a_idx, p_idx, n_idx)]
    else:
        triplet = [pil_loader(self.file_names[i]) for i in (a_idx, p_idx, n_idx)]
    if self.transform is not None:
        triplet = [self.transform(img) for img in triplet]
    anchor, positive, negative = triplet
    return anchor, positive, negative
def __getitem__(self, idx):
    """Return (transformed image, label) for the sample at *idx*."""
    sample = pil_loader(self.paths[idx])
    if self.transform is not None:
        sample = self.transform(sample)
    return sample, self.label[idx]
def __getitem__(self, item):
    """Return {'image', 'label', 'metadata'} for one study image row.

    Metadata includes the study name, patient/study identifier, the total
    number of images in the same joint directory, and the per-study
    statistics/weights held on ``self``.
    """
    img_path = self.df.iloc[item, 0]
    img = pil_loader(img_path)
    lbl = self.df.iloc[item, 1]
    if self.transform:
        img = self.transform(img)

    # NOTE(review): the path is assumed to look like
    # <a>/<b>/<study>/<patient>/<study_dir>/imageNNN.ext — confirm layout.
    parts = img_path.split('/')
    study = parts[2]
    img_num = int(parts[5][5:-4])
    joint_dir = '/'.join(parts[:-1])

    # Count how many following rows still belong to the same joint directory.
    extra = 0
    while (item + extra + 1 < len(self.df)
           and self.df.iloc[item + extra + 1, 0].startswith(joint_dir)):
        extra += 1

    meta = {
        'study_name': study,
        'patient/study': parts[3] + '/' + parts[4],
        'total_img_num': img_num + extra,
        'nt': self.nt[study],
        'at': self.at[study],
        'dataset_size': self.sizes[study],
        'wt1': self.wt1[study],
        'wt0': self.wt0[study]
    }
    meta['wt'] = meta['wt1'] if lbl == 1 else meta['wt0']
    return {'image': img, 'label': lbl, 'metadata': meta}
def __getitem__(self, idx):
    """Return the transformed image at *idx*, memoizing per filename.

    NOTE(review): the cache is unbounded — every distinct file stays in
    memory for the dataset's lifetime.
    """
    key = self.image_filenames[idx]
    if key not in self._image_cache:
        self._image_cache[key] = self.transform(pil_loader(key))
    return self._image_cache[key]
def __getitem__(self, idx):
    """Sample a (source, true) image pair with their labels and ages.

    Returns
    -------
    (source_img, true_img, source_label, target_label, true_label,
     true_age, mean_age) — mean_age is the precomputed mean for the
    target label group.
    """
    src_lbl = self.source_labels[idx]
    tgt_lbl = self.target_labels[idx]
    tru_lbl = self.true_labels[idx]
    src_img = pil_loader(random.choice(self.label_group_images[src_lbl]))
    pick = random.randint(0, len(self.label_group_images[tru_lbl]) - 1)
    tru_img = pil_loader(self.label_group_images[tru_lbl][pick])
    tru_age = self.label_group_ages[tru_lbl][pick]
    mean_age = self.mean_ages[tgt_lbl]
    if self.transforms is not None:
        src_img = self.transforms(src_img)
        tru_img = self.transforms(tru_img)
    return src_img, tru_img, src_lbl, tgt_lbl, tru_lbl, tru_age, mean_age
def __getitem__(self, index):
    """Sample one random image from the target label's group."""
    label = self.target_labels[index]
    img = pil_loader(random.choice(self.label_group_images[label]))
    if self.transforms is not None:
        img = self.transforms(img)
    return img, label
def __getitem__(self, index):
    """Retrieve a sample by index, together with its filename.

    Parameters
    ----------
    index : int

    Returns
    -------
    img : FloatTensor
    target : int
        Label of the image.
    filename : string
    """
    filename = self.file_names[index]
    target = self.labels[index]
    img = pil_loader(filename)
    if self.transform is not None:
        img = self.transform(img)
    if self.target_transform is not None:
        target = self.target_transform(target)
    return img, target, filename
def __getitem__(self, index):
    """Retrieve a sample by index.

    Parameters
    ----------
    index : int

    Returns
    -------
    img : FloatTensor
    target : dict
        Maps each category name to the index of this sample's value.
    """
    img = pil_loader(self.filenames[index])
    # Build {category: index} from the raw per-category values of this row.
    raw = self.values[index, :]
    target = {}
    for cat, val in zip(self.categories, raw):
        target[cat] = self._name_to_idx(cat, val)
    if self.transform is not None:
        img = self.transform(img)
    if self.target_transform is not None:
        target = self.target_transform(target)
    return img, target
def __getitem__(self, index):
    """Return (img, target); decodes *img* from a path when the dataset
    stores paths, then applies the joint transform if one is set."""
    img, target = self.data[index]
    if self._load_pil:
        # Stored entry is a path — decode it into a PIL image.
        img = pil_loader(img)
    if self.transforms is not None:
        img, target = self.transforms(img, target)
    return img, target
def __getitem__(self, index: int):
    """Load and (optionally) transform the image at *index*."""
    sample = pil_loader(self.paths[index])
    return sample if self.transform is None else self.transform(sample)
def __getitem__(self, index):
    """Retrieve a sample by index.

    Parameters
    ----------
    index : int

    Returns
    -------
    img : FloatTensor
    target : FloatTensor
        Multi-label target; any -1 entries are remapped to 0.
    """
    path = self.filenames[index]
    raw_target = self.labels[index]
    img = pil_loader(path)
    target = torch.from_numpy(raw_target.astype(np.float32))
    target[target == -1] = 0  # remap -1 labels to 0
    if self.transform is not None:
        img = self.transform(img)
    if self.target_transform is not None:
        target = self.target_transform(target)
    return img, target
def __getitem__(self, idx: int):
    """Return {"img": image} for the sample at *idx*."""
    image = pil_loader(self.imgs_paths[idx])
    # Idiom fix: was `if not self.transform is None` (PEP 8 prefers `is not`).
    if self.transform is not None:
        image = self.transform(image)
    return {"img": image}
class MuraImageDataset(Dataset):
    """MURA (musculoskeletal radiograph) image dataset.

    Args:
        df (dataframe): annotations; column 0 holds the image path relative
            to ``root_dir`` and column 2 the label.
        root_dir (string): directory with all the images.
        transform (callable, optional): optional transform applied to each
            loaded image.
    """

    def __init__(self, df, root_dir, transform=None):
        self.df = df
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.df)

    def __getitem__(self, idx):
        img_name = os.path.join(self.root_dir, self.df.iloc[idx, 0])
        image = pil_loader(img_name)
        labels = self.df.iloc[idx, 2].astype('float')
        if self.transform:
            image = self.transform(image)
        return [image, labels]
def __getitem__(self, idx: int):
    """Return (image, dummy_class) for the sample at *idx*."""
    image = pil_loader(self.imgs_paths[idx])
    # Idiom fix: was `if not self.transform is None` (PEP 8 prefers `is not`).
    if self.transform is not None:
        image = self.transform(image)
    return (image, self.dummy_class)
def process_resize_imgseq(target_dataset):
    """Resize each post's image sequence to 224x224 tensors and pickle them.

    Reads ``posts.csv`` from ``CONFIG.TARGET_PATH``; for every row, loads the
    images listed from column 7 onward, resizes/center-crops them to 224,
    stacks them into one array, and writes it to
    ``<DATA_PATH>/dataset/<target_dataset>/resize224/<post_id>.p``.
    Unreadable images are skipped with a printed warning.
    """
    img_transform = transforms.Compose([
        transforms.Resize(224),
        transforms.CenterCrop(224),
        transforms.ToTensor()
    ])
    dataset_path = os.path.join(CONFIG.DATA_PATH, 'dataset', target_dataset)
    out_dir = os.path.join(dataset_path, 'resize224')
    # FIX: replaces the racy exists()-then-mkdir pair; makedirs creates both
    # levels atomically and is a no-op when they already exist.
    os.makedirs(out_dir, exist_ok=True)
    df_data = pd.read_csv(os.path.join(CONFIG.TARGET_PATH, 'posts.csv'),
                          encoding='utf-8')
    pbar = tqdm(total=df_data.shape[0])
    for index, in_row in df_data.iterrows():
        pbar.update(1)
        images = []
        for image in in_row.iloc[7:]:
            if not pd.isna(image):
                image_path = os.path.join(CONFIG.TARGET_PATH, 'original',
                                          image)
                try:
                    images.append(img_transform(pil_loader(image_path)))
                except OSError as e:
                    print(e)
                    print(image_path)
        if len(images) > 0:
            image_data = torch.stack(images).detach().numpy()
            out_file = os.path.join(out_dir, in_row.iloc[1] + '.p')
            # FIX: removed the redundant f.close() inside the with-block;
            # the context manager already closes the file.
            with open(out_file, 'wb') as f:
                cPickle.dump(image_data, f)
            del image_data  # release the stacked array before the next post
    pbar.close()
def __init__(self, img_path: os.PathLike, transform: Optional[Callable] = None):
    """Load a single image eagerly, applying *transform* once at init time.

    Parameters
    ----------
    img_path : os.PathLike
        Path of the image to load.
    transform : Optional[Callable]
        Optional transform applied immediately to the loaded image.
    """
    self.image = pil_loader(img_path)
    # Idiom fix: was `if not transform is None` (PEP 8 prefers `is not`).
    if transform is not None:
        self.image = transform(self.image)
def __getitem__(self, idx):
    """Return (img, labels); labels are the int-cast tail of the list row."""
    row = self.img_list[idx]
    img = pil_loader(row[0])
    if self.transform is not None:
        img = self.transform(img)
    return img, row[1:].astype(int)
def __getitem__(self, index: int) -> Dict[str, torch.Tensor]:
    """Return {'image', 'boxes', 'labels'} for one detection sample.

    Boxes are [x_min, y_min, x_max, y_max]; labels are class ids shifted
    by +1 so that 0 is the background class, as torchvision expects.
    """
    image_file = self.file_list[index]
    image_id = image_file.stem
    image_metadata = self.metadata[self.metadata['image_id'] == image_id]
    if self.ignore_images_without_objects:
        image_metadata = image_metadata[
            image_metadata['class_id'] != self.background_class]
    boxes = []
    labels = []
    for row in image_metadata.itertuples(index=False):
        labels.append(
            row.class_id + 1)  # Torchvision expects 0 to be background
        boxes.append([row.x_min, row.y_min, row.x_max, row.y_max])
    labels = np.array(labels)
    boxes = np.array(boxes)
    if self.read_dicom:
        image = read_xray(image_file)
    else:
        image = np.array(pil_loader(image_file))
    result = {'image': image, 'boxes': boxes, 'labels': labels}
    if self.transform is not None:
        result = self.transform(image=image, bboxes=boxes, labels=labels)
        # Rename the transform's 'bboxes' field to torchvision's 'boxes'.
        # FIX: this rename must happen only on the transform's output —
        # previously result.pop('bboxes') also ran when transform was None,
        # raising KeyError because that dict already uses 'boxes'.
        result['boxes'] = result.pop('bboxes')
    return result
def __getitem__(self, index):
    """Return the raw PIL image and its integer age."""
    filename, age = self.items[index]
    image = pil_loader(join(self.root, filename))
    return image, int(age)
def __getitem__(self, idx):
    """Return (transformed image, label) for dataframe row *idx*."""
    image = self.transform(pil_loader(self.df.iloc[idx, 0]))
    label = self.df.iloc[idx, 1]
    return (image, label)
def __getitem__(self, index):
    """Return (img, identity label, age, gender) at *index*."""
    img = pil_loader(self.image_list[index])
    if self.transforms is not None:
        img = self.transforms(img)
    return img, self.ids[index], self.ages[index], self.genders[index]
def __getitem__(self, idx):
    """Return (image, label); the label is looked up from a path component."""
    path = self.paths[idx]
    image = pil_loader(path)
    # NOTE(review): split('/')[6] assumes a fixed directory depth (and
    # POSIX separators) — confirm against the actual dataset layout.
    label = int(self.label_dict[path.split('/')[6]])
    if self.transform is not None:
        image = self.transform(image)
    return image, label
def __getitem__(self, index):
    """Load and (optionally) transform the image named at *index*."""
    full_path = join(self.root, self.img_folder, self.image_names[index])
    img = pil_loader(full_path)
    return self.transform(img) if self.transform else img
def __getitem__(self, index):
    """Return (image, label) for the (path, label) pair at *index*."""
    file_path, label = self.samples[index]
    img = pil_loader(file_path)
    if self.transform is not None:
        img = self.transform(img)
    return img, label
def __getitem__(self, index):
    """Retrieve a sample by index.

    Parameters
    ----------
    index : int

    Returns
    -------
    Train mode: (img_a, img_p, img_n) — anchor, positive (same class as
    anchor), negative (different class).
    Eval mode: (img_a, label).
    """
    if not self.train:
        label = self.labels[index]
        if self.in_memory:
            anchor = self.data[index]
        else:
            anchor = pil_loader(self.file_names[index])
        if self.transform is not None:
            anchor = self.transform(anchor)
        return anchor, label

    a_idx, p_idx, n_idx = self.triplets[index]
    if self.in_memory:
        triplet = [self.data[i] for i in (a_idx, p_idx, n_idx)]
    else:
        triplet = [pil_loader(self.file_names[i]) for i in (a_idx, p_idx, n_idx)]
    if self.transform is not None:
        triplet = [self.transform(img) for img in triplet]
    anchor, positive, negative = triplet
    return anchor, positive, negative
def __getitem__(self, idx):
    """Return {'image': img, 'labels': int64 array} for index *idx*."""
    img_path = os.path.join(self._images_root, self._content[idx])
    img = pil_loader(img_path)
    if self._transform is not None:
        img = self._transform(img)
    return {'image': img,
            'labels': np.array(self._labels_list[idx], dtype=np.int64)}
def __getitem__(self, index):
    """Return (img, targets) after the joint image/target transform."""
    img_path, category = self.samples[index]
    img = pil_loader(img_path)
    targets = self._get_target(index, img_path, category, img.size)
    if self._transform is not None:
        img, targets = self._transform(img, targets)
    return img, targets