def __getitem__(self, index):
    """Load one sample directory: the Exposure column of its CSV as the
    label, plus every image frame resized to 1024x1024 and concatenated
    along dim 0.

    Returns (stacked_frames, label_tensor, numeric_name).
    """
    sample_dir = self.dir_filenames[index]
    entries = sorted(os.listdir(sample_dir))

    # The first sorted entry is the CSV holding the Exposure column;
    # its filename stem doubles as the sample's numeric name.
    exposure = pd.read_csv(join(sample_dir, entries[0])).Exposure
    label = torch.from_numpy(np.array(exposure)).float()
    name = int(entries[0].split('.')[0])

    frame_paths = [join(sample_dir, e) for e in entries if is_image_file(e)]

    stacked = Image.open(frame_paths[0]).resize((1024, 1024))
    if self.transform:
        stacked = self.transform(stacked)
    for frame_path in frame_paths[1:]:
        frame = Image.open(frame_path).resize((1024, 1024))
        if self.transform:
            frame = self.transform(frame)
        # Grow the channel dimension one frame at a time.
        stacked = torch.cat((stacked, frame), 0)
    return stacked, label, name
def __getitem__(self, index):
    """Load every image in a sample directory, resized to 4032x3024 and
    concatenated along dim 0.

    The numeric name is taken from the part after the first underscore of
    the first sorted filename's stem.

    Returns (stacked_frames, numeric_name).
    """
    sample_dir = self.dir_filenames[index]
    entries = sorted(os.listdir(sample_dir))
    name = int(entries[0].split('.')[0].split('_')[1])

    frame_paths = [join(sample_dir, e) for e in entries if is_image_file(e)]

    stacked = Image.open(frame_paths[0]).resize((4032, 3024))
    if self.transform:
        stacked = self.transform(stacked)
    for frame_path in frame_paths[1:]:
        frame = Image.open(frame_path).resize((4032, 3024))
        if self.transform:
            frame = self.transform(frame)
        # Grow the channel dimension one frame at a time.
        stacked = torch.cat((stacked, frame), 0)
    return stacked, name
def __getitem__(self, index):
    """Parse one whitespace-separated "path label" record from self.imgs
    and return (image_tensor, label)."""
    record = self.imgs[index].split()
    img_path = record[0]
    image = Image.open(img_path).resize((224, 224))
    tensor = self.transforms(image)
    label = np.int32(record[1])
    return tensor.float(), label
def __getitem__(self, index):
    """Load one image scaled to height 32 with aspect ratio preserved, and
    return (image, label) where the label is the digit string extracted
    from the filename.
    """
    img_name = self.fileList[index]
    data = PIL.Image.open('%s/%s' % (self.path, img_name))
    height = 32
    # Scale width by the same factor as height to keep the aspect ratio.
    width = int(data.size[0] / (data.size[1] / height))
    data = data.resize((width, height))
    if self.transform is not None:
        data = self.transform(data)
    # Raw strings for regex patterns: '\D' in an ordinary literal is an
    # invalid escape sequence (SyntaxWarning since Python 3.12, scheduled
    # to become a SyntaxError).
    label = re.search(r'([0-9_]+)', img_name).group(1)
    label = re.sub(r'\D', '', label)
    return data, label
def img_open(path):
    """Open the image at *path*, scale it to height 32 keeping the aspect
    ratio, and return it as a grayscale tensor with an extra leading
    dimension."""
    image = PIL.Image.open(path)
    target_height = 32
    scale = image.size[1] / target_height
    image = image.resize((int(image.size[0] / scale), target_height))
    pipeline = transforms.Compose([
        transforms.Grayscale(),
        transforms.ToTensor(),
        transforms.Lambda(lambda t: torch.unsqueeze(t, 0)),
    ])
    return pipeline(image)
def __getitem__(self, index):
    """Load one frame sequence and return it as a (T, C, H, W) float
    tensor with pixel values scaled to [0, 1]."""
    frames = []
    for frame_path in self.seq_list[index]:
        # NOTE(review): Image.ANTIALIAS was removed in Pillow 10;
        # Image.LANCZOS is the modern equivalent — kept as-is here.
        img = Image.open(frame_path).resize(self.size, Image.ANTIALIAS)
        pixels = np.asarray(img) / 255.0
        frames.append(torch.from_numpy(pixels).float())
    # Stack to (T, H, W, C), then move channels in front of the spatial dims.
    return torch.stack(frames).permute(0, 3, 1, 2)
def __getitem__(self, index):
    """Return (image_tensor, race_index).

    The race label is the index of the third-from-last path component of
    the image path within the fixed race list.
    """
    race_list = ['African', 'Caucasian', 'Asian', 'Indian']
    img_path = self.imgs[index].split()[0]
    race = race_list.index(img_path.split('/')[-3])
    pixels = np.array(Image.open(img_path).resize((128, 128)))
    pixels = self.transforms(pixels)
    pixels = np.transpose(pixels, (2, 0, 1))  # HWC -> CHW
    return torch.from_numpy(pixels).float(), np.int32(race)
def __getitem__(self, index):
    """Return (image, i, attr_label, id, cam, name) for one training record.

    The image is resized to 64x64 and normalized in place from [0, 255]
    to [-1, 1].
    """
    record = self.train_data[index]
    img_path, i, person_id, cam = record[0], record[1], record[2], record[3]
    attr_label = np.asarray(self.train_attr[person_id])
    image = Image.open(img_path).resize((64, 64))
    image = torch.FloatTensor(np.array(image, dtype=float))
    image = image.sub_(127.5).div_(127.5)  # map pixel range to [-1, 1]
    name = record[4]
    return image, i, attr_label, person_id, cam, name
def get_cla_sample(job):
    """Crop a cubic classification sample of side ``size`` from ``image``,
    centred at (x, y, z).

    job: tuple (image, x, y, z, dx, dy, size) — image is a 3-D numpy array
    indexed as [y, x, z] here; dx/dy is the lesion extent and size the
    desired cube side (assumed positive ints — TODO confirm against caller).

    Returns a numpy array of shape (size, size, size).

    Fix: the original used true division (``size / 2``), which yields a
    float in Python 3; float slice indices raise TypeError in NumPy and a
    float shape tuple is rejected by ndarray.resize. Integer division
    restores the intended behavior.
    """
    image, x, y, z, dx, dy, size = job
    if max(dx, dy) <= size + 1:
        # Lesion fits inside the target cube: crop at output size directly.
        r = size // 2
        # copy() yields an owning array; a view cannot be resized in place.
        data = image[y - r:y + r, x - r:x + r, z - r:z + r].copy()
        if data.shape != (size, size, size):
            # Crop was clipped at the volume border; resize zero-pads it.
            data.resize((size, size, size))
    else:
        # Lesion larger than the target cube: crop at lesion scale first.
        r = max(dx, dy) // 2
        data = image[y - r:y + r, x - r:x + r, z - r:z + r].copy()
        if data.shape != (2 * r, 2 * r, 2 * r):
            data.resize((2 * r, 2 * r, 2 * r))
        # Reduce the oversized crop to the requested output side.
        data.resize((size, size, size))
    return data