def __getitem__(self, index):
    """Return one stereo training/eval sample: (left_img, right_img, dataL).

    The two views are passed through the (non-augmenting) preprocess
    transform; dataL is the left-disparity map as a float torch tensor.
    Training mode takes a random 256x512 crop shared by both views and
    the disparity; eval mode takes a fixed bottom-right 1200x352 crop so
    every sample has the same size.
    """
    left = self.left[index]
    right = self.right[index]
    disp_L = self.disp_L[index]

    left_img = self.loader(left)
    right_img = self.loader(right)
    dataL = self.dploader(disp_L)

    if self.training:
        w, h = left_img.size
        th, tw = 256, 512
        # Sample one crop origin and apply it to both views and the
        # disparity so the stereo pair stays pixel-aligned.
        x1 = random.randint(0, w - tw)
        y1 = random.randint(0, h - th)
        left_img = left_img.crop((x1, y1, x1 + tw, y1 + th))
        right_img = right_img.crop((x1, y1, x1 + tw, y1 + th))
        dataL = dataL[y1:y1 + th, x1:x1 + tw]
    else:
        w, h = left_img.size
        # Fixed crop anchored at the bottom-right corner (drops the top
        # of the image, which typically has no ground-truth disparity).
        left_img = left_img.crop((w - 1200, h - 352, w, h))
        right_img = right_img.crop((w - 1200, h - 352, w, h))
        dataL = dataL[h - 352:h, w - 1200:w]

    # Identical normalization for both branches; created after the crop
    # so the original call order (and any RNG usage) is unchanged.
    processed = preprocess.get_transform(augment=False)
    left_img = processed(left_img)
    right_img = processed(right_img)

    # dataL is assumed to be a numpy array here — TODO confirm dploader.
    dataL = torch.from_numpy(dataL).float()
    return left_img, right_img, dataL
def __getitem__(self, index):
    """Return one stereo sample: (left_img, right_img, dataL).

    Both views go through the (non-augmenting) preprocess transform;
    dataL is the left-disparity map as a float32 numpy array, divided by
    256 (KITTI stores disparities as uint16 scaled by 256 — TODO confirm
    dploader format). Training mode takes a random 256x512 crop shared by
    both views and the disparity; eval mode takes a fixed bottom-right
    1232x368 crop so every sample has the same size.
    """
    left = self.left[index]
    right = self.right[index]
    disp_L = self.disp_L[index]

    left_img = self.loader(left)
    right_img = self.loader(right)
    dataL = self.dploader(disp_L)

    if self.training:
        w, h = left_img.size
        th, tw = 256, 512
        # Sample one crop origin and apply it to both views and the
        # disparity so the stereo pair stays pixel-aligned.
        x1 = random.randint(0, w - tw)
        y1 = random.randint(0, h - th)
        left_img = left_img.crop((x1, y1, x1 + tw, y1 + th))
        right_img = right_img.crop((x1, y1, x1 + tw, y1 + th))
        dataL = np.ascontiguousarray(dataL, dtype=np.float32) / 256
        dataL = dataL[y1:y1 + th, x1:x1 + tw]
    else:
        w, h = left_img.size
        # Fixed crop anchored at the bottom-right corner; dataL is still
        # a PIL image here, hence .crop before the numpy conversion.
        left_img = left_img.crop((w - 1232, h - 368, w, h))
        right_img = right_img.crop((w - 1232, h - 368, w, h))
        dataL = dataL.crop((w - 1232, h - 368, w, h))
        dataL = np.ascontiguousarray(dataL, dtype=np.float32) / 256

    # Shared tail: identical normalization and return for both branches.
    processed = preprocess.get_transform(augment=False)
    left_img = processed(left_img)
    right_img = processed(right_img)
    return left_img, right_img, dataL
def __getitem__(self, index):
    """Fetch one stereo sample: cropped views plus disparity tensor.

    A random crop is taken in both modes — 256x768 while training,
    512x1024 otherwise — with the same origin applied to both views and
    the disparity map, which is returned as a float torch tensor.
    """
    left_img = self.loader(self.left[index])
    right_img = self.loader(self.right[index])
    dataL = self.dploader(self.disp_L[index])

    # Crop extents depend on the mode; the origin is random either way.
    th, tw = (256, 768) if self.training else (512, 1024)
    w, h = left_img.size
    x1 = random.randint(0, w - tw)
    y1 = random.randint(0, h - th)

    box = (x1, y1, x1 + tw, y1 + th)
    left_img = left_img.crop(box)
    right_img = right_img.crop(box)
    dataL = dataL[y1:y1 + th, x1:x1 + tw]

    processed = preprocess.get_transform(augment=False)
    left_img = processed(left_img)
    right_img = processed(right_img)

    return left_img, right_img, torch.from_numpy(dataL).float()