# Example #1
    def __getitem__(self, index):
        """Return one (images, flow) training sample; slices are supported.

        A slice is expanded into a list of individual samples. For an int
        index, the two input frames are loaded, center-cropped to the
        render size, optionally resized and transformed, then combined
        into a single tensor; the matching ground-truth flow receives the
        same crop/resize and is returned as a CHW float tensor.
        """
        if isinstance(index, slice):
            # Expand the slice into per-item lookups via this same method.
            return [self[pos] for pos in range(*index.indices(len(self)))]

        index = index % self.size

        # Load the consecutive frame pair for this sample.
        frame_a = frame_utils.read_gen(self.image_list[index][0])
        frame_b = frame_utils.read_gen(self.image_list[index][1])

        # Center-crop both frames (and later the flow) to the render size.
        crop = StaticCenterCrop(frame_a.shape[:2], self.render_size)
        frame_a = crop(frame_a)
        frame_b = crop(frame_b)

        if self.image_size:
            # cv2.resize takes (width, height), hence the reversed order.
            target = (self.image_size[1], self.image_size[0])
            frame_a = cv2.resize(frame_a, target, interpolation=cv2.INTER_LINEAR)
            frame_b = cv2.resize(frame_b, target, interpolation=cv2.INTER_LINEAR)

        if self.transform:
            frame_a = self.transform(frame_a)
            frame_b = self.transform(frame_b)

        # Either add a new leading dimension (stack) or join along the
        # existing first dimension (cat), depending on configuration.
        combine = torch.stack if self.stack_imgs else torch.cat
        images = combine((frame_a, frame_b))

        # Ground-truth flow gets the identical crop/resize treatment,
        # then HWC -> CHW before wrapping as a tensor.
        flow = frame_utils.read_gen(self.flow_list[index]).astype(np.float32)
        flow = crop(flow)
        if self.image_size:
            flow = resize_flow(flow, self.image_size[0], self.image_size[1])
        flow = torch.from_numpy(flow.transpose(2, 0, 1))

        return images, flow
# Example #2
    def __getitem__(self, index):
        """Return one (images, flow, occ) training sample; slices are supported.

        A slice is expanded into a list of individual samples. For an int
        index, the two input frames are loaded, center-cropped, optionally
        resized/transformed, and combined into one tensor; the ground-truth
        flow receives the same crop/resize, and the occlusion mask is
        cropped, resized, and binarized at a 0.5 threshold.
        """
        if isinstance(index, slice):
            # Expand the slice into per-item lookups via this same method.
            return [self[pos] for pos in range(*index.indices(len(self)))]

        index = index % self.size

        # --- image pair ---
        frame_a = frame_utils.read_gen(self.image_list[index][0])
        frame_b = frame_utils.read_gen(self.image_list[index][1])

        # Center-crop everything (images, flow, occlusion) to render size.
        crop = StaticCenterCrop(frame_a.shape[:2], self.render_size)
        frame_a = crop(frame_a)
        frame_b = crop(frame_b)

        if self.image_size:
            # cv2.resize takes (width, height), hence the reversed order.
            target = (self.image_size[1], self.image_size[0])
            frame_a = cv2.resize(frame_a, target, interpolation=cv2.INTER_LINEAR)
            frame_b = cv2.resize(frame_b, target, interpolation=cv2.INTER_LINEAR)

        if self.transform:
            frame_a = self.transform(frame_a)
            frame_b = self.transform(frame_b)

        # Either add a new leading dimension (stack) or join along the
        # existing first dimension (cat), depending on configuration.
        combine = torch.stack if self.stack_imgs else torch.cat
        images = combine((frame_a, frame_b))

        # --- flow ---
        flow = frame_utils.read_gen(self.flow_list[index]).astype(np.float32)
        flow = crop(flow)
        if self.image_size:
            flow = resize_flow(flow, self.image_size[0], self.image_size[1])
        flow = torch.from_numpy(flow.transpose(2, 0, 1))

        # --- occlusion mask ---
        occ = frame_utils.read_gen(self.occ_list[index]).astype(np.float32)
        occ = crop(occ)
        occ = transforms.ToTensor()(occ)
        if self.image_size:
            occ = transforms.Resize(self.image_size)(occ)
        # Binarize: values above 0.5 become 1.0, everything else 0.0
        # (cleans up interpolation artifacts from the resize).
        occ[occ > 0.5] = 1.0
        occ[occ != 1.0] = 0.0

        return images, flow, occ