# Example no. 1 (scraped example-site marker, score 0)
    def initialize(self, opt):
        """Index 3-frame training clips from every sample folder under opt.dataroot.

        For each video folder, every valid position j yields one sample:
        target frames j, j+1, j+2 plus one randomly chosen other frame of
        the same video used as the appearance source.
        """
        self.opt = opt
        self.root = opt.dataroot
        self.input_paths = []
        self.dataset_size = 0
        self.dp = DensePose((opt.loadSize, opt.loadSize),
                            oob_ocluded=True,
                            naive_warp=opt.naive_warp)

        sample_folders = os.listdir(opt.dataroot)
        # os.listdir order is filesystem-dependent; sort for a deterministic
        # dataset, consistent with the other initialize() implementations.
        sample_folders.sort(key=natural_keys)
        for folder in sample_folders:
            current_path = os.path.join(opt.dataroot, folder)
            dp_target_folders = make_dataset(os.path.join(current_path, "dp_target"))
            target_folders = make_dataset(os.path.join(current_path, "target"))
            dp_target_folders.sort(key=natural_keys)
            target_folders.sort(key=natural_keys)
            for j in range(0, len(target_folders) - 2):
                # Any frame except j may serve as the appearance source.
                # list(...) keeps the concatenation valid on Python 3 too
                # (range objects cannot be joined with + there).
                candidates = list(range(0, j)) + list(range(j + 1, len(target_folders)))
                source_index = random.choice(candidates)
                self.input_paths.append({
                    'dp_target': [dp_target_folders[j]],
                    'target': [target_folders[j]],
                    'source': target_folders[source_index],
                    'dp_source': dp_target_folders[source_index],
                })
                for k in range(1, 3):
                    self.input_paths[-1]['dp_target'].append(dp_target_folders[j + k])
                    self.input_paths[-1]['target'].append(target_folders[j + k])

                self.dataset_size += 1
    def initialize(self, opt):
        """Collect input paths for test-time video generation.

        In transfer mode, (source, driving) video pairs are read from the
        CSV at opt.transfer_file; otherwise every sample folder under
        opt.dataroot is driven by its own frames.
        """
        self.opt = opt
        self.root = opt.dataroot
        self.input_paths = []
        self.dp = DensePose((opt.loadSize, opt.loadSize),
                            oob_ocluded=True,
                            naive_warp=opt.naive_warp)

        if opt.transfer:
            # Gather every CSV column into a list keyed by the column name.
            columns = defaultdict(list)
            with open(opt.transfer_file) as f:
                for row in csv.DictReader(f):
                    for key, value in row.items():
                        columns[key].append(value)

            # print(...) works on both Python 2 and 3; the original
            # Python-2-only `print x` statement fails to parse on 3.
            print(len(columns["source"]))
            for src_name, drv_name in zip(columns["source"], columns["driving"]):
                # CSV entries are video file names; the frame folders are
                # named after the video without its ".mp4" suffix.
                src_dir = os.path.join(opt.dataroot, src_name.split(".mp4")[0])
                drv_dir = os.path.join(opt.dataroot, drv_name.split(".mp4")[0])
                src_dp = make_dataset(os.path.join(src_dir, "dp_target"))
                drv_dp = make_dataset(os.path.join(drv_dir, "dp_target"))
                src_targets = make_dataset(os.path.join(src_dir, "target"))
                src_dp.sort(key=natural_keys)
                src_targets.sort(key=natural_keys)
                drv_dp.sort(key=natural_keys)
                self.input_paths.append({
                    'dp_target': drv_dp,
                    'dp_source': src_dp[0],
                    'source': src_targets[0],
                    'path': src_name + drv_name,
                })
            self.dataset_size = len(columns["source"])
        else:
            sample_folders = os.listdir(opt.dataroot)
            sample_folders.sort(key=natural_keys)
            for folder in sample_folders:
                current_path = os.path.join(opt.dataroot, folder)
                dp_paths = make_dataset(os.path.join(current_path, "dp_target"))
                target_paths = make_dataset(os.path.join(current_path, "target"))
                dp_paths.sort(key=natural_keys)
                target_paths.sort(key=natural_keys)
                self.input_paths.append({
                    'dp_target': dp_paths,
                    'dp_source': dp_paths[0],
                    'source': target_paths[0],
                    'path': folder,
                })
            self.dataset_size = len(sample_folders)
# Example no. 3 (scraped example-site marker, score 0)
class Train_Dataset(BaseDataset):
    """Training dataset of 3-frame clips with DensePose warp grids.

    Each item holds three consecutive target frames of one video, their
    DensePose renderings, one randomly chosen source frame of the same
    video, and the warp grids (source -> frame and frame -> frame).
    """

    def initialize(self, opt):
        """Scan opt.dataroot and build the list of 3-frame sample paths."""
        self.opt = opt
        self.root = opt.dataroot
        self.input_paths = []
        self.dataset_size = 0
        self.dp = DensePose((opt.loadSize, opt.loadSize),
                            oob_ocluded=True,
                            naive_warp=opt.naive_warp)

        sample_folders = os.listdir(opt.dataroot)
        # os.listdir order is filesystem-dependent; sort for a deterministic
        # dataset, consistent with the test datasets in this file.
        sample_folders.sort(key=natural_keys)
        for folder in sample_folders:
            current_path = os.path.join(opt.dataroot, folder)
            dp_target_folders = make_dataset(os.path.join(current_path, "dp_target"))
            target_folders = make_dataset(os.path.join(current_path, "target"))
            dp_target_folders.sort(key=natural_keys)
            target_folders.sort(key=natural_keys)
            for j in range(0, len(target_folders) - 2):
                # Any frame except j may serve as the appearance source.
                # list(...) keeps the concatenation valid on Python 3 too.
                candidates = list(range(0, j)) + list(range(j + 1, len(target_folders)))
                source_index = random.choice(candidates)
                self.input_paths.append({
                    'dp_target': [dp_target_folders[j]],
                    'target': [target_folders[j]],
                    'source': target_folders[source_index],
                    'dp_source': dp_target_folders[source_index],
                })
                for k in range(1, 3):
                    self.input_paths[-1]['dp_target'].append(dp_target_folders[j + k])
                    self.input_paths[-1]['target'].append(target_folders[j + k])

                self.dataset_size += 1

    def _warp_grid_tensor(self, np_from_dp, np_to_dp):
        # DensePose warp grid between two frames as a (C, H, W) float tensor.
        grid = self.dp.get_grid_warp(np_from_dp, np_to_dp)
        return torch.from_numpy(grid).float().permute(2, 0, 1)

    def __getitem__(self, index):
        """Load one 3-frame sample.

        Returns a dict with 'paths' plus, stacked along dim 0 over the three
        frames: 'input' (DensePose of the target), 'target' (target RGB),
        'source_frame' (the source RGB repeated), 'grid' (warp from the
        previous frame; the first frame reuses the source warp) and
        'grid_source' (warp from the source frame).
        """
        transform_img = get_transform(self.opt, {})
        current_paths = self.input_paths[index]
        output = {'paths': current_paths}
        result_dict = {'input': [], 'target': [], 'source_frame': [],
                       'grid': [], 'grid_source': []}

        img_source_tensor = transform_img(
            Image.open(current_paths['source']).convert('RGB'))
        np_source_dp = np.array(
            Image.open(current_paths['dp_source']).convert('RGB'))

        np_prev_dp = None  # DensePose array of the previous frame, if any
        for frame in range(3):
            target_img = Image.open(current_paths['target'][frame]).convert('RGB')
            result_dict['target'].append(transform_img(target_img))

            dp_img = Image.open(current_paths['dp_target'][frame]).convert('RGB')
            np_target_dp = np.array(dp_img)
            result_dict['input'].append(transform_img(dp_img))

            grid_source = self._warp_grid_tensor(np_source_dp, np_target_dp)
            if np_prev_dp is None:
                # First frame has no predecessor: fall back to the source warp.
                grid = grid_source
            else:
                grid = self._warp_grid_tensor(np_prev_dp, np_target_dp)

            result_dict['source_frame'].append(img_source_tensor)
            result_dict['grid'].append(grid)
            result_dict['grid_source'].append(grid_source)
            np_prev_dp = np_target_dp

        # .items() (not Python-2-only .iteritems()) keeps this portable.
        for key, value in result_dict.items():
            output[key] = torch.stack(value, dim=0)

        return output

    def __len__(self):
        # Truncate to a whole number of batches.
        return self.dataset_size // self.opt.batchSize * self.opt.batchSize

    def name(self):
        return 'Train_Dataset'
class Video_Test_Dataset_Runtime(BaseDataset):
    """Test-time dataset: one item is a whole driving video plus warp grids.

    In transfer mode, (source, driving) video pairs are read from the CSV
    at opt.transfer_file; otherwise every sample folder under opt.dataroot
    is driven by its own frames.
    """

    def initialize(self, opt):
        """Build the per-video list of frame paths."""
        self.opt = opt
        self.root = opt.dataroot
        self.input_paths = []
        self.dp = DensePose((opt.loadSize, opt.loadSize),
                            oob_ocluded=True,
                            naive_warp=opt.naive_warp)

        if opt.transfer:
            # Gather every CSV column into a list keyed by the column name.
            columns = defaultdict(list)
            with open(opt.transfer_file) as f:
                for row in csv.DictReader(f):
                    for key, value in row.items():
                        columns[key].append(value)

            # print(...) works on both Python 2 and 3; the original
            # Python-2-only `print x` statement fails to parse on 3.
            print(len(columns["source"]))
            for src_name, drv_name in zip(columns["source"], columns["driving"]):
                # Frame folders are named after the video minus ".mp4".
                src_dir = os.path.join(opt.dataroot, src_name.split(".mp4")[0])
                drv_dir = os.path.join(opt.dataroot, drv_name.split(".mp4")[0])
                src_dp = make_dataset(os.path.join(src_dir, "dp_target"))
                drv_dp = make_dataset(os.path.join(drv_dir, "dp_target"))
                src_targets = make_dataset(os.path.join(src_dir, "target"))
                src_dp.sort(key=natural_keys)
                src_targets.sort(key=natural_keys)
                drv_dp.sort(key=natural_keys)
                self.input_paths.append({
                    'dp_target': drv_dp,
                    'dp_source': src_dp[0],
                    'source': src_targets[0],
                    'path': src_name + drv_name,
                })
            self.dataset_size = len(columns["source"])
        else:
            sample_folders = os.listdir(opt.dataroot)
            sample_folders.sort(key=natural_keys)
            for folder in sample_folders:
                current_path = os.path.join(opt.dataroot, folder)
                dp_paths = make_dataset(os.path.join(current_path, "dp_target"))
                target_paths = make_dataset(os.path.join(current_path, "target"))
                dp_paths.sort(key=natural_keys)
                target_paths.sort(key=natural_keys)
                self.input_paths.append({
                    'dp_target': dp_paths,
                    'dp_source': dp_paths[0],
                    'source': target_paths[0],
                    'path': folder,
                })
            self.dataset_size = len(sample_folders)

    def _load_transformed(self, path):
        # Open one image and apply the per-image transform.
        # NOTE(review): get_params is re-run for every image, so any random
        # augmentation would differ between frames — presumably augmentation
        # is disabled at test time; confirm against the options.
        img = Image.open(path)
        transform_img = get_transform(self.opt, get_params(self.opt, img.size))
        return transform_img(img.convert('RGB'))

    def _load_np_rgb(self, path):
        # Image file -> H x W x 3 numpy array.
        return np.array(Image.open(path).convert('RGB'))

    def _warp_grid_tensor(self, np_from_dp, np_to_dp):
        # DensePose warp grid between two frames as a (C, H, W) float tensor.
        grid = self.dp.get_grid_warp(np_from_dp, np_to_dp)
        return torch.from_numpy(grid).float().permute(2, 0, 1)

    def __getitem__(self, index):
        """Load the full driving sequence for sample `index`.

        Returns a dict with 'dp_target' (all driving DensePose frames,
        stacked), 'dp_source', 'source_frame', 'grid' (frame i -> i+1 warps,
        one fewer than the frame count), 'grid_source' (source -> frame
        warps) and 'path'.
        """
        current_paths = self.input_paths[index]

        dp_target = torch.stack(
            [self._load_transformed(p) for p in current_paths['dp_target']], 0)
        dp_source_tensor = self._load_transformed(current_paths['dp_source'])
        source_tensor = self._load_transformed(current_paths['source'])

        # Decode each DensePose frame once and reuse it for both kinds of
        # grid (the original re-opened and re-decoded every file twice).
        np_source_dp = self._load_np_rgb(current_paths['dp_source'])
        np_frames = [self._load_np_rgb(p) for p in current_paths['dp_target']]

        # Source -> frame warp for every driving frame.
        grid_source_tensor = torch.stack(
            [self._warp_grid_tensor(np_source_dp, np_dp) for np_dp in np_frames], 0)

        # Consecutive frame i -> frame i+1 warps (zip replaces the
        # Python-2-only xrange index loop).
        grid_tensor = torch.stack(
            [self._warp_grid_tensor(prev_dp, cur_dp)
             for prev_dp, cur_dp in zip(np_frames[:-1], np_frames[1:])], 0)

        return {
            'dp_target': dp_target,
            'dp_source': dp_source_tensor,
            'source_frame': source_tensor,
            'grid': grid_tensor,
            'grid_source': grid_source_tensor,
            'path': current_paths['path'],
        }

    def __len__(self):
        # Truncate to a whole number of batches.
        return self.dataset_size // self.opt.batchSize * self.opt.batchSize

    def name(self):
        return 'Video_Test_Dataset_Runtime'