Example #1
0
def main(is_debug):
    """Entry point: train the face enhancer on the cardio-dance 512 dataset.

    Args:
        is_debug: when True, the Trainer logs and checkpoints after every
            single batch instead of using its defaults.
    """
    # Fixed configuration for this run.
    dataset_dir = '/content/everybody_dance_now_pytorch/datasets/cardio_dance_512'
    pose_name = '/content/everybody_dance_now_pytorch/datasets/cardio_dance_512/poses.npy'
    ckpt_dir = '/content/everybody_dance_now_pytorch/checkpoints/dance_test_new_down2_res6'
    log_dir = '/content/everybody_dance_now_pytorch/logs/dance_test_new_down2_res6'
    batch_num = 0
    batch_size = 64

    cache_path = os.path.join(dataset_dir, 'local.db')
    image_folder = dataset.ImageFolderDataset(dataset_dir, cache=cache_path)
    # crop_size: 48 for 512-frame, 96 for HD frame
    face_dataset = dataset.FaceCropDataset(
        image_folder, pose_name, image_transforms, crop_size=48)
    data_loader = DataLoader(
        face_dataset, batch_size=batch_size, drop_last=True,
        num_workers=4, shuffle=True)

    generator, discriminator, batch_num = load_models(ckpt_dir, batch_num)

    # Debug mode: report and save on every batch.
    trainer_kwargs = {'log_every': 1, 'save_every': 1} if is_debug else {}
    trainer = Trainer(ckpt_dir, log_dir, face_dataset, data_loader,
                      **trainer_kwargs)
    trainer.train(generator, discriminator, batch_num)
Example #2
0
def main(is_debug):
    """Train the face enhancer against the local '../data/face' dataset.

    Args:
        is_debug: when True, the Trainer logs and checkpoints every batch.
    """
    import os

    # Run configuration.
    dataset_dir = '../data/face'
    pose_name = '../data/target/pose.npy'
    ckpt_dir = '../checkpoints/face'
    log_dir = '../checkpoints/face/logs'
    batch_num = 10
    batch_size = 10

    db_cache = os.path.join(dataset_dir, 'local.db')
    image_folder = dataset.ImageFolderDataset(dataset_dir, cache=db_cache)
    face_dataset = dataset.FaceCropDataset(
        image_folder, pose_name, image_transforms,
        crop_size=48)  # 48 for 512-frame, 96 for HD frame
    loader_opts = dict(batch_size=batch_size, drop_last=True,
                       num_workers=4, shuffle=True)
    data_loader = DataLoader(face_dataset, **loader_opts)

    generator, discriminator, batch_num = load_models(ckpt_dir, batch_num)

    if is_debug:
        # Debug: report and checkpoint on every single batch.
        trainer = Trainer(ckpt_dir, log_dir, face_dataset, data_loader,
                          log_every=1, save_every=1)
    else:
        trainer = Trainer(ckpt_dir, log_dir, face_dataset, data_loader)
    trainer.train(generator, discriminator, batch_num)
Example #3
0
def test_face_enhancer(target_dir, source_dir, run_name):
    """Run the trained face-enhancer generator over a test set and write
    the enhanced full frames to the results directory as PNGs.

    Args:
        target_dir: directory containing the 'face_transfer' test images.
        source_dir: directory containing 'pose_source_norm.npy' (per-frame
            head coordinates).
        run_name: run identifier; checkpoints are read from
            checkpoints/<run_name>/face and results written under
            results/<run_name>_enhanced.
    """
    torch.backends.cudnn.benchmark = True
    checkpoints_dir = os.path.join(dir_name, '../../checkpoints')
    dataset_dir = os.path.join(target_dir, 'face_transfer')   # save test_sync in this folder
    pose_name = os.path.join(source_dir, 'pose_source_norm.npy')  # coordinate save every heads
    ckpt_dir = os.path.join(checkpoints_dir, run_name, 'face')
    result_dir = os.path.join(dir_name, '../../results')
    save_dir = os.path.join(result_dir, run_name + '_enhanced', 'test_latest/images')
    os.makedirs(save_dir, exist_ok=True)

    image_folder = dataset.ImageFolderDataset(dataset_dir, cache=os.path.join(dataset_dir, 'local.db'), is_test=True)
    face_dataset = dataset.FaceCropDataset(image_folder, pose_name, image_transforms, crop_size=48)
    length = len(face_dataset)
    print('Picture number', length)

    # Fix: single-argument os.path.join(ckpt_dir) was a no-op; pass the
    # checkpoint directory directly.
    generator = load_models(ckpt_dir)

    for i in tqdm(range(length)):
        _, fake_head, top, bottom, left, right, real_full, fake_full \
            = face_dataset.get_full_sample(i)

        with torch.no_grad():
            fake_head.unsqueeze_(0)  # add batch dimension in place
            fake_head = fake_head.to(device)
            # Generator predicts a residual that is added back onto its input.
            residual = generator(fake_head)
            enhanced = fake_head + residual

        enhanced.squeeze_()
        enhanced = torch2numpy(enhanced)
        # Paste the enhanced head crop back into the full synthesized frame.
        # Fix: removed dead `fake_full_old = fake_full.copy()` — the copy was
        # never read and duplicated the full frame on every iteration.
        fake_full[top: bottom, left: right, :] = enhanced

        # Swap the first and last channels before writing.
        # NOTE(review): names suggest BGR->RGB, but cv2.imwrite expects BGR —
        # the swap presumably converts the dataset's RGB frame; confirm.
        b, g, r = cv2.split(fake_full)
        fake_full = cv2.merge([r, g, b])
        cv2.imwrite(os.path.join(save_dir, '{:05}_synthesized_image.png'.format(i)), fake_full)
Example #4
0
def train_face_enhancer(target_dir, run_name, is_debug):
    """Train the face enhancer for a given transfer run.

    Args:
        target_dir: directory holding the 'face' images and 'pose_source.npy'.
        run_name: run identifier; checkpoints live under
            checkpoints/<run_name>/face.
        is_debug: when True, the Trainer logs and checkpoints every batch.
    """
    checkpoints_dir = os.path.join(dir_name, '../../checkpoints')
    dataset_dir = os.path.join(target_dir, 'face')
    pose_name = os.path.join(target_dir, 'pose_source.npy')
    ckpt_dir = os.path.join(checkpoints_dir, run_name, 'face')
    log_dir = os.path.join(ckpt_dir, 'logs')
    batch_num = 10
    batch_size = 10

    image_folder = dataset.ImageFolderDataset(
        dataset_dir, cache=os.path.join(dataset_dir, 'local.db'))
    # crop_size: 48 for 512-frame, 96 for HD frame
    face_dataset = dataset.FaceCropDataset(
        image_folder, pose_name, image_transforms, crop_size=48)
    data_loader = DataLoader(
        face_dataset,
        batch_size=batch_size,
        drop_last=True,
        num_workers=4,
        shuffle=True,
    )

    generator, discriminator, batch_num = load_models(ckpt_dir, batch_num)

    # Debug mode tightens logging/checkpointing to every batch.
    extra = {'log_every': 1, 'save_every': 1} if is_debug else {}
    trainer = Trainer(ckpt_dir, log_dir, face_dataset, data_loader, **extra)
    trainer.train(generator, discriminator, batch_num)
Example #5
0
def main():
    """Train the face enhancer on the 'dance_test' dataset with default
    Trainer logging/checkpointing settings."""
    # Run configuration.
    dataset_dir = 'data/dance_test'
    pose_name = 'data/dance_test/dance_poses.npy'
    ckpt_dir = 'checkpoints/dance_test'
    log_dir = 'logs/dance_test'
    batch_num = 0
    batch_size = 64

    cache_file = os.path.join(dataset_dir, 'local.db')
    image_folder = dataset.ImageFolderDataset(dataset_dir, cache=cache_file)
    face_dataset = dataset.FaceCropDataset(
        image_folder, pose_name, image_transforms)
    # num_workers=0: load samples in the main process.
    data_loader = DataLoader(face_dataset, batch_size=batch_size,
                             drop_last=True, num_workers=0, shuffle=True)

    generator, discriminator, batch_num = load_models(ckpt_dir, batch_num)

    trainer = Trainer(ckpt_dir, log_dir, face_dataset, data_loader)
    trainer.train(generator, discriminator, batch_num)
Example #6
0
if __name__ == '__main__':
    torch.backends.cudnn.benchmark = True
    dataset_dir = '../data/face'  # save test_sync in this folder
    pose_name = '../data/source/pose_source_norm.npy'  # coordinate save every heads
    ckpt_dir = '../checkpoints/face'
    result_dir = './results'
    save_dir = dataset_dir + '/full_fake/'

    if not os.path.exists(save_dir):
        print('generate %s' % save_dir)
        os.mkdir(save_dir)
    else:
        print(save_dir, 'is existing...')

    image_folder = dataset.ImageFolderDataset(dataset_dir,
                                              cache=os.path.join(
                                                  dataset_dir, 'local.db'),
                                              is_test=True)
    face_dataset = dataset.FaceCropDataset(image_folder,
                                           pose_name,
                                           image_transforms,
                                           crop_size=48)
    length = len(face_dataset)
    print('Picture number', length)

    generator = load_models(os.path.join(ckpt_dir))

    for i in tqdm(range(length)):
        _, fake_head, top, bottom, left, right, real_full, fake_full \
            = face_dataset.get_full_sample(i)

        with torch.no_grad():