Example #1
def main(args):
    print(args)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)

    if args.mode == "train":
        args.img_datatype = find_img_datatype(args.train_img_dir)
    elif args.mode == "sample":
        args.img_datatype = find_img_datatype(args.src_dir)

    solver = Solver(args)

    if args.mode == 'train':
        assert len(subdirs(args.train_img_dir)) == args.num_domains
        assert len(subdirs(args.val_img_dir)) == args.num_domains
        loaders = Munch(src=get_train_loader(root=args.train_img_dir,
                                             which='source',
                                             img_type=args.img_datatype,
                                             img_size=args.img_size,
                                             batch_size=args.batch_size,
                                             prob=args.randcrop_prob,
                                             num_workers=args.num_workers),
                        ref=get_train_loader(root=args.train_img_dir,
                                             which='reference',
                                             img_type=args.img_datatype,
                                             img_size=args.img_size,
                                             batch_size=args.batch_size,
                                             prob=args.randcrop_prob,
                                             num_workers=args.num_workers),
                        val=get_test_loader(root=args.val_img_dir,
                                            img_type=args.img_datatype,
                                            img_size=args.img_size,
                                            batch_size=args.val_batch_size,
                                            shuffle=False,
                                            num_workers=args.num_workers))
        solver.train(loaders)
    elif args.mode == 'sample':
        assert len(subdirs(args.src_dir)) == args.num_domains
        assert len(subdirs(args.ref_dir)) == args.num_domains
        loaders = Munch(src=get_test_loader(root=args.src_dir,
                                            img_type=args.img_datatype,
                                            img_size=args.img_size,
                                            batch_size=args.val_batch_size,
                                            shuffle=False,
                                            num_workers=args.num_workers),
                        ref=get_test_loader(root=args.ref_dir,
                                            img_type=args.img_datatype,
                                            img_size=args.img_size,
                                            batch_size=args.val_batch_size,
                                            shuffle=False,
                                            num_workers=args.num_workers))
        solver.sample(loaders)
    elif args.mode == 'eval':
        solver.evaluate()
    elif args.mode == 'align':
        from core.wing import align_faces
        align_faces(args, args.inp_dir, args.out_dir)
    else:
        raise NotImplementedError
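
The example above only defines main(args); a minimal sketch of the argparse entry point that would drive it is shown below. The parser is illustrative only, assuming a StarGAN v2-style CLI: the option names mirror attributes that main reads, but the actual argument definitions are not part of the example.

# Hypothetical entry point -- not from the original example.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', choices=['train', 'sample', 'eval', 'align'], default='train')
    parser.add_argument('--seed', type=int, default=777)
    # ... remaining options (img_size, batch_size, directory paths, etc.) omitted here
    main(parser.parse_args())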
Example #2
def main(_):
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.4)

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:

        if cfg.env_type == 'simple':
            env = SimpleGymEnvironment(cfg)
        else:
            env = GymEnvironment(cfg)

        if not os.path.exists('/tmp/model_dir'):
            os.mkdir('/tmp/model_dir')

        solver = Solver(cfg, env, sess, '/tmp/model_dir')

        solver.train()
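
The session above caps GPU memory at a fixed 40% of the device. A common TF1 alternative, sketched below and not taken from the example, is to let the allocation grow on demand when the final footprint is unknown in advance:

# Alternative (not from the example): grow GPU memory as needed rather than
# reserving a fixed fraction up front.
gpu_options = tf.GPUOptions(allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    ...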
Example #3
def main(args):
    print(args)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)

    solver = Solver(args)

    if args.mode == 'train':
        sintel_path = "/home/tomstrident/datasets/"
        video_id = "temple_2"
        test_loader = getTestDatasetLoader(sintel_path, video_id)
        train_loader, eval_loader = get_loaderFC2(
            args.data_dir, args.style_dir, args.temp_dir, args.batch_size,
            args.num_workers, args.num_domains, args.mode)
        print("start training ...")
        print("args.num_domains:", args.num_domains)
        solver.train([train_loader, test_loader])
    elif args.mode == 'sample':
        assert len(subdirs(args.src_dir)) == args.num_domains
        assert len(subdirs(args.ref_dir)) == args.num_domains
        loaders = Munch(src=get_test_loader(root=args.src_dir,
                                            img_size=args.img_size,
                                            batch_size=args.val_batch_size,
                                            shuffle=False,
                                            num_workers=args.num_workers),
                        ref=get_test_loader(root=args.ref_dir,
                                            img_size=args.img_size,
                                            batch_size=args.val_batch_size,
                                            shuffle=False,
                                            num_workers=args.num_workers))
        solver.sample(loaders)
    elif args.mode == 'eval':
        _, eval_loader = get_loaderFC2(args.data_dir, args.style_dir,
                                       args.temp_dir, args.batch_size,
                                       args.num_workers, args.num_domains,
                                       args.mode)
        print("len(eval_loader)", len(eval_loader))
        solver.evaluate(loader=eval_loader)
        #solver.eval_sintel()
    elif args.mode == 'align':
        from core.wing import align_faces
        align_faces(args, args.inp_dir, args.out_dir)
    else:
        raise NotImplementedError
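
The Sintel test loader in the 'train' branch is built from a hardcoded user path and video id. One way to make those configurable is sketched below; args.sintel_dir and args.video_id are hypothetical options that do not exist in the original example.

# Hypothetical variant of the 'train' branch setup -- the getattr defaults fall
# back to the hardcoded values used in the example.
sintel_path = getattr(args, 'sintel_dir', '/home/tomstrident/datasets/')
video_id = getattr(args, 'video_id', 'temple_2')
test_loader = getTestDatasetLoader(sintel_path, video_id)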
Example #4
def main(args):
    print(args)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)

    solver = Solver(args)
    if args.mode == 'train':
        loaders = Munch(src=get_train_loader(root=args.train_img_dir,
                                             which='source',
                                             img_size=args.img_size,
                                             batch_size=args.batch_size,
                                             prob=args.randcrop_prob,
                                             num_workers=args.num_workers,
                                             dataset_dir=args.dataset_dir),
                        ref=get_train_loader(root=args.train_img_dir,
                                             which='reference',
                                             img_size=args.img_size,
                                             batch_size=args.batch_size,
                                             prob=args.randcrop_prob,
                                             num_workers=args.num_workers,
                                             dataset_dir=args.dataset_dir),
                        val=get_val_loader(root=args.val_img_dir,
                                           img_size=args.img_size,
                                           batch_size=args.val_batch_size,
                                           shuffle=True,
                                           num_workers=args.num_workers,
                                           dataset_dir=args.dataset_dir))
        solver.train(loaders)

    elif args.mode == 'sample':  # added by hyun for the 'styling_ref' mode
        solver.sample()

        # added by hyun
        #parsing(respth='./results/label/src', dspth=os.path.join(args.src_dir, trg_domain))  # parsing src_image
        #parsing(respth='./results/label/others', dspth=os.path.join(args.result_dir, trg_domain))  # parsing fake_image
        #reconstruct()  # 'styling' mode

    elif args.mode == 'eval':
        fid_values, fid_mean = solver.evaluate()
        for key, value in fid_values.items():
            print(key, value)
    else:
        raise NotImplementedError
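
The 'eval' branch above prints each per-domain FID value and discards fid_mean. If the scores need to be kept, one option is to dump them to JSON, as sketched below; the output filename is illustrative and not part of the example.

# Sketch only: persist the FID results returned by solver.evaluate().
import json
fid_values, fid_mean = solver.evaluate()
with open('fid_results.json', 'w') as f:
    json.dump({'per_domain': fid_values, 'mean': fid_mean}, f, indent=2)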
Example #5
def train_net(cfg):
    net = ReconstructionNet(cfg)
    print('Network definition:')
    print(inspect.getsource(ReconstructionNet.network_definition))

    # Generate the solver
    solver = Solver(cfg, net)

    # Prefetching data processes
    #
    # Create worker and data queue for data processing. For training data, use
    # multiple processes to speed up the loading. For validation data, use 1
    # since the queue will be popped every TRAIN.NUM_VALIDATION_ITERATIONS.
    global train_queue, val_queue, train_processes, val_processes
    train_queue = Queue(cfg.TRAIN.QUEUE_SIZE)
    val_queue = Queue(cfg.TRAIN.QUEUE_SIZE)

    train_processes = make_processes(cfg,
                                     train_queue,
                                     category_model_id_pair(
                                         cfg.DIR.DATASET_TAXONOMY_FILE_PATH,
                                         cfg.DIR.DATASET_QUERY_PATH,
                                         cfg.TRAIN.DATASET_PORTION),
                                     cfg.TRAIN.NUM_WORKER,
                                     repeat=True)
    val_processes = make_processes(cfg,
                                   val_queue,
                                   category_model_id_pair(
                                       cfg.DIR.DATASET_TAXONOMY_FILE_PATH,
                                       cfg.DIR.DATASET_QUERY_PATH,
                                       cfg.TEST.DATASET_PORTION),
                                   1,
                                   repeat=True,
                                   train=False)

    # Train the network
    solver.train(train_queue, val_queue)

    # Cleanup the processes and the queue.
    kill_processes(train_queue, train_processes)
    kill_processes(val_queue, val_processes)
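
Note that kill_processes is only reached if solver.train returns normally. A try/finally wrapper, sketched below, would clean up the prefetching workers even when training raises; this is a suggested variant, not part of the original example.

# Sketch: ensure the worker processes and queues are cleaned up on errors too.
try:
    solver.train(train_queue, val_queue)
finally:
    kill_processes(train_queue, train_processes)
    kill_processes(val_queue, val_processes)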
Example #6
def main(config):
    # For fast training.
    cudnn.benchmark = True

    # Create directories if not exist.
    if not os.path.exists(config.log_dir):
        os.makedirs(config.log_dir)
    if not os.path.exists(config.model_save_dir):
        os.makedirs(config.model_save_dir)
    if not os.path.exists(config.sample_dir):
        os.makedirs(config.sample_dir)
    if not os.path.exists(config.result_dir):
        os.makedirs(config.result_dir)

    # Data loader.
    celeba_loader = None
    rafd_loader = None

    if config.dataset in ['CelebA', 'Both']:
        celeba_loader = get_loader(config.celeba_image_dir, config.attr_path, config.selected_attrs,
                                   config.celeba_crop_size, config.image_size, config.batch_size,
                                   'CelebA', config.mode, config.num_workers)
    if config.dataset in ['RaFD', 'Both']:
        rafd_loader = get_loader(config.rafd_image_dir, None, None,
                                 config.rafd_crop_size, config.image_size, config.batch_size,
                                 'RaFD', config.mode, config.num_workers)

    # Solver for training and testing StarGAN.
    solver = Solver(celeba_loader, rafd_loader, config)

    if config.mode == 'train':
        if config.dataset in ['CelebA', 'RaFD']:
            solver.train()
        elif config.dataset in ['Both']:
            solver.train_multi()
    elif config.mode == 'test':
        if config.dataset in ['CelebA', 'RaFD']:
            solver.test()
        elif config.dataset in ['Both']:
            solver.test_multi()
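
The four directory checks at the top of this example can be collapsed with os.makedirs(..., exist_ok=True); the loop below is an equivalent sketch rather than a change to the example itself.

# Equivalent to the four os.path.exists / os.makedirs checks above.
for d in (config.log_dir, config.model_save_dir, config.sample_dir, config.result_dir):
    os.makedirs(d, exist_ok=True)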
Example #7
def main(args):
    print(args)
    cudnn.benchmark = True
    if args.mode == 'train':
        torch.manual_seed(args.seed)

    solver = Solver(args)

    transform = transforms.Compose([
        transforms.Resize([args.img_size, args.img_size]),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
    ])

    if args.mode == 'train':
        assert len(subdirs(args.train_img_dir)) == args.num_domains
        assert len(subdirs(args.val_img_dir)) == args.num_domains
        if args.resume_iter > 0:
            solver._load_checkpoint(args.resume_iter)
        loaders = Munch(src=get_train_loader(root=args.train_img_dir,
                                             which='source',
                                             img_size=args.img_size,
                                             batch_size=args.batch_size,
                                             prob=args.randcrop_prob,
                                             num_workers=args.num_workers),
                        ref=get_train_loader(root=args.train_img_dir,
                                             which='reference',
                                             img_size=args.img_size,
                                             batch_size=args.batch_size,
                                             prob=args.randcrop_prob,
                                             num_workers=args.num_workers),
                        val=get_test_loader(root=args.val_img_dir,
                                            img_size=args.img_size,
                                            batch_size=args.val_batch_size,
                                            shuffle=True,
                                            num_workers=args.num_workers))
        solver.train(loaders)
    elif args.mode == 'eval':
        solver.evaluate()

    elif args.mode == 'align':
        from core.wing import align_faces
        align_faces(args, args.inp_dir, args.out_dir)

    elif args.mode == 'inter':  # interpolation
        save_dir = args.save_dir
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)

        solver._load_checkpoint(args.resume_iter)
        nets_ema = solver.nets_ema
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        image_name = os.path.basename(args.input)
        image = Variable(
            transform(Image.open(
                args.input).convert('RGB')).unsqueeze(0).to(device))
        masks = nets_ema.fan.get_heatmap(image) if args.w_hpf > 0 else None
        y1 = torch.tensor([args.y1]).long().cuda()
        y2 = torch.tensor([args.y2]).long().cuda()
        outputs = interpolations(nets_ema,
                                 args.latent_dim,
                                 image,
                                 masks,
                                 lerp_step=0.1,
                                 y1=y1,
                                 y2=y2,
                                 lerp_mode=args.lerp_mode)
        path = os.path.join(save_dir, image_name)
        vutils.save_image(outputs.data, path, padding=0)

    elif args.mode == 'test':
        save_dir = args.save_dir
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)

        solver._load_checkpoint(args.resume_iter)
        nets_ema = solver.nets_ema

        image_name = os.path.basename(args.input)
        image = Variable(
            transform(Image.open(
                args.input).convert('RGB')).unsqueeze(0)).cuda()
        masks = nets_ema.fan.get_heatmap(image) if args.w_hpf > 0 else None

        image_ref = None
        if args.test_mode == 'reference':
            image_ref = Variable(
                transform(Image.open(
                    args.input_ref).convert("RGB")).unsqueeze(0)).cuda()

        fake = test_single(nets_ema, image, masks, args.latent_dim, image_ref,
                           args.target_domain, args.single_mode)
        fake = torch.clamp(fake * 0.5 + 0.5, 0, 1)
        path = os.path.join(save_dir, image_name)
        vutils.save_image(fake.data, path, padding=0)

    elif args.mode == 'video':
        save_dir = args.save_dir
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)

        solver._load_checkpoint(args.resume_iter)
        nets_ema = solver.nets_ema

        image_name = os.path.basename(args.input)
        image = Variable(
            transform(Image.open(
                args.input).convert('RGB')).unsqueeze(0)).cuda()
        masks = nets_ema.fan.get_heatmap(image) if args.w_hpf > 0 else None

        y1 = torch.tensor([args.y1]).long().cuda()
        y2 = torch.tensor([args.y2]).long().cuda()
        outputs = interpolations_loop(nets_ema,
                                      args.latent_dim,
                                      image,
                                      masks,
                                      lerp_step=0.02,
                                      y1=y1,
                                      y2=y2,
                                      lerp_mode=args.lerp_mode)
        outputs = torch.cat(outputs)
        outputs = tensor2ndarray255(outputs)
        path = os.path.join(save_dir, '{}-video.mp4'.format(image_name))
        save_video(path, outputs)

    else:
        raise NotImplementedError
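
The 'inter', 'test', and 'video' branches wrap the input tensor in torch.autograd.Variable, which has been a no-op since PyTorch 0.4. A sketch of the same single-image preprocessing without it, not taken from the example, is shown below.

# Sketch: Variable is unnecessary in modern PyTorch, and torch.no_grad()
# avoids building an autograd graph during inference.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
with torch.no_grad():
    image = transform(Image.open(args.input).convert('RGB')).unsqueeze(0).to(device)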
Example #8
def main(args):
    print(args)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)

    solver = Solver(args)

    # create (or truncate) the loss csv file; the header row is written below
    with open(args.loss_csv_path, 'w', newline='') as csvfile:
        filewriter = csv.writer(csvfile,
                                delimiter=',',
                                quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)

    with open(args.loss_csv_path, 'a', newline='') as file:
        writer = csv.writer(file)
        writer.writerow([
            "epoch", "d_loss_z_trg", "d_loss_x_ref", "g_loss_z_trg",
            "g_loss_x_ref"
        ])

    if args.mode == 'train':
        assert len(subdirs(args.train_img_dir)) == args.num_domains
        assert len(subdirs(args.val_img_dir)) == args.num_domains
        loaders = Munch(src=get_train_loader(root=args.train_img_dir,
                                             which='source',
                                             img_size=args.img_size,
                                             batch_size=args.batch_size,
                                             prob=args.randcrop_prob,
                                             num_workers=args.num_workers),
                        src_skt=get_train_loader(
                            root=args.train_sketch_img_dir,
                            which='source',
                            img_size=args.img_size,
                            batch_size=args.batch_size,
                            prob=args.randcrop_prob,
                            num_workers=args.num_workers),
                        ref=get_train_loader(root=args.train_img_dir,
                                             which='reference',
                                             img_size=args.img_size,
                                             batch_size=args.batch_size,
                                             prob=args.randcrop_prob,
                                             num_workers=args.num_workers),
                        val=get_test_loader(root=args.val_img_dir,
                                            img_size=args.img_size,
                                            batch_size=args.val_batch_size,
                                            shuffle=True,
                                            num_workers=args.num_workers))
        solver.train(loaders)
    elif args.mode == 'sample':
        assert len(subdirs(args.src_dir)) == args.num_domains
        assert len(subdirs(args.ref_dir)) == args.num_domains
        loaders = Munch(src=get_test_loader(root=args.src_dir,
                                            img_size=args.img_size,
                                            batch_size=args.val_batch_size,
                                            shuffle=True,
                                            num_workers=args.num_workers),
                        ref=get_test_loader(root=args.ref_dir,
                                            img_size=args.img_size,
                                            batch_size=args.val_batch_size,
                                            shuffle=True,
                                            num_workers=args.num_workers))
        solver.sample(loaders)
    elif args.mode == 'eval':
        solver.evaluate()
    elif args.mode == 'align':
        from core.wing import align_faces
        align_faces(args, args.inp_dir, args.out_dir)
    else:
        raise NotImplementedError
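
The CSV header written above lists per-epoch generator and discriminator losses, but the example never appends any rows. A hypothetical helper for doing so from the training loop is sketched below; neither the function nor its call site exists in the original code.

# Hypothetical helper -- column order matches the header written in the example.
import csv

def log_losses(csv_path, epoch, d_loss_z_trg, d_loss_x_ref, g_loss_z_trg, g_loss_x_ref):
    with open(csv_path, 'a', newline='') as f:
        csv.writer(f).writerow(
            [epoch, d_loss_z_trg, d_loss_x_ref, g_loss_z_trg, g_loss_x_ref])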
Example #9
def main(args):
    print(args)
    #wandb.init(project="stargan", entity="stacey", config=args, name=args.model_name)
    #cfg = wandb.config
    #cfg.update({"dataset" : "afhq", "type" : "train"})
    cudnn.benchmark = True
    torch.manual_seed(args.seed)

    solver = Solver(args)

    if args.mode == 'train':
        assert len(subdirs(args.train_img_dir)) == args.num_domains
        assert len(subdirs(args.val_img_dir)) == args.num_domains
        loaders = Munch(src=get_train_loader(root=args.train_img_dir,
                                             which='source',
                                             img_size=args.img_size,
                                             batch_size=args.batch_size,
                                             prob=args.randcrop_prob,
                                             num_workers=args.num_workers),
                        ref=get_train_loader(root=args.train_img_dir,
                                             which='reference',
                                             img_size=args.img_size,
                                             batch_size=args.batch_size,
                                             prob=args.randcrop_prob,
                                             num_workers=args.num_workers),
                        val=get_test_loader(root=args.val_img_dir,
                                            img_size=args.img_size,
                                            batch_size=args.val_batch_size,
                                            shuffle=True,
                                            num_workers=args.num_workers))
        solver.train(loaders)
    elif args.mode == 'sample':
        assert len(subdirs(args.src_dir)) == args.num_domains
        assert len(subdirs(args.ref_dir)) == args.num_domains
        loaders = Munch(src=get_test_loader(root=args.src_dir,
                                            img_size=args.img_size,
                                            batch_size=args.val_batch_size,
                                            shuffle=False,
                                            num_workers=args.num_workers),
                        ref=get_test_loader(root=args.ref_dir,
                                            img_size=args.img_size,
                                            batch_size=args.val_batch_size,
                                            shuffle=False,
                                            num_workers=args.num_workers))
        solver.sample(loaders)
    elif args.mode == 'eval':
        solver.evaluate(args)
    elif args.mode == 'align':
        from core.wing import align_faces
        align_faces(args, args.inp_dir, args.out_dir)
    elif args.mode == 'custom':
        # override some default arguments
        wandb.init(project="stargan", config=args, name=args.model_name)
        # src or ref may each be a dir or an image
        # make temporary folders for images
        if os.path.isfile(args.custom_src):
            src_dir = "tmp_src"
            full_src = src_dir + "/src"
            if os.path.exists(src_dir):
                shutil.rmtree(src_dir)
            os.makedirs(full_src)
            shutil.copy2(args.custom_src, full_src)
            src_images = src_dir
        else:
            src_images = args.custom_src
        if os.path.isfile(args.custom_ref):
            ref_dir = "tmp_ref"
            full_ref = ref_dir + "/ref"
            if os.path.exists(ref_dir):
                shutil.rmtree(ref_dir)
            os.makedirs(full_ref)
            shutil.copy2(args.custom_ref, full_ref)
            if args.extend_domain:
                # make some extra domains
                for d in [ref_dir + "/ref2", ref_dir + "/ref3"]:
                    os.makedirs(d)
                    shutil.copy2(args.custom_ref, d)
            ref_images = ref_dir
        else:
            ref_images = args.custom_ref
        loaders = Munch(src=get_test_loader(root=src_images,
                                            img_size=args.img_size,
                                            batch_size=args.val_batch_size,
                                            shuffle=False,
                                            num_workers=args.num_workers),
                        ref=get_test_loader(root=ref_images,
                                            img_size=args.img_size,
                                            batch_size=args.val_batch_size,
                                            shuffle=False,
                                            num_workers=args.num_workers))
        solver.custom(loaders)
    else:
        raise NotImplementedError
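
The 'custom' branch copies single source/reference images into temporary tmp_src and tmp_ref folders but never removes them. A small cleanup sketch is shown below; it is a suggested addition, not part of the example.

# Sketch: remove the temporary folders created for 'custom' mode after sampling.
for tmp_dir in ('tmp_src', 'tmp_ref'):
    if os.path.isdir(tmp_dir):
        shutil.rmtree(tmp_dir)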