Example no. 1
0
def main():
    """Run generator inference for each configured checkpoint.

    Each entry lists the resolution, checkpoint directory, whether to use the
    custom CUDA ops, and an optional output filename for the rendered image.
    """
    allow_memory_growth()

    runs = [
        {
            'res': 1024,
            'ckpt_dir': './official-converted/cuda',
            'use_custom_cuda': True,
            'out_fn': None,
        },
        {
            'res': 256,
            'ckpt_dir':
            '/mnt/vision-nas/moono/trained_models/stylegan2-tf-2.x/gold/stylegan2-ffhq-256x256',
            'use_custom_cuda': True,
            'out_fn': 'out_256x256_0.png',
        },
    ]

    for cfg in runs:
        res, ckpt_dir = cfg['res'], cfg['ckpt_dir']
        use_custom_cuda, out_fn = cfg['use_custom_cuda'], cfg['out_fn']

        # announce which configuration is about to run
        suffix = 'with custom cuda' if use_custom_cuda else 'without custom cuda'
        print(f'{res}x{res} {suffix}')
        inference(ckpt_dir, use_custom_cuda, res, out_fn)
    return
Example no. 2
0
def main():
    """Convert official weights for both model variants, then cross-check them.

    Saves the converted checkpoints once per variant (custom-CUDA and
    reference), then runs inference for every (saved weight, model variant)
    combination to verify the two implementations are interchangeable.
    """
    from tf_utils import allow_memory_growth

    allow_memory_growth()

    # common variables
    ckpt_dir_base = './official-converted'
    ckpt_dir_cuda = os.path.join(ckpt_dir_base, 'cuda')
    ckpt_dir_ref = os.path.join(ckpt_dir_base, 'ref')

    # saving phase: export once per variant
    convert_official_weights_together(ckpt_dir_cuda, True)
    convert_official_weights_together(ckpt_dir_ref, False)

    # inference phase: all four weight/model pairings
    # 1. cuda-saved weights loaded into the cuda model
    test_generator(ckpt_dir_cuda, use_custom_cuda=True, out_fn='from-cuda-to-cuda.png')
    # 2. cuda-saved weights loaded into the reference model
    test_generator(ckpt_dir_cuda, use_custom_cuda=False, out_fn='from-cuda-to-ref.png')
    # 3. ref-saved weights loaded into the reference model
    test_generator(ckpt_dir_ref, use_custom_cuda=False, out_fn='from-ref-to-ref.png')
    # 4. ref-saved weights loaded into the cuda model
    test_generator(ckpt_dir_ref, use_custom_cuda=True, out_fn='from-ref-to-cuda.png')
    return
Example no. 3
0
def main():
    """Convert the official weights for both the custom-CUDA and reference models."""
    from tf_utils import allow_memory_growth

    allow_memory_growth()

    base_dir = './official-converted'
    for use_custom_cuda in (True, False):
        # custom-CUDA weights go under 'cuda', reference weights under 'ref'
        variant = 'cuda' if use_custom_cuda else 'ref'
        convert_official_weights_together(os.path.join(base_dir, variant),
                                          use_custom_cuda)
    return
Example no. 4
0
def main():
    """Parse CLI options, assemble network params, and launch distributed training.

    Reads training configuration from the command line, prepares the GPU
    environment, builds generator/discriminator parameter dicts truncated at
    the requested training resolution, and trains under a MirroredStrategy.
    """
    # command-line arguments
    arg_parser = argparse.ArgumentParser(description='')
    arg_parser.add_argument('--allow_memory_growth', type=str_to_bool, nargs='?', const=True, default=True)
    arg_parser.add_argument('--debug_split_gpu', type=str_to_bool, nargs='?', const=True, default=False)
    arg_parser.add_argument('--use_tf_function', type=str_to_bool, nargs='?', const=True, default=True)
    arg_parser.add_argument('--use_custom_cuda', type=str_to_bool, nargs='?', const=True, default=True)
    arg_parser.add_argument('--model_base_dir', default='./models', type=str)
    arg_parser.add_argument('--tfrecord_dir', default='./tfrecords', type=str)
    arg_parser.add_argument('--train_res', default=256, type=int)
    arg_parser.add_argument('--shuffle_buffer_size', default=1000, type=int)
    arg_parser.add_argument('--batch_size_per_replica', default=4, type=int)
    args = vars(arg_parser.parse_args())

    # check tensorflow version
    cur_tf_ver = check_tf_version()

    # GPU environment settings
    if args['allow_memory_growth']:
        allow_memory_growth()
    if args['debug_split_gpu']:
        split_gpu_for_testing(mem_in_gb=4.5)

    # network params: full schedule, truncated at the training resolution
    all_res = [4, 8, 16, 32, 64, 128, 256, 512, 1024]
    all_fmaps = [512, 512, 512, 512, 512, 256, 128, 64, 32]
    res_list, fmap_list = filter_resolutions_featuremaps(all_res, all_fmaps, args['train_res'])
    g_params = {
        'z_dim': 512,
        'w_dim': 512,
        'labels_dim': 0,
        'n_mapping': 8,
        'resolutions': res_list,
        'featuremaps': fmap_list,
    }
    d_params = {
        'labels_dim': 0,
        'resolutions': res_list,
        'featuremaps': fmap_list,
    }

    # prepare distribute strategy; global batch spans all replicas
    strategy = tf.distribute.MirroredStrategy()
    global_batch_size = args['batch_size_per_replica'] * strategy.num_replicas_in_sync

    # prepare dataset
    dataset = get_ffhq_dataset(args['tfrecord_dir'], args['train_res'], batch_size=global_batch_size, epochs=None)

    with strategy.scope():
        # distribute dataset
        dist_dataset = strategy.experimental_distribute_dataset(dataset)

        # training parameters
        training_parameters = {
            # global params
            'cur_tf_ver': cur_tf_ver,
            'use_tf_function': args['use_tf_function'],
            'use_custom_cuda': args['use_custom_cuda'],
            'model_base_dir': args['model_base_dir'],

            # network params
            'g_params': g_params,
            'd_params': d_params,

            # training params
            'g_opt': {'learning_rate': 0.002, 'beta1': 0.0, 'beta2': 0.99, 'epsilon': 1e-08, 'reg_interval': 8},
            'd_opt': {'learning_rate': 0.002, 'beta1': 0.0, 'beta2': 0.99, 'epsilon': 1e-08, 'reg_interval': 16},
            'batch_size': global_batch_size,
            'n_total_image': 25000000,
            'n_samples': 3,
            'train_res': args['train_res'],
        }

        trainer = Trainer(training_parameters, name=f'stylegan2-ffhq-{args["train_res"]}x{args["train_res"]}')
        trainer.train(dist_dataset, strategy)
    return