Esempio n. 1
0
                        type=str,
                        default='samples/',
                        help='output directory for samples')
    # Path to the pretrained generator weights (MXNet .params checkpoint).
    parser.add_argument('--path',
                        type=str,
                        default='./stylegan-ffhq-1024px-new.params',
                        help='path to checkpoint file')

    args = parser.parse_args()
    # Device selection: the sentinel '-1' means CPU; any other value is
    # parsed as a single GPU index.
    if args.gpu_id == '-1':
        device = mx.cpu()
    else:
        device = mx.gpu(int(args.gpu_id.strip()))

    # Build the generator, move its parameters to the chosen device, and
    # load the pretrained checkpoint given by --path.
    generator = StyledGenerator(code_dim=512)
    generator.initialize()
    generator.collect_params().reset_ctx(device)
    generator.load_parameters(args.path, ctx=device)

    # Mean latent/style vector — presumably used for style truncation when
    # sampling (see get_mean_style / sample, defined elsewhere in this file).
    mean_style = get_mean_style(generator, device)

    # Progressive-growing step from the target resolution:
    # log2(size) - 2 (e.g. size=1024 -> step 8).
    step = int(math.log(args.size, 2)) - 2

    imgs = sample(generator, step, mean_style, args.n_sample, device)

    # Ensure the output directory exists. exist_ok=True makes this a single
    # atomic-intent call, avoiding the racy check-then-create pattern of
    # isdir() followed by makedirs().
    os.makedirs(args.out_dir, exist_ok=True)

    # Write each generated image to <out_dir>/sample_<i>.png.
    # NOTE(review): the save_image(...) call continues past this excerpt.
    for i in range(args.n_sample):
        save_image(imgs[i],
                   os.path.join(args.out_dir, 'sample_{}.png'.format(i)),
Esempio n. 2
0
    # Send log records both to the console and to a file
    # (filehandler is configured above this excerpt).
    streamhandler = logging.StreamHandler()
    logger = logging.getLogger('')  # root logger, so all modules inherit handlers
    logger.setLevel(logging.INFO)
    logger.addHandler(filehandler)
    logger.addHandler(streamhandler)
    logger.info(args)  # record the full CLI configuration at startup

    # Resolve the training contexts from --gpu_ids: the sentinel '-1'
    # selects CPU only; otherwise each non-blank comma-separated entry is
    # interpreted as a GPU index.
    if args.gpu_ids == '-1':
        context = [mx.cpu()]
    else:
        gpu_tokens = (tok for tok in args.gpu_ids.split(',') if tok.strip())
        context = [mx.gpu(int(tok)) for tok in gpu_tokens]

    # Build the generator and place its parameters on every selected context
    # (multi-GPU data parallelism via Gluon).
    generator = StyledGenerator(code_size)
    generator.initialize(ctx=context)
    generator.collect_params().reset_ctx(context)

    # Adam with beta1=0.0, beta2=0.99 — matches common StyleGAN training
    # settings (not verified against the original paper here).
    g_optimizer = gluon.Trainer(generator.collect_params(),
                                optimizer='adam',
                                optimizer_params={
                                    'learning_rate': args.lr_default,
                                    'beta1': 0.0,
                                    'beta2': 0.99
                                },
                                kvstore='local')

    # Set a different learning rate for style by setting the lr_mult of 0.01
    # NOTE(review): the 'hybridsequential2' prefix presumably matches the
    # style-mapping network's auto-generated parameter names — confirm
    # against StyledGenerator's parameter naming before changing.
    for k in generator.collect_params().keys():
        if k.startswith('hybridsequential2'):
            generator.collect_params()[k].lr_mult = 0.01