Example #1
0
        256: 8
    }
    # --- Multi-GPU preset. The two commented-out one-liners are alternative
    # 2-GPU and 4-GPU presets kept for quick switching; the active lines below
    # select the 8-GPU configuration.
    # NOTE(review): the dict keys look like image resolutions mapped to
    # minibatch sizes — confirm against the consumer of sched.minibatch_dict.
    #desc += '-2gpu'; submit_config.num_gpus = 2; sched.minibatch_base = 8; sched.minibatch_dict = {4: 256, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16, 256: 8}
    #desc += '-4gpu'; submit_config.num_gpus = 4; sched.minibatch_base = 16; sched.minibatch_dict = {4: 512, 8: 256, 16: 128, 32: 64, 64: 32, 128: 16}
    desc += '-8gpu'  # tag the run description with the GPU count
    submit_config.num_gpus = 8
    sched.minibatch_base = 32
    sched.minibatch_dict = {
        4: 512,
        8: 256,
        16: 128,
        32: 64,
        64: 32
    }
    # --- Mixed-precision preset: run both networks in float16 and enable
    # loss scaling in both optimizers.
    desc += '-fp16'  # tag the run description with the precision mode
    G.dtype = 'float16'
    D.dtype = 'float16'
    # NOTE(review): epsilon presumably raised for float16 numerical
    # stability — confirm against the pixelnorm implementation.
    G.pixelnorm_epsilon = 1e-4
    G_opt.use_loss_scaling = True
    D_opt.use_loss_scaling = True  #; sched.max_minibatch_per_gpu = {512: 16, 1024: 8}

    # Default options.
    train.total_kimg = 25000  # total training length, in thousands of images (per the name)
    sched.lod_initial_resolution = 8
    # Per-resolution learning rates; resolutions not listed presumably fall
    # back to a scheduler default — confirm in the scheduler code.
    sched.G_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
    # Copy (not alias) G's dict so later edits to one don't affect the other.
    sched.D_lrate_dict = EasyDict(sched.G_lrate_dict)

    # WGAN-GP loss for CelebA-HQ.
    # Alternative loss preset (disabled): switches both losses to WGAN /
    # WGAN-GP and caps G's learning rates at 0.002.
    #desc += '-wgangp'; G_loss = EasyDict(func_name='training.loss.G_wgan'); D_loss = EasyDict(func_name='training.loss.D_wgan_gp'); sched.G_lrate_dict = {k: min(v, 0.002) for k, v in sched.G_lrate_dict.items()}; sched.D_lrate_dict = EasyDict(sched.G_lrate_dict)

    # Table 1.