# --- Example 1 ---
# Configuration defaults. NOTE(review): `_C` is presumably a yacs-style
# CfgNode created earlier in the file — not visible here; confirm.

# Channel depths per scale — presumably depthScales[i] is the feature-map
# count at progressive-GAN scale i (coarse scales wider, fine scales narrower)
_C.depthScales = [512, 512, 512, 512, 256, 128, 64, 32, 16]

# Mini-batch size used during training
_C.miniBatchSize = 16

# Dimension of the latent (noise) vector fed to the generator
_C.dimLatentVector = 512

# Should biases be initialized to zero?
_C.initBiasToZero = True

# Apply per-channel normalization
_C.perChannelNormalization = True

# Loss mode ('WGANGP' = Wasserstein GAN with gradient penalty)
_C.lossMode = 'WGANGP'
# Enable the auxiliary-classifier (AC-GAN) head?
_C.ac_gan = False

# Gradient penalty coefficient (used with the WGANGP loss mode)
_C.lambdaGP = 10.

# Negative slope ("leakiness") of the LeakyReLU activation
_C.leakyness = 0.2

# Weight of the penalty on |D(x)|^2
_C.epsilonD = 0.001

# Enable mini-batch standard-deviation regularization
_C.miniBatchStdDev = True

# Base learning rate
# NOTE(review): the value line for this key is missing — the example was
# truncated here by the scrape (Example 2 below sets baseLearningRate = 0.0002)
# --- Example 2 ---
# Dimension of the discriminator's feature maps
_C.dimD = 64

# Image dimension — presumably square images (width == height); TODO confirm
_C.imageSize = 64

# Learning rate for optimizers
# NOTE(review): commented-out legacy key, superseded by baseLearningRate below
# _C.learningRate = 0.0002

# Base learning rate
_C.baseLearningRate = 0.0002

# Beta1 hyperparameter for the Adam optimizers
_C.beta1 = 0.5

# Loss mode ('DCGAN' here, vs. 'WGANGP' in Example 1)
_C.lossMode = 'DCGAN'

# Gradient penalty coefficient (WGANGP only; 0 disables it)
_C.lambdaGP = 0.

# Noise standard deviation for instance noise (0 <=> no instance noise)
_C.sigmaNoise = 0.

# Weight of the penalty on |D(x)|^2 (0 disables it)
_C.epsilonD = 0.

# In case of AC-GAN, weight of the classification loss (per scale);
# 0.0 disables the conditional term for generator / discriminator
_C.weightConditionG = 0.0
_C.weightConditionD = 0.0

# Activate GDPP loss ?