Example #1
 def readTrainConfig(self, config):
     r"""
     Load a permanent configuration describing a model. The variables
     described in this file are constant throughout training.
     """
     self.modelConfig = BaseConfig()
     getConfigFromDict(self.modelConfig, config, self.getDefaultConfig())
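For reference, a minimal sketch of what a helper like getConfigFromDict could do, assuming it copies defaults onto the target object and overrides them with user-supplied values (the fallback behaviour is an assumption, not this repo's exact code):

def getConfigFromDict(obj, inputDict, defaultConfig):
    # Copy every attribute of the default configuration onto obj,
    # overriding it with the value from inputDict when one is given.
    # Sketch only; the real helper may also validate keys or types.
    if inputDict is None:
        inputDict = {}
    for key, defaultValue in vars(defaultConfig).items():
        setattr(obj, key, inputDict.get(key, defaultValue))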
Example #2
    def __init__(self,
                 depthScales,
                 dimLatentVector=512,
                 initBiasToZero=True,
                 leakyness=0.2,
                 perChannelNormalization=True,
                 miniBatchStdDev=False,
                 equalizedlR=True,
                 sizeScale0=[8, 8],
                 transposed=False,
                 **kwargs):
        r"""
        Args:

        Specific Arguments:
            - depthScales (list)
            - initBiasToZero (bool): should the layers' biases be initialized
                                     to zero?
            - leakyness (float): negative slope of the LeakyReLU activation
                                 function
            - perChannelNormalization (bool): should the output of each
                                              convolutional layer be normalized?
            - miniBatchStdDev (bool): mini-batch regularization for the
                                      discriminator
            - equalizedlR (bool): if True, force the optimizer to see weights
                                  in the range (-1, 1)

        """
        if not hasattr(self, 'config'):
            self.config = BaseConfig()

        # HACK to have different depth scales in G and D
        # We assume that depthScales[0][0] is G and [0][1] D
        if isinstance(depthScales[0], list):
            self.config.depthScale0 = depthScales[0][0]
            self.config.depthScale0D = depthScales[0][1]
        else:
            self.config.depthScale0 = depthScales[0]
            self.config.depthScale0D = depthScales[0]

        self.config.initBiasToZero = initBiasToZero
        self.config.leakyReluLeak = leakyness
        self.config.depthOtherScales = []
        self.config.perChannelNormalization = perChannelNormalization
        self.config.alpha = 0
        self.config.miniBatchStdDev = miniBatchStdDev
        self.config.equalizedlR = equalizedlR
        self.config.sizeScale0 = sizeScale0
        self.config.nScales = len(depthScales)
        self.config.output_shape = kwargs.get('output_shape')
        self.config.downSamplingFactor = kwargs.get('downSamplingFactor')
        self.initScaleShapes()

        self.config.transposed = transposed
        BaseGAN.__init__(self, dimLatentVector, **kwargs)
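A hypothetical call illustrating the two accepted depthScales layouts (the class name and channel counts here are made up for illustration):

# Same depth for G and D at scale 0:
net = ProgressiveGAN(depthScales=[128, 128, 64, 32])

# Different depths for G (128) and D (96) at scale 0, per the HACK above:
net = ProgressiveGAN(depthScales=[[128, 96], 128, 64, 32])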
Example #3
def test_substitutions():
    """Test substitution of environment variables in lists and  maps
    """
    with mock.patch("builtins.open", mock.mock_open(read_data=subs_yaml)), \
         mock.patch.dict("os.environ", clear=True,
                         values={"MYPACKAGE": my_package,
                                 "MYREPO": my_repo}):
        config = BaseConfig("vars.yaml")
        assert len(config.packages.additional_pkgs) == 1
        assert config.packages.additional_pkgs[0] == my_package
        assert config.packages.additional_repos["my_repo"] == my_repo
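The subs_yaml fixture is not shown; a document along these lines would satisfy the assertions, assuming BaseConfig expands ${VAR} references from the environment (the exact substitution syntax is an assumption):

# Hypothetical fixture consistent with the assertions above.
subs_yaml = """
packages:
  additional_pkgs:
    - ${MYPACKAGE}
  additional_repos:
    my_repo: ${MYREPO}
"""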
Example #4
def test_defaults():
    """Test defaults according to the [documentation](../README.md)
    """
    with mock.patch("builtins.open", mock.mock_open(read_data=empty_yaml)), \
         mock.patch.dict("os.environ", clear=True,
                         values={"WORKSPACE": workspace,
                                 "HOME": home,
                                 "USER": user}):
        config = BaseConfig("vars.yaml")
        assert config.skuba.workdir == workspace
        assert config.skuba.binpath == os.path.join(workspace, skuba_binpath)
        assert config.skuba.cluster == "test-cluster"
        assert config.terraform.tfdir == os.path.join(workspace,
                                                      terraform_tfdir)
        assert config.terraform.stack_name == user
        assert config.terraform.workdir == workspace
        assert config.terraform.tfvars == tfvars
        assert config.terraform.plugin_dir is None
        assert config.utils.ssh_key == os.path.join(home, ssh_key)
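Here empty_yaml only needs to parse to an empty mapping so that every documented default applies; for example (an assumption, the real fixture may differ):

empty_yaml = "{}"  # hypothetical: any YAML yielding no overrides works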
Example #5
    def __init__(self,
                 dimLatentVector,
                 dimOutput,
                 learning_rate,
                 useGPU=True,
                 lossMode='WGANGP',
                 ac_gan=False,
                 attribKeysOrder=None,
                 skipAttDfake=None,
                 weightConditionD=0.0,
                 weightConditionG=0.0,
                 logisticGradReal=0.0,
                 lambdaGP=0.,
                 epsilonD=0.,
                 GDPP=False,
                 GDPP_weigth=1,
                 mix_true_fake=False,
                 mix_true_fake_scale=-1,
                 true_fake_split=-1,
                 soft_labels=False,
                 iter_d_g_ratio=-1,
                 formatLayerType="RandomZ",
                 generationActivation=None,
                 **kwargs):
        r"""
        Args:
            dimLatentVector (int): dimension of the latent vector in the model
            dimOutput (int): number of channels of the output image
            useGPU (bool): set to True if the computation should be distributed
                           across the available GPUs
            learning_rate (float): target learning rate.
            lossMode (string): loss used by the model. Must be one of the
                               following options
                              * 'MSE': mean squared error loss.
                              * 'DCGAN': cross entropy loss
                              * 'WGANGP': https://arxiv.org/pdf/1704.00028.pdf
                              * 'Logistic': https://arxiv.org/pdf/1801.04406.pdf
            attribKeysOrder (dict): if not None, activate AC-GAN. In this case,
                                    both the generator and the discriminator
                                    are trained on labelled data.
            weightConditionD (float): in AC-GAN, weight of the classification
                                      loss applied to the discriminator
            weightConditionG (float): in AC-GAN, weight of the classification
                                      loss applied to the generator
            logisticGradReal (float): gradient penalty for the logistic loss
            lambdaGP (float): if > 0, weight of the gradient penalty (WGANGP)
            epsilonD (float): if > 0, penalty on |D(X)|**2
            GDPP (bool): if true activate GDPP loss https://arxiv.org/abs/1812.00068

        """

        # These parameters should go in a trainer class or similar
        self.mix_true_fake = mix_true_fake
        self.mix_true_fake_scale = mix_true_fake_scale
        self.true_fake_split = true_fake_split
        self.soft_labels = soft_labels
        self.iter_D_G_ratio = iter_d_g_ratio
        ####################################################
        if lossMode not in ['MSE', 'WGANGP', 'DCGAN', 'Logistic']:
            raise ValueError(
                "lossMode should be one of the following : ['MSE', 'WGANGP', \
                'DCGAN', 'Logistic']")

        if 'config' not in vars(self):
            self.config = BaseConfig()

        if 'trainTmp' not in vars(self):
            self.trainTmp = BaseConfig()

        self.useGPU = useGPU and torch.cuda.is_available()
        if self.useGPU:
            self.device = torch.device("cuda:0")
            self.n_devices = torch.cuda.device_count()
        else:
            self.device = torch.device("cpu")
            self.n_devices = 1
        # Latent vector dimension
        self.config.noiseVectorDim = dimLatentVector

        # Output image dimension
        self.config.dimOutput = dimOutput

        # Actual learning rate
        self.config.learning_rate = learning_rate

        # Input formatLayer type
        self.config.formatLayerType = formatLayerType

        # AC-GAN ?
        self.config.skipAttDfake = skipAttDfake
        self.config.attribKeysOrder = deepcopy(attribKeysOrder)
        self.config.categoryVectorDim = 0
        self.config.categoryVectorDim_G = 0
        self.config.weightConditionG = weightConditionG
        self.config.weightConditionD = weightConditionD
        self.ClassificationCriterion = None
        self.config.ac_gan = ac_gan
        self.initializeClassificationCriterion()

        # GDPP
        self.config.GDPP = GDPP
        self.config.GDPP_weigth = GDPP_weigth

        self.config.latentVectorDim = self.config.noiseVectorDim \
            + self.config.categoryVectorDim_G

        # Loss criterion
        self.config.lossCriterion = lossMode
        self.lossCriterion = getattr(base_loss_criterions,
                                     lossMode)(self.device)

        # Override the loss criterion's generation activation if 'tanh' was requested
        if generationActivation == "tanh":
            self.lossCriterion.generationActivation = nn.Tanh()

        # WGAN-GP
        self.config.lambdaGP = lambdaGP

        # Weight on D's output
        self.config.epsilonD = epsilonD

        # Initialize the generator and the discriminator
        self.netD = self.getNetD()
        self.netG = self.getNetG()

        # Move the networks to the gpu
        self.updateSolversDevice()

        # Logistic loss
        self.config.logisticGradReal = logisticGradReal

        # Register grads?
        self.register_grads = False
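For context, the gradient penalty that lambdaGP weights in 'WGANGP' mode typically looks like the following standalone sketch (Gulrajani et al., 2017); this is not necessarily this repo's exact implementation:

import torch

def WGANGPGradientPenalty(real, fake, discriminator, weight):
    # Penalize the discriminator when the gradient norm at a random
    # interpolate between real and fake samples departs from 1. Sketch only.
    batchSize = real.size(0)
    eps = torch.rand(batchSize, 1, 1, 1, device=real.device)
    interpolates = (eps * real + (1. - eps) * fake).requires_grad_(True)
    decision = discriminator(interpolates).sum()
    gradients = torch.autograd.grad(decision, interpolates,
                                    create_graph=True)[0]
    gradNorm = gradients.view(batchSize, -1).norm(2, dim=1)
    return weight * ((gradNorm - 1.) ** 2).mean()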
Example #6
from utils.config import BaseConfig
# Default configuration for ProgressiveGANTrainer
_C = BaseConfig()

# Maximum number of iterations at each scale
_C.maxIterAtScale = [48000, 96000, 96000,
                     96000, 96000, 96000, 96000, 96000, 200000]

############################################################

# Blending mode. Two values are possible:
# - custom: the iterations at which alpha should be updated, and its new
#   value after each update, are fully described by the user
# - linear: the user just inputs the number of alpha updates and the number
#   of iterations between two updates for each scale
_C.alphaJumpMode = "linear"

# If _C.alphaJumpMode == "custom", then the following fields should be completed

# For each scale, the iterations at which the blending factor alpha should
# be updated
_C.iterAlphaJump = [[], [0, 1000, 2000], [0, 1000, 4000, 8000, 16000],
                    [0, 2000, 4000, 8000]]

# New value of the blending factor alpha during the update (see above)
_C.alphaJumpVals = [[], [1., 0.5, 0.],
                    [1., 0.75, 0.5, 0.25, 0.], [1., 0.75, 0.5, 0.]]
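In "linear" mode the two lists above would be derived instead of given; a sketch of that derivation, with per-scale inputs nJumpScale (number of alpha updates) and sizeJumpScale (iterations between updates) as assumed names:

def updateAlphaJumps(nJumpScale, sizeJumpScale):
    # Build iterAlphaJump / alphaJumpVals for "linear" mode: nJumpScale[s]
    # updates per scale, sizeJumpScale[s] iterations apart, with alpha
    # decaying linearly from 1 to 0. Sketch only.
    iterAlphaJump, alphaJumpVals = [], []
    for nJumps, step in zip(nJumpScale, sizeJumpScale):
        iterAlphaJump.append([i * step for i in range(nJumps + 1)])
        alphaJumpVals.append([1. - i / nJumps if nJumps > 0 else 0.
                              for i in range(nJumps + 1)])
    return iterAlphaJump, alphaJumpVals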
Example #7
from utils.config import BaseConfig

# Default configuration for StyleGAN
_C = BaseConfig()

############################################################

# Dimension of the latent vector
_C.dimLatentVector = 512

# Image dimension
_C.imageSize = 256

Example #8
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# Source: https://github.com/facebookresearch/pytorch_GAN_zoo

from utils.config import BaseConfig

# Default configuration for the DCGAN trainer
_C = BaseConfig()

############################################################

# Depth of a convolutional layer for each scale
_C.depth = 3

# Mini batch size
_C.miniBatchSize = 256  # 64

# Dimension of the latent vector
_C.dimLatentVector = 100

# Dimension of the output image
_C.dimOutput = 3

# Dimension of the generator
_C.dimG = 64

# Dimension of the discriminator
_C.dimD = 64

# Image dimension
_C.imageSize = 64
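These module-level _C objects are presumably what getDefaultConfig() from Example #1 returns; a hypothetical sketch of that wiring (module path and class name are assumptions):

from dcgan_config import _C

class DCGANTrainer:
    def getDefaultConfig(self):
        # readTrainConfig (Example #1) merges user-supplied values into
        # these defaults.
        return _C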
Example #9
from utils.config import BaseConfig

# Default configuration for DCGAN image inpainting
_dcgan_inpaint_config = BaseConfig()

############################################################

# Importance weighting term window size
_dcgan_inpaint_config.w_size = 7

# Back-propagation iteration count
_dcgan_inpaint_config.iter = 1500

# Default configuration for StyleGAN image inpainting
_stylegan_inpaint_config = BaseConfig()

############################################################

#
_stylegan_inpaint_config.alpha = 1

#
_stylegan_inpaint_config.style_weight = 0.7

# Importance weighting term window size
_stylegan_inpaint_config.w_size = 28

# Back-propagation iteration count
_stylegan_inpaint_config.iter = 3000
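A rough sketch of how an inpainting loop might consume iter (w_size is ignored here); the latent attribute, optimizer, and masked reconstruction loss are assumptions, not this repo's code:

import torch

def inpaint(generator, image, mask, config, lr=0.01):
    # Optimize a latent vector so the generated image matches the known
    # (mask == 1) pixels for config.iter back-propagation steps.
    # generator.dimLatentVector is an assumed attribute.
    z = torch.randn(1, generator.dimLatentVector, requires_grad=True)
    optimizer = torch.optim.Adam([z], lr=lr)
    for _ in range(config.iter):
        optimizer.zero_grad()
        loss = ((generator(z) - image) * mask).pow(2).mean()
        loss.backward()
        optimizer.step()
    return generator(z).detach()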