Example 1
    def build_decoder(self, reupsample=True):
        """Construct the convolutional or dense decoder module.

        :param reupsample: forwarded to the decoder factory.
        :returns: a decoder
        :rtype: nn.Module

        """
        # the pixelcnn decoder is only usable with the discretized
        # mixture-of-logistics likelihood, so fail fast otherwise
        if self.config['decoder_layer_type'] == "pixelcnn":
            assert self.config['nll_type'] == "disc_mix_logistic", \
                "pixelcnn only works with disc_mix_logistic"

        decoder_cls = get_decoder(self.config, reupsample)
        decoder = decoder_cls(input_size=self.reparameterizer.output_size,
                              output_shape=self.input_shape,
                              activation_fn=self.activation_fn)

        # attach an extra variance head when the likelihood requires one
        return self._append_variance_projection(decoder)
Example 2
    def build_decoder(self, reupsample=True):
        """Construct the convolutional or dense decoder module.

        :param reupsample: accepted for interface compatibility; unused here.
        :returns: a decoder
        :rtype: nn.Module

        """
        decoder_config = deepcopy(self.config)
        # widen the channel dimension by 256 for pixel-wise likelihoods
        # (presumably one logit per possible 8-bit value — verify upstream)
        if decoder_config['nll_type'] == 'pixel_wise':
            decoder_config['input_shape'][0] *= 256

        decoder_cls = get_decoder(output_shape=decoder_config['input_shape'],
                                  **decoder_config)
        decoder = decoder_cls(input_size=self.config['latent_size'] * 2)

        # attach an extra variance head when the likelihood requires one
        return self._append_variance_projection(decoder)
Example 3
    def build_decoder(self, reupsample=True):
        """Construct the convolutional or dense decoder module.

        :param reupsample: accepted for interface compatibility; unused here.
        :returns: a decoder (JIT-scripted when ``config['jit']`` is set)
        :rtype: nn.Module

        """
        decoder_config = deepcopy(self.config)
        # widen the channel dimension by 256 for pixel-wise likelihoods
        # (presumably one logit per possible 8-bit value — verify upstream)
        if decoder_config['nll_type'] == 'pixel_wise':
            decoder_config['input_shape'][0] *= 256

        decoder_cls = layers.get_decoder(
            output_shape=decoder_config['input_shape'], **decoder_config)
        decoder = decoder_cls(input_size=self.config['continuous_size'])

        # attach an extra variance head when the likelihood requires one
        decoder = self._append_variance_projection(decoder)

        # optionally compile with TorchScript
        if self.config['jit']:
            return torch.jit.script(decoder)
        return decoder
Example 4
    def __init__(self, input_shape, **kwargs):
        """ Implements a VAE which decodes many samples and averages outputs.

        :param input_shape: the input shape
        :returns: an object of MSG-VAE
        :rtype: MSGVAE

        """
        super(MSGVAE, self).__init__(input_shape, **kwargs)

        # map the configured reparameterization name to its implementation
        reparam_classes = {
            'beta': Beta,
            'bernoulli': Bernoulli,
            'discrete': GumbelSoftmax,
            'isotropic_gaussian': IsotropicGaussian,
            'mixture': partial(
                Mixture,
                num_discrete=self.config['discrete_size'],
                num_continuous=self.config['continuous_size']),
        }
        reparam_cls = reparam_classes[self.config['reparam_type']]
        self.reparameterizer = reparam_cls(config=self.config)

        # build the encoder and decoder
        self.encoder = self.build_encoder()
        self.decoder = self.build_decoder()

        # one gating decoder per time step
        gate_modules = []
        for step in range(self.config['max_time_steps']):
            gate = get_decoder(self.config, reupsample=True,
                               name='gate_{}'.format(step))(
                                   input_size=self.reparameterizer.output_size,
                                   output_shape=self.input_shape,
                                   activation_fn=self.activation_fn)
            gate_modules.append(gate)
        self.gates = nn.ModuleList(gate_modules)

        # over-ride the reparam prior; keep the original around for later use
        self.single_prior = self.reparameterizer.prior
        self.reparameterizer.prior = self._prior_override
Example 5
    def build_decoder(self, reupsample=True):
        """Construct the convolutional or dense decoder module.

        :param reupsample: accepted for interface compatibility; unused here.
        :returns: a decoder (JIT-scripted when ``config['jit']`` is set)
        :rtype: nn.Module

        """
        decoder_config = deepcopy(self.config)
        # widen the channel dimension by 256 for pixel-wise likelihoods
        # (presumably one logit per possible 8-bit value — verify upstream)
        if decoder_config['nll_type'] == 'pixel_wise':
            decoder_config['input_shape'][0] *= 256

        decoder_cls = layers.get_decoder(
            output_shape=decoder_config['input_shape'], **decoder_config)
        decoder = decoder_cls(input_size=self.reparameterizer.output_size)

        # report the parameter count in millions
        print('decoder has {} parameters\n'.format(
            utils.number_of_parameters(decoder) / 1e6))

        # attach an extra variance head when the likelihood requires one
        decoder = self._append_variance_projection(decoder)

        # optionally compile with TorchScript
        if self.config['jit']:
            return torch.jit.script(decoder)
        return decoder