Example #1
    def prior_params(self, batch_size, **kwargs):
        """ Helper to get prior parameters

        :param batch_size: the size of the batch
        :returns: a dictionary of parameters
        :rtype: dict

        """
        mu = same_type(self.config['half'], self.config['cuda'])(
            batch_size, self.output_size).zero_()  # zero mean

        # variance = 1 unless otherwise specified
        scale_var = 1.0 if 'scale_var' not in kwargs else kwargs['scale_var']
        sigma = same_type(self.config['half'], self.config['cuda'])(
            batch_size, self.output_size).zero_() + scale_var

        return {'gaussian': {'mu': mu, 'logvar': sigma}}
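The examples on this page all use a same_type helper to pick a tensor constructor matching the 'half' and 'cuda' flags in the config. The helper itself is not shown here; the following is a minimal sketch of what such a function could look like (an assumption for illustration, not the repository's actual implementation):

import torch

def same_type(half, cuda):
    """Hypothetical reconstruction: return the torch tensor constructor
    matching the half/cuda flags."""
    if half:
        return torch.cuda.HalfTensor if cuda else torch.HalfTensor
    return torch.cuda.FloatTensor if cuda else torch.FloatTensor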
Example #2
    def prior(self, batch_size, **kwargs):
        """ Returns a Kerman beta prior.

        Kerman, J. (2011). Neutral noninformative and informative
        conjugate beta and gamma prior distributions. Electronic
        Journal of Statistics, 5, 1450-1470.

        :param batch_size: the number of prior samples
        :returns: prior
        :rtype: torch.Tensor

        """
        conc1 = Variable(
            same_type(self.config['half'], self.config['cuda'])
            (batch_size, self.output_size).zero_() + 1 / 3)
        conc2 = Variable(
            same_type(self.config['half'], self.config['cuda'])
            (batch_size, self.output_size).zero_() + 1 / 3)
        return PD.Beta(conc1, conc2).sample()
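Kerman's neutral prior sets both Beta concentration parameters to 1/3. For reference, the same draw can be written with plain tensors and the torch.distributions API (an illustrative sketch with made-up sizes, not part of the original code):

import torch
import torch.distributions as D

batch_size, output_size = 16, 10
conc = torch.full((batch_size, output_size), 1.0 / 3.0)
samples = D.Beta(conc, conc).sample()  # draws in (0, 1), one per latent unit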
Example #3
    def prior(self, batch_size, **kwargs):
        """ Sample the prior for batch_size samples.

        :param batch_size: number of prior samples.
        :returns: prior
        :rtype: torch.Tensor

        """
        scale_var = 1.0 if 'scale_var' not in kwargs else kwargs['scale_var']
        return same_type(self.config['half'], self.config['cuda'])(
            batch_size, self.output_size).normal_(mean=0, std=scale_var)
Example #4
    def generate_synthetic_samples(self, batch_size, **kwargs):
        """ Generates samples with VAE.

        :param batch_size: the number of samples to generate.
        :returns: decoded logits
        :rtype: torch.Tensor

        """
        z_samples = utils.same_type(self.config['half'], self.config['cuda'])(
            batch_size, self.config['continuous_size']).normal_(mean=0.0,
                                                                std=1.0)
        return self.nll_activation(self.decode(z_samples))
Example #5
        def _init(batch_size, cuda):
            """ Return a single initialized state

            :param batch_size: batch size
            :param cuda: is on cuda or not
            :returns: a single state init
            :rtype: (torch.Tensor, torch.Tensor)

            """
            num_directions = 2 if self.bidirectional else 1
            if override_noisy_state or \
               (self.training and self.config['use_noisy_rnn_state']):
                # add some noise to initial state
                # consider also: nn.init.xavier_uniform_(
                return same_type(self.config['half'],
                                 cuda)(num_directions * self.n_layers,
                                       batch_size, self.h_dim).normal_(
                                           0, 0.01).requires_grad_()

            # otherwise return a deterministic zero state (e.g. during evaluation)
            return same_type(self.config['half'],
                             cuda)(num_directions * self.n_layers, batch_size,
                                   self.h_dim).zero_().requires_grad_()
Example #6
    def prior_params(self, batch_size, **kwargs):
        """ Helper to get prior parameters

        :param batch_size: the size of the batch
        :returns: a dictionary of parameters
        :rtype: dict

        """
        uniform_probs = same_type(self.config['half'], self.config['cuda'])(
            batch_size, self.output_size).zero_()
        uniform_probs += 0.5
        return {
            'discrete': {
                'logits': D.Bernoulli(probs=uniform_probs).logits
            }
        }
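The Bernoulli object above is used only to convert probabilities into logits; since logit(p) = log(p / (1 - p)), a uniform probability of 0.5 yields all-zero logits. The same conversion written out directly (illustrative sketch):

import torch

uniform_probs = torch.full((4, 8), 0.5)
logits = torch.log(uniform_probs / (1.0 - uniform_probs))  # all zeros for p = 0.5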
Example #7
    def mut_info(self, dist_params, batch_size):
        """ Returns mutual information between z <-> x

        :param dist_params: the distribution dict
        :param batch_size: the size of the batch
        :returns: tensor of dimension batch_size
        :rtype: torch.Tensor

        """
        mut_info = utils.same_type(self.config['half'],
                                   self.config['cuda'])(batch_size).zero_()

        # only grab the mut-info if the scalars above are set
        if self.config.get('continuous_mut_info', 0) > 0 or self.config.get(
                'discrete_mut_info', 0) > 0:
            mut_info = self._clamp_mut_info(
                self.reparameterizer.mutual_info(dist_params))

        return mut_info
Example #8
    def _reparametrize_gaussian(self, mu, logvar):
        """ Internal member to reparametrize gaussian.

        :param mu: mean logits
        :param logvar: log-variance.
        :returns: reparameterized tensor and param dict
        :rtype: torch.Tensor, dict

        """
        if self.training:  # returns a stochastic sample for training
            std = logvar.mul(0.5).exp()
            eps = same_type(is_half(logvar),
                            logvar.is_cuda)(logvar.size()).normal_()
            eps = Variable(eps)
            nan_check_and_break(logvar, "logvar")
            return eps.mul(std).add_(mu), {'mu': mu, 'logvar': logvar}

        return mu, {'mu': mu, 'logvar': logvar}
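Example #8 is the standard Gaussian reparameterization trick: z = mu + sigma * eps with sigma = exp(0.5 * logvar) and eps ~ N(0, I), which keeps the sample differentiable with respect to mu and logvar. A minimal standalone version without the half/cuda plumbing (a sketch, not the repository's code):

import torch

def reparameterize(mu, logvar):
    """Draw z = mu + sigma * eps with eps ~ N(0, I)."""
    std = torch.exp(0.5 * logvar)
    eps = torch.randn_like(std)
    return mu + eps * std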
Example #9
    def generate_synthetic_sequential_samples(self,
                                              num_original_discrete,
                                              num_rows=8):
        """ Iterates over all discrete positions and generates samples (for mix or disc only).

        :param num_original_discrete: The original discrete size (useful for LLVAE).
        :param num_rows: for visdom
        :returns: decoded logits
        :rtype: torch.Tensor

        """
        assert self.has_discrete()

        # create a grid of one-hot vectors for displaying in visdom
        # uses one row for original dimension of discrete component
        discrete_indices = np.array([
            np.random.randint(begin, end, size=num_rows) for begin, end in zip(
                range(0, self.reparameterizer.config['discrete_size'],
                      num_original_discrete),
                range(num_original_discrete,
                      self.reparameterizer.config['discrete_size'] +
                      1, num_original_discrete))
        ])
        discrete_indices = discrete_indices.reshape(-1)

        self.eval()  # lock BN / Dropout, etc
        with torch.no_grad():
            z_samples = Variable(
                torch.from_numpy(
                    utils.one_hot_np(
                        self.reparameterizer.config['discrete_size'],
                        discrete_indices)))
            z_samples = z_samples.type(
                utils.same_type(self.config['half'], self.config['cuda']))

            if self.config['reparam_type'] == 'mixture' and self.config[
                    'vae_type'] != 'sequential':
                ''' add in the gaussian prior '''
                z_cont = self.reparameterizer.continuous.prior(
                    z_samples.size(0))
                z_samples = torch.cat([z_cont, z_samples], dim=-1)

            # the below is to handle the issues with BN
            # pad the z to be full batch size
            number_to_return = z_samples.shape[0]  # original generate number
            number_batches_z = int(
                max(
                    1,
                    np.ceil(
                        float(self.config['batch_size']) /
                        float(number_to_return))))
            z_padded = torch.cat([z_samples for _ in range(number_batches_z)],
                                 0)[0:self.config['batch_size']]

            # generate and return the requested number
            number_batches_to_generate = int(
                max(
                    1,
                    np.ceil(
                        float(number_to_return) /
                        float(self.config['batch_size']))))
            generated = torch.cat([
                self.generate_synthetic_samples(self.config['batch_size'],
                                                z_samples=z_padded)
                for _ in range(number_batches_to_generate)
            ], 0)
            return generated[0:number_to_return]  # only return num_requested
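The padding logic in Example #9 exists because layers such as BatchNorm can misbehave on very small batches: the requested z samples are tiled up to a full config['batch_size'], decoded in full-size batches, and the result is sliced back to the requested count. A compact illustration of the same tiling arithmetic with concrete, made-up numbers (illustrative only):

import torch

batch_size, requested = 32, 10
z = torch.randn(requested, 6)
n_repeats = max(1, -(-batch_size // requested))   # ceiling division, here 4
z_padded = z.repeat(n_repeats, 1)[:batch_size]    # shape: (32, 6)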