Example #1
import numpy as np
import torch
import modules  # repo-local helper module providing softplus (import path may differ)

def discretized_logistic_logp(mu, scale, x):
    # map raw pixel values from [0, 255] to [-1, 1] (so each of the 256 pixel values gets a bin of width 2./255.)
    x_rescaled = (x - 127.5) / 127.5
    invscale = 1. / scale

    x_centered = x_rescaled - mu

    plus_in = invscale * (x_centered + 1. / 255.)
    cdf_plus = torch.sigmoid(plus_in)
    min_in = invscale * (x_centered - 1. / 255.)
    cdf_min = torch.sigmoid(min_in)

    # log-probability for edge case of 0
    log_cdf_plus = plus_in - modules.softplus(plus_in)

    # log-probability for edge case of 255
    log_one_minus_cdf_min = - modules.softplus(min_in)

    # other cases
    cdf_delta = cdf_plus - cdf_min

    mid_in = invscale * x_centered

    # log-probability in the center of the bin, to be used in extreme cases
    log_pdf_mid = mid_in - torch.log(scale) - 2. * modules.softplus(mid_in)

    # now select the right output: left edge case, right edge case, normal case, extremely low-probability case
    cond1 = torch.where(cdf_delta > 1e-5, torch.log(torch.clamp(cdf_delta, min=1e-12, max=None)),
                        log_pdf_mid - np.log(127.5))
    cond2 = torch.where(x_rescaled > .999, log_one_minus_cdf_min, cond1)
    logps = torch.where(x_rescaled < -.999, log_cdf_plus, cond2)

    logp = logps.flatten(1)
    return logp
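
A minimal usage sketch (not from the source; the shapes and parameter values are illustrative, and it assumes the function above and its `modules` dependency are importable):

import numpy as np
import torch

# a batch of 4 RGB 32x32 images with raw pixel values in [0, 255],
# plus per-pixel Logistic parameters as a decoder might predict them
x = torch.randint(0, 256, (4, 3, 32, 32)).float()
mu = torch.zeros(4, 3, 32, 32)            # means live in the rescaled [-1, 1] space
scale = torch.full((4, 3, 32, 32), 0.1)   # scales must be strictly positive

logp = discretized_logistic_logp(mu, scale, x)   # shape (4, 3*32*32)
bits_per_dim = -logp.sum(dim=1) / (np.log(2.) * 3 * 32 * 32)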
Example #2
        def distribution(given):
            h = given

            # if compressing, the input arrives flattened (and possibly not as float32),
            # so remember its dtype, cast it to float, and reshape it back to a 4-D Tensor
            if self.compressing:
                input_dtype = h.type()
                h = h.float()
                h = h.view((-1, ) + self.zdim)

            # bottom latent layer (i is the latent-layer index from the enclosing scope)
            if i == 0:
                # input convolution
                h = self.gen_in(h)

                # processing ResNet blocks
                h = self.gen_res1(h)

                # other ResNet blocks
                h = self.gen_res0(h)

                # mu parameter of the conditional Logistic distribution
                mu = self.gen_mu(h)

                # scale parameter of the conditional Logistic distribution
                # floor the scale of the bottom generative model at one eighth of the pixel bin width (2./255.)
                scale = ((2. / 255.) / 8.) + modules.softplus(self.gen_std)

            # deeper latent layers
            else:
                # input convolution
                h = self.deepgen_in[i - 1](h)

                # other ResNet blocks
                h = self.deepgen_res[i - 1](h)

                # mu parameter of the conditional Logistic distribution
                mu = self.deepgen_mu[i - 1](h)

                # scale parameter of the conditional Logistic distribution
                # the np.log(np.exp(1.) - 1.) offset makes the scale start at 1.0 for a zero
                # network output, and the 0.1 term keeps it bounded away from zero for
                # stability (see the numerical check after this example)
                scale = 0.1 + 0.9 * modules.softplus(
                    self.deepgen_std[i - 1](h) + np.log(np.exp(1.) - 1.))

            if self.compressing:
                # if compressing, the "batch-size" can only be 1
                assert mu.shape[0] == 1

                # flatten the Tensors back and convert back to the input datatype
                mu = mu.view(
                    np.prod(self.xs if i == 0 else self.zdim)).type(input_dtype)
                scale = scale.view(
                    np.prod(self.xs if i == 0 else self.zdim)).type(input_dtype)
            return mu, scale
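
The scale parameterization used for the deeper layers above can be sanity-checked numerically. This is an illustrative snippet, not part of the source; it assumes `modules.softplus` behaves like `torch.nn.functional.softplus`:

import numpy as np
import torch
import torch.nn.functional as F

# With the np.log(np.exp(1.) - 1.) offset, a zero network output gives
# softplus(log(e - 1)) == log(e) == 1, so the scale starts at 0.1 + 0.9 * 1 = 1.0
# and is always bounded below by 0.1.
raw = torch.zeros(1)
scale = 0.1 + 0.9 * F.softplus(raw + np.log(np.exp(1.) - 1.))
assert torch.allclose(scale, torch.ones(1))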
Example #3
import torch
import modules  # repo-local helper module providing softplus (import path may differ)

def logistic_logp(mu, scale, x):
    # log-density of a continuous Logistic(mu, scale) distribution evaluated at x
    _y = -(x - mu) / scale
    _logp = -_y - torch.log(scale) - 2 * modules.softplus(-_y)
    logp = _logp.flatten(2)  # keep batch and channel dims, flatten the rest
    return logp
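
As a sanity check on the form used above, the snippet below (illustrative, not part of the source; it substitutes `torch.nn.functional.softplus` for `modules.softplus`) verifies that the softplus expression matches the textbook logistic log-density log p(x) = -z - log(scale) - 2*log(1 + exp(-z)) with z = (x - mu) / scale:

import torch
import torch.nn.functional as F

mu, scale = torch.tensor(0.3), torch.tensor(0.5)
x = torch.linspace(-3., 3., steps=7)

# direct form of the logistic log-density
z = (x - mu) / scale
direct = -z - torch.log(scale) - 2. * torch.log1p(torch.exp(-z))

# form used by logistic_logp above (with F.softplus in place of modules.softplus)
_y = -(x - mu) / scale
via_softplus = -_y - torch.log(scale) - 2. * F.softplus(-_y)

assert torch.allclose(direct, via_softplus)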