def __init__(self, input_size, convexp_coeff=0.9):
        super(ConvExp, self).__init__()
        # Kernel shape: (C_out, C_in, 3, 3) with C_out = C_in = input channels.
        kernel_size = [input_size[0], input_size[0], 3, 3]

        # Initialize with a small norm so the convolution starts near the
        # zero map and the truncated exponential series converges quickly.
        self.kernel = torch.nn.Parameter(
            torch.randn(kernel_size) / np.prod(kernel_size[1:]))

        self.stride = (1, 1)
        self.padding = (1, 1)

        # Probably not useful.
        self.pre_transform_bias = torch.nn.Parameter(
            torch.zeros((1, *input_size)))

        # Again probably not useful.
        self.post_transform_bias = torch.nn.Parameter(
            torch.zeros((1, *input_size)))

        # Dense invertible 1x1 convolution for small channel counts;
        # a cheaper Householder-parametrized one for larger ones.
        if input_size[0] <= 64:
            self.conv1x1 = Conv1x1(input_size[0])
        else:
            self.conv1x1 = Conv1x1Householder(input_size[0], 64)

        # Spectrally normalize the kernel so the induced linear map has
        # norm at most convexp_coeff, keeping the power series stable.
        spectral_norm_conv(self,
                           coeff=convexp_coeff,
                           input_dim=input_size,
                           name='kernel',
                           n_power_iterations=1,
                           eps=1e-12)

        # Truncation lengths for the exponential power series; use a
        # longer, more accurate expansion at evaluation time.
        self.n_terms_train = 6
        self.n_terms_eval = self.n_terms_train * 2 + 1
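For context: the convolution exponential applies exp(M) to the input, where M is the linear map defined by the convolution kernel, via a power series truncated at n_terms. A minimal sketch of that idea (a hypothetical conv_exp helper, not the repository's actual forward pass):

import torch
import torch.nn.functional as F

def conv_exp(x, kernel, terms=6):
    # exp(M) x = sum_k M^k x / k!, where M x = conv2d(x, kernel).
    # Each term is built iteratively from the previous one.
    result, term = x, x
    for k in range(1, terms + 1):
        term = F.conv2d(term, kernel, padding=1) / k
        result = result + term
    return result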
Example #2
def create_model(num_blocks=3,
                 block_size=48,
                 sym_recon_grad=False,
                 actnorm=False,
                 split_prior=False,
                 recon_loss_weight=1.0):
    current_size = (3, 32, 32)

    # Preprocessing: dequantize integer pixels, rescale from [0, 256) to
    # [0, 1), nudge values away from 0 and 1, then map to the reals with
    # a logit so the flow operates on an unbounded domain.
    alpha = 1e-6
    layers = [
        Dequantization(UniformDistribution(size=current_size)),
        Normalization(translation=0, scale=256),
        Normalization(translation=-alpha, scale=1 / (1 - 2 * alpha)),
        LogitTransform(),
    ]

    for l in range(num_blocks):
        # Squeeze trades spatial size for channels: (C, H, W) -> (4C, H/2, W/2).
        layers.append(Squeeze())
        current_size = (current_size[0] * 4, current_size[1] // 2,
                        current_size[2] // 2)

        # Each flow step: optional ActNorm, an invertible 1x1 convolution,
        # and a coupling layer.
        for k in range(block_size):
            if actnorm:
                layers.append(ActNorm(current_size[0]))
            layers.append(Conv1x1(current_size[0]))
            layers.append(Coupling(current_size))

        # Multi-scale architecture: factor out half the channels under a
        # Gaussian prior after every block except the last.
        if split_prior and l < num_blocks - 1:
            layers.append(SplitPrior(current_size, NegativeGaussianLoss))
            current_size = (current_size[0] // 2, current_size[1],
                            current_size[2])

    return FlowSequential(NegativeGaussianLoss(size=current_size), *layers)
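A quick usage sketch, assuming FlowSequential's forward returns the latent and a per-sample log-likelihood term (the exact return signature in the source repository may differ):

import torch

model = create_model(num_blocks=3, block_size=48, actnorm=True,
                     split_prior=True)

# Batch of 32x32 RGB images with pixel values in [0, 256).
x = torch.randint(0, 256, (16, 3, 32, 32), dtype=torch.float32)
z, log_px = model(x)      # assumed API: latents plus log-likelihood
loss = -log_px.mean()     # train by minimizing negative log-likelihood
loss.backward()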
Example #3
    def __init__(self, n_channels):
        super(Emerging, self).__init__()

        # An emerging convolution: an invertible 1x1 convolution composed
        # with two square autoregressive convolutions, the second applied
        # on a flipped pixel ordering. The composition is invertible yet
        # has the receptive field of a standard convolution.
        self.transformations = torch.nn.ModuleList([
            Conv1x1(n_channels),
            SquareAutoRegressiveConv2d(n_channels),
            Flip2d(),
            SquareAutoRegressiveConv2d(n_channels),
            Flip2d(),
        ])
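The rest of the class is not shown; its forward and inverse presumably chain the sub-transformations in opposite orders. A hypothetical sketch (the real methods likely also track log-determinants):

def forward(self, x):
    # Apply each invertible sub-transform in order.
    for t in self.transformations:
        x = t(x)
    return x

def inverse(self, z):
    # Invert the composition by undoing each step in reverse order.
    for t in reversed(self.transformations):
        z = t.inverse(z)
    return z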
Example #4

def test_inverses(input_size=(12, 4, 16, 16)):
    # input_size is (batch, channels, height, width): channel-indexed
    # layers take input_size[1], spatial layers take input_size[1:].
    check_inverse(LearnableLeakyRelu().to('cuda'), input_size)
    check_inverse(SplineActivation(input_size).to('cuda'), input_size)
    check_inverse(SmoothLeakyRelu().to('cuda'), input_size)
    check_inverse(SmoothTanh().to('cuda'), input_size)
    check_inverse(Identity().to('cuda'), input_size)
    check_inverse(ActNorm(input_size[1]).to('cuda'), input_size)
    check_inverse(Conv1x1(input_size[1]).to('cuda'), input_size)
    check_inverse(Conv1x1Householder(input_size[1], 10).to('cuda'), input_size)
    check_inverse(Coupling(input_size[1:]).to('cuda'), input_size)
    check_inverse(
        Normalization(translation=-1e-6, scale=1 / (1 - 2 * 1e-6)).to('cuda'),
        input_size)
    check_inverse(Squeeze().to('cuda'), input_size)
    check_inverse(UnSqueeze().to('cuda'), input_size)
    test_snf_layer_inverses(input_size)

    print("All inverse tests passed")