Example #1
def create_model(data_size, layer='conv'):
    # Single-layer flow: one self-normalizing fully connected ('fc') or
    # one 3x3 convolutional ('conv') layer on inputs of shape data_size.
    layers = []

    c_in = data_size[0]
    h = data_size[1]
    w = data_size[2]

    if layer == 'fc':
        size = c_in * h * w
        layers.append(
            SelfNormFC(size,
                       size,
                       bias=True,
                       sym_recon_grad=False,
                       only_R_recon=False))
        model = FlowSequential(NegativeGaussianLoss(size=(size, )), *layers)

    elif layer == 'conv':
        layers.append(
            SelfNormConv(c_in,
                         c_in, (3, 3),
                         bias=True,
                         stride=1,
                         padding=1,
                         sym_recon_grad=False,
                         only_R_recon=False))
        # Use data_size rather than a hardcoded (1, 28, 28) so both branches agree.
        model = FlowSequential(NegativeGaussianLoss(size=data_size), *layers)

    return model
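A minimal usage sketch (hypothetical, not from the source): it assumes SelfNormFC, SelfNormConv, FlowSequential and NegativeGaussianLoss are importable from the surrounding codebase and that FlowSequential behaves like a standard torch.nn.Module:

# Hypothetical usage, assuming FlowSequential is a torch.nn.Module.
model = create_model(data_size=(1, 28, 28), layer='conv')
n_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(f'conv variant: {n_params} trainable parameters')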
Example #2
def create_model(num_layers=100, sym_recon_grad=False,
                 activation='Spline', recon_loss_weight=1.0,
                 num_blocks=3):
    block_size = num_layers // num_blocks  # layers per block
    act = activations[activation]

    alpha = 1e-6
    # Map pixels into (0, 1), keep them a margin of alpha away from the
    # boundary, then move to logit space.
    layers = [
        Dequantization(UniformDistribution(size=(1, 28, 28))),
        Normalization(translation=0, scale=256),
        Normalization(translation=-alpha, scale=1 / (1 - 2 * alpha)),
        LogitTransform(),
    ]

    current_size = (1, 28, 28)

    for b in range(num_blocks):
        for l in range(block_size):
            layers.append(ConvExp(current_size))
            if not (b == num_blocks - 1 and l == block_size - 1):
                # Don't place an activation at the end of the last block
                layers.append(act(current_size))

        if not (b == num_blocks - 1):
            # Only squeeze between blocks
            layers.append(Squeeze())
            current_size = (current_size[0]*4,
                            current_size[1]//2,
                            current_size[2]//2)

    return FlowSequential(NegativeGaussianLoss(size=current_size), *layers)
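The Squeeze bookkeeping is easy to check in isolation. Below is a self-contained sketch (plain Python, no flow library needed) of how current_size evolves across blocks, assuming the same (C, H, W) -> (4C, H/2, W/2) rule used above:

def trace_sizes(num_blocks=3, start=(1, 28, 28)):
    # Squeeze trades spatial resolution for channels: (C, H, W) -> (4C, H/2, W/2).
    sizes = [start]
    current = start
    for _ in range(num_blocks - 1):  # squeezes happen only *between* blocks
        current = (current[0] * 4, current[1] // 2, current[2] // 2)
        sizes.append(current)
    return sizes

print(trace_sizes())  # [(1, 28, 28), (4, 14, 14), (16, 7, 7)]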
Example #3
from functools import reduce

def create_model(num_layers=2,
                 sym_recon_grad=False,
                 only_R_recon=False,
                 activation='Spline',
                 recon_loss_weight=1.0,
                 data_size=(1, 28, 28)):
    alpha = 1e-6
    layers = [
        Dequantization(UniformDistribution(size=data_size)),
        Normalization(translation=0, scale=256),
        Normalization(translation=-alpha, scale=1 / (1 - 2 * alpha)),
        LogitTransform(),
    ]
    act = activations[activation]

    size = reduce(lambda x, y: x * y, data_size)  # flattened input dimensionality

    for l in range(num_layers):
        layers.append(
            SelfNormFC(size,
                       size,
                       bias=True,
                       sym_recon_grad=sym_recon_grad,
                       only_R_recon=only_R_recon,
                       recon_loss_weight=recon_loss_weight))
        if (l + 1) < num_layers:
            layers.append(act((size, )))

    return FlowSequential(NegativeGaussianLoss(size=(size, )), *layers)
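The reduce call above just flattens data_size into a single dimensionality; math.prod from the standard library (Python 3.8+) computes the same product:

from functools import reduce
import math

data_size = (1, 28, 28)
size = reduce(lambda x, y: x * y, data_size)  # as in the example above
assert size == math.prod(data_size) == 784    # each SelfNormFC acts on 784-dim vectors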
Example #4
def create_model(num_blocks=3,
                 block_size=48,
                 sym_recon_grad=False,
                 actnorm=False,
                 split_prior=False,
                 recon_loss_weight=1.0):
    current_size = (3, 32, 32)  # CIFAR-10-shaped input

    alpha = 1e-6
    layers = [
        Dequantization(UniformDistribution(size=current_size)),
        Normalization(translation=0, scale=256),
        Normalization(translation=-alpha, scale=1 / (1 - 2 * alpha)),
        LogitTransform(),
    ]

    for l in range(num_blocks):
        layers.append(Squeeze())
        current_size = (current_size[0] * 4, current_size[1] // 2,
                        current_size[2] // 2)

        for k in range(block_size):
            if actnorm:
                layers.append(ActNorm(current_size[0]))
            layers.append(Conv1x1(current_size[0]))
            layers.append(Coupling(current_size))

        if split_prior and l < num_blocks - 1:
            layers.append(SplitPrior(current_size, NegativeGaussianLoss))
            current_size = (current_size[0] // 2, current_size[1],
                            current_size[2])

    return FlowSequential(NegativeGaussianLoss(size=current_size), *layers)
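With split_prior enabled, each block squeezes first and then SplitPrior halves the channels before the next block. A self-contained sketch of the resulting shapes for the (3, 32, 32) input above:

def trace_glow_sizes(num_blocks=3, split_prior=True, start=(3, 32, 32)):
    # Shapes after each Squeeze and each optional SplitPrior, in order.
    current, sizes = start, []
    for l in range(num_blocks):
        current = (current[0] * 4, current[1] // 2, current[2] // 2)  # Squeeze
        sizes.append(current)
        if split_prior and l < num_blocks - 1:
            current = (current[0] // 2, current[1], current[2])       # SplitPrior
            sizes.append(current)
    return sizes

print(trace_glow_sizes())
# [(12, 16, 16), (6, 16, 16), (24, 8, 8), (12, 8, 8), (48, 4, 4)]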
Example #5
def create_model(num_blocks=2, block_size=16, sym_recon_grad=False,
                 actnorm=False, split_prior=False, recon_loss_weight=100.0):
    alpha = 1e-6
    layers = [
        Dequantization(UniformDistribution(size=(1, 28, 28))),
        Normalization(translation=0, scale=256),
        Normalization(translation=-alpha, scale=1 / (1 - 2 * alpha)),
        LogitTransform(),
    ]

    current_size = (1, 28, 28)

    for l in range(num_blocks):
        layers.append(Squeeze())
        current_size = (current_size[0]*4, current_size[1]//2, current_size[2]//2)

        for k in range(block_size):
            if actnorm:
                layers.append(ActNorm(current_size[0]))
            
            layers.append(SelfNormConv(current_size[0], current_size[0], (1, 1), 
                                       bias=True, stride=1, padding=0,
                                       sym_recon_grad=sym_recon_grad, 
                                       recon_loss_weight=recon_loss_weight))
            layers.append(Coupling(current_size))

        if split_prior and l < num_blocks - 1:
            layers.append(SplitPrior(current_size, NegativeGaussianLoss))
            current_size = (current_size[0] // 2, current_size[1], current_size[2])

    return FlowSequential(NegativeGaussianLoss(size=current_size), *layers)
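A minimal usage sketch (hypothetical): it assumes the classes above are importable and that FlowSequential is a torch.nn.Module:

# Hypothetical usage with all optional components switched on.
model = create_model(num_blocks=2, block_size=16,
                     actnorm=True, split_prior=True)
print(sum(p.numel() for p in model.parameters()))  # total parameter count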
Example #6
def create_model(num_layers=100,
                 sym_recon_grad=False,
                 activation='Spline',
                 recon_loss_weight=1.0,
                 num_blocks=3):
    block_size = num_layers // num_blocks  # layers per block
    act = activations[activation]

    alpha = 1e-6
    layers = [
        Dequantization(UniformDistribution(size=(1, 28, 28))),
        Normalization(translation=0, scale=256),
        Normalization(translation=-alpha, scale=1 / (1 - 2 * alpha)),
        LogitTransform(),
    ]

    current_size = (1, 28, 28)

    for b in range(num_blocks):
        for l in range(block_size):
            layers.append(
                SelfNormConv(current_size[0],
                             current_size[0], (3, 3),
                             bias=True,
                             stride=1,
                             padding=1,
                             sym_recon_grad=sym_recon_grad,
                             recon_loss_weight=recon_loss_weight))

            if not (b == num_blocks - 1 and l == block_size - 1):
                # Don't place an activation at the end of the last block
                layers.append(act(current_size))

        if not (b == num_blocks - 1):
            # Only squeeze between blocks
            layers.append(Squeeze())
            current_size = (current_size[0] * 4, current_size[1] // 2,
                            current_size[2] // 2)

    return FlowSequential(NegativeGaussianLoss(size=current_size), *layers)
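Note the truncation in block_size = num_layers // num_blocks: with the defaults (100 layers, 3 blocks) the model actually gets 99 convolutions, not 100. A self-contained sketch of the layer arithmetic:

def count_layers(num_layers=100, num_blocks=3):
    block_size = num_layers // num_blocks   # 33 with the defaults
    convs = num_blocks * block_size         # 99 SelfNormConv layers, not 100
    acts = convs - 1                        # activation after every conv but the last
    squeezes = num_blocks - 1               # Squeeze only between blocks
    return convs, acts, squeezes

print(count_layers())  # (99, 98, 2)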