Example #1
def create_model_brick():
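    # Encoder: convolutional mapping from the image x to 2 * NLAT channels,
    # which GaussianConditional treats as the mean and log-sigma of q(z | x).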
    layers = [
        conv_brick(2, 1, 64),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 128),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(5, 2, 256),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 256),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 512),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 2 * NLAT)
    ]
    encoder_mapping = ConvolutionalSequence(layers=layers,
                                            num_channels=NUM_CHANNELS,
                                            image_size=IMAGE_SIZE,
                                            use_bias=False,
                                            name='encoder_mapping')
    encoder = GaussianConditional(encoder_mapping, name='encoder')

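    # Decoder: transposed convolutions map the 1x1 latent code back to an
    # image, with a final Logistic squashing pixel values into [0, 1].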
    layers = [
        conv_transpose_brick(4, 1, 512),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_transpose_brick(7, 2, 256),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_transpose_brick(5, 2, 256),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_transpose_brick(7, 2, 128),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_transpose_brick(2, 1, 64),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, NUM_CHANNELS),
        Logistic()
    ]
    decoder_mapping = ConvolutionalSequence(layers=layers,
                                            num_channels=NLAT,
                                            image_size=(1, 1),
                                            use_bias=False,
                                            name='decoder_mapping')
    decoder = DeterministicConditional(decoder_mapping, name='decoder')

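    # Discriminator branch over x: convolutional feature extractor for the image.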
    layers = [
        conv_brick(2, 1, 64),
        LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 128),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(5, 2, 256),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 256),
        bn_brick(),
        LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 512),
        bn_brick(),
        LeakyRectifier(leak=LEAK)
    ]
    x_discriminator = ConvolutionalSequence(layers=layers,
                                            num_channels=NUM_CHANNELS,
                                            image_size=IMAGE_SIZE,
                                            use_bias=False,
                                            name='x_discriminator')
    x_discriminator.push_allocation_config()

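    # Discriminator branch over z: 1x1 convolutions over the latent code.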
    layers = [
        conv_brick(1, 1, 1024),
        LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 1024),
        LeakyRectifier(leak=LEAK)
    ]
    z_discriminator = ConvolutionalSequence(layers=layers,
                                            num_channels=NLAT,
                                            image_size=(1, 1),
                                            use_bias=False,
                                            name='z_discriminator')
    z_discriminator.push_allocation_config()

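    # Joint discriminator: 1x1 convolutions over the concatenated x and z features.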
    layers = [
        conv_brick(1, 1, 2048),
        LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 2048),
        LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 1)
    ]
    joint_discriminator = ConvolutionalSequence(
        layers=layers,
        num_channels=(x_discriminator.get_dim('output')[0] +
                      z_discriminator.get_dim('output')[0]),
        image_size=(1, 1),
        name='joint_discriminator')

    discriminator = XZJointDiscriminator(x_discriminator,
                                         z_discriminator,
                                         joint_discriminator,
                                         name='discriminator')

    ali = ALI(encoder,
              decoder,
              discriminator,
              weights_init=GAUSSIAN_INIT,
              biases_init=ZERO_INIT,
              name='ali')
    ali.push_allocation_config()
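    # Re-enable (untied) biases on the encoder/decoder output layers and the
    # first x-discriminator layer; their sequences were built with use_bias=False.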
    encoder_mapping.layers[-1].use_bias = True
    encoder_mapping.layers[-1].tied_biases = False
    decoder_mapping.layers[-2].use_bias = True
    decoder_mapping.layers[-2].tied_biases = False
    x_discriminator.layers[0].use_bias = True
    x_discriminator.layers[0].tied_biases = True
    ali.initialize()
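    # Initialize the decoder's output bias with the log-odds of the CelebA
    # pixel marginals.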
    raw_marginals, = next(
        create_celeba_data_streams(500, 500)[0].get_epoch_iterator())
    b_value = get_log_odds(raw_marginals)
    decoder_mapping.layers[-2].b.set_value(b_value)

    return ali
Example #2
def create_model_brick():
    # Encoder
    enc_layers = [
        conv_brick(2, 1, 64), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(5, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 2 * NLAT)]

    encoder_mapping = EncoderMapping(layers=enc_layers,
                                     num_channels=NUM_CHANNELS,
                                     n_emb=NEMB,
                                     image_size=IMAGE_SIZE,
                                     weights_init=GAUSSIAN_INIT,
                                     biases_init=ZERO_INIT,
                                     use_bias=False)

    encoder = GaussianConditional(encoder_mapping, name='encoder')
    # Decoder
    dec_layers = [
        conv_transpose_brick(4, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(7, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(5, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(7, 2, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(2, 1, 64), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, NUM_CHANNELS), Logistic()]

    decoder = Decoder(
        layers=dec_layers, num_channels=NLAT + NEMB, image_size=(1, 1), use_bias=False,
        name='decoder_mapping')
    # Discriminator
    layers = [
        conv_brick(2, 1, 64), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(5, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK)]
    x_discriminator = ConvolutionalSequence(
        layers=layers, num_channels=NUM_CHANNELS, image_size=IMAGE_SIZE,
        use_bias=False, name='x_discriminator')
    x_discriminator.push_allocation_config()

    layers = [
        conv_brick(1, 1, 1024), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 1024), LeakyRectifier(leak=LEAK)]
    z_discriminator = ConvolutionalSequence(
        layers=layers, num_channels=NLAT, image_size=(1, 1), use_bias=False,
        name='z_discriminator')
    z_discriminator.push_allocation_config()

    layers = [
        conv_brick(1, 1, 2048), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 2048), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 1)]
    joint_discriminator = ConvolutionalSequence(
        layers=layers,
        num_channels=(x_discriminator.get_dim('output')[0] +
                      z_discriminator.get_dim('output')[0] +
                      NEMB),
        image_size=(1, 1),
        name='joint_discriminator')

    discriminator = XZYJointDiscriminator(
        x_discriminator, z_discriminator, joint_discriminator,
        name='discriminator')

    ali = ConditionalALI(encoder, decoder, discriminator,
                         n_cond=NCLASSES, n_emb=NEMB,
                         weights_init=GAUSSIAN_INIT, biases_init=ZERO_INIT,
                         name='ali')
    ali.push_allocation_config()
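    # Re-enable (untied) biases on the output layers, which were created with
    # use_bias=False.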
    encoder_mapping.layers[-1].use_bias = True
    encoder_mapping.layers[-1].tied_biases = False
    decoder.layers[-2].use_bias = True
    decoder.layers[-2].tied_biases = False
    x_discriminator.layers[0].use_bias = True
    x_discriminator.layers[0].tied_biases = True
    ali.initialize()
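    # Set the decoder's output bias to the log-odds of the CelebA pixel marginals.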
    raw_marginals, = next(
        create_celeba_data_streams(500, 500)[0].get_epoch_iterator())
    b_value = get_log_odds(raw_marginals)
    decoder.layers[-2].b.set_value(b_value)

    return ali
Example #4
def create_model_brick():
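    # Encoder: maps x to 2 * NLAT channels, split by GaussianConditional into
    # the mean and log-sigma of q(z | x).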
    layers = [
        conv_brick(5, 1, 32), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 2, 64), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 2 * NLAT)]
    encoder_mapping = ConvolutionalSequence(
        layers=layers, num_channels=NUM_CHANNELS, image_size=IMAGE_SIZE,
        use_bias=False, name='encoder_mapping')
    encoder = GaussianConditional(encoder_mapping, name='encoder')

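    # Decoder: transposed convolutions from the 1x1 latent code to an image,
    # followed by a Logistic output nonlinearity.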
    layers = [
        conv_transpose_brick(4, 1, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(4, 2, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(4, 1, 64), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(4, 2, 32), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(5, 1, 32), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(1, 1, 32), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, NUM_CHANNELS), Logistic()]
    decoder_mapping = ConvolutionalSequence(
        layers=layers, num_channels=NLAT, image_size=(1, 1), use_bias=False,
        name='decoder_mapping')
    decoder = DeterministicConditional(decoder_mapping, name='decoder')

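    # Discriminator branch over x, using ConvMaxout activations.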
    layers = [
        conv_brick(5, 1, 32), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 2, 64), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 1, 128), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 2, 256), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 1, 512), ConvMaxout(num_pieces=NUM_PIECES)]
    x_discriminator = ConvolutionalSequence(
        layers=layers, num_channels=NUM_CHANNELS, image_size=IMAGE_SIZE,
        name='x_discriminator')
    x_discriminator.push_allocation_config()

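    # Discriminator branch over z: 1x1 convolutions over the latent code.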
    layers = [
        conv_brick(1, 1, 512), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(1, 1, 512), ConvMaxout(num_pieces=NUM_PIECES)]
    z_discriminator = ConvolutionalSequence(
        layers=layers, num_channels=NLAT, image_size=(1, 1), use_bias=False,
        name='z_discriminator')
    z_discriminator.push_allocation_config()

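    # Joint discriminator over the concatenated x and z features.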
    layers = [
        conv_brick(1, 1, 1024), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(1, 1, 1024), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(1, 1, 1)]
    joint_discriminator = ConvolutionalSequence(
        layers=layers,
        num_channels=(x_discriminator.get_dim('output')[0] +
                      z_discriminator.get_dim('output')[0]),
        image_size=(1, 1),
        name='joint_discriminator')

    discriminator = XZJointDiscriminator(
        x_discriminator, z_discriminator, joint_discriminator,
        name='discriminator')

    ali = ALI(encoder, decoder, discriminator,
              weights_init=GAUSSIAN_INIT, biases_init=ZERO_INIT,
              name='ali')
    ali.push_allocation_config()
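    # Give the encoder and decoder output layers their own (untied) biases;
    # their sequences were built with use_bias=False.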
    encoder_mapping.layers[-1].use_bias = True
    encoder_mapping.layers[-1].tied_biases = False
    decoder_mapping.layers[-2].use_bias = True
    decoder_mapping.layers[-2].tied_biases = False
    ali.initialize()
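    # Initialize the decoder's output bias with the log-odds of the CIFAR-10
    # pixel marginals.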
    raw_marginals, = next(
        create_cifar10_data_streams(500, 500)[0].get_epoch_iterator())
    b_value = get_log_odds(raw_marginals)
    decoder_mapping.layers[-2].b.set_value(b_value)

    return ali
    out = encoder_mapping_fun(features, test_labels)
    print(out.shape)

    ## Testing Gaussian encoder blocks
    embeddings = embedder.apply(y)
    encoder = GaussianConditional(mapping=encoder_mapping)
    encoder.initialize()
    encoder_fun = function([x, y], encoder.apply(x, embeddings))
    z_hat = encoder_fun(features, test_labels)
    # print(out)
    print(z_hat)

    # Decoder
    z = tensor.tensor4('z')
    layers = [
        conv_transpose_brick(4, 1, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(4, 2, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(4, 1, 64), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(4, 2, 32), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(5, 1, 32), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(1, 1, 32), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, NUM_CHANNELS), Logistic()]

    decoder = Decoder(layers=layers, num_channels=(NLAT + NEMB), image_size=(1, 1),
                      weights_init=WEIGHTS_INIT, biases_init=BIASES_INIT)
    decoder.initialize()
    decoder_fun = function([z, y], decoder.apply(z, embeddings))
    out = decoder_fun(z_hat, test_labels)

    # Discriminator