def create_model_brick():
    """Assemble, configure and initialize the ALI brick for CelebA.

    Returns
    -------
    ali
        A fully initialized ``ALI`` brick wiring together the Gaussian
        encoder, the deterministic decoder and the joint x/z discriminator.
    """
    # --- Encoder: image -> parameters (mean, log-sigma) of q(z|x) ---------
    enc_stack = [
        conv_brick(2, 1, 64), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(5, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 2 * NLAT),  # 2*NLAT: mean and log-sigma per latent
    ]
    encoder_mapping = ConvolutionalSequence(
        layers=enc_stack, num_channels=NUM_CHANNELS, image_size=IMAGE_SIZE,
        use_bias=False, name='encoder_mapping')
    encoder = GaussianConditional(encoder_mapping, name='encoder')

    # --- Decoder: latent (1x1 spatial) -> image in [0, 1] via Logistic ----
    dec_stack = [
        conv_transpose_brick(4, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(7, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(5, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(7, 2, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(2, 1, 64), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, NUM_CHANNELS), Logistic(),
    ]
    decoder_mapping = ConvolutionalSequence(
        layers=dec_stack, num_channels=NLAT, image_size=(1, 1),
        use_bias=False, name='decoder_mapping')
    decoder = DeterministicConditional(decoder_mapping, name='decoder')

    # --- Discriminator over x (first conv deliberately not batch-normed) --
    x_disc_stack = [
        conv_brick(2, 1, 64), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(5, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK),
    ]
    x_discriminator = ConvolutionalSequence(
        layers=x_disc_stack, num_channels=NUM_CHANNELS,
        image_size=IMAGE_SIZE, use_bias=False, name='x_discriminator')
    # Allocation config must be pushed now so get_dim('output') works below.
    x_discriminator.push_allocation_config()

    # --- Discriminator over z (1x1 convolutions act as fully-connected) ---
    z_disc_stack = [
        conv_brick(1, 1, 1024), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 1024), LeakyRectifier(leak=LEAK),
    ]
    z_discriminator = ConvolutionalSequence(
        layers=z_disc_stack, num_channels=NLAT, image_size=(1, 1),
        use_bias=False, name='z_discriminator')
    z_discriminator.push_allocation_config()

    # --- Joint discriminator over concatenated x- and z-features ----------
    joint_stack = [
        conv_brick(1, 1, 2048), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 2048), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 1),  # single logit
    ]
    joint_discriminator = ConvolutionalSequence(
        layers=joint_stack,
        num_channels=(x_discriminator.get_dim('output')[0] +
                      z_discriminator.get_dim('output')[0]),
        image_size=(1, 1),
        name='joint_discriminator')
    discriminator = XZJointDiscriminator(
        x_discriminator, z_discriminator, joint_discriminator,
        name='discriminator')

    ali = ALI(encoder, decoder, discriminator,
              weights_init=GAUSSIAN_INIT, biases_init=ZERO_INIT,
              name='ali')
    ali.push_allocation_config()

    # Selected layers do use (untied) biases, overriding use_bias=False:
    # the encoder output, the decoder's final conv, and the first x-disc conv.
    encoder_mapping.layers[-1].use_bias = True
    encoder_mapping.layers[-1].tied_biases = False
    decoder_mapping.layers[-2].use_bias = True
    decoder_mapping.layers[-2].tied_biases = False
    x_discriminator.layers[0].use_bias = True
    x_discriminator.layers[0].tied_biases = True
    ali.initialize()

    # Initialize the decoder's output bias to the data's marginal log-odds
    # so the initial reconstructions start near the pixel-wise mean image.
    raw_marginals, = next(
        create_celeba_data_streams(500, 500)[0].get_epoch_iterator())
    marginal_bias = get_log_odds(raw_marginals)
    decoder_mapping.layers[-2].b.set_value(marginal_bias)

    return ali
# NOTE(review): truncated fragment — the trailing `layers = [` list below is cut off
# mid-definition (it ends at `conv_brick(4, 1, 512),` with no closing bracket), and it
# duplicates the beginning of the fuller embedder/encoder smoke-test chunk later in this
# file. Also note the loop variable `id` shadows the builtin, and `npr.randint(0, 9, ...)`
# has an exclusive upper bound so index 9 is never drawn — presumably (0, 10) was intended;
# confirm against the complete copy before fixing here.
test_labels = np.zeros(shape=(5, 10)) idx = npr.randint(0, 9, size=5) for n, id in enumerate(idx): test_labels[n, id] = 1 embeddings = embedder_test(test_labels) print(embeddings) print(embeddings.shape) # Generate synthetic 4D tensor features = npr.random(size=(5, 3, 32, 32)) # Testing Encoder layers = [ # 32 X 32 X 3 conv_brick(5, 1, 32), bn_brick(), LeakyRectifier(leak=LEAK), # 28 X 28 X 32 conv_brick(4, 2, 64), bn_brick(), LeakyRectifier(leak=LEAK), # 13 X 13 X 64 conv_brick(4, 1, 128), bn_brick(), LeakyRectifier(leak=LEAK), # 10 X 10 X 128 conv_brick(4, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK), # 4 X 4 X 256 conv_brick(4, 1, 512),
def create_model_brick():
    """Assemble, configure and initialize the label-conditional ALI brick.

    Returns
    -------
    ali
        A fully initialized ``ConditionalALI`` brick combining the
        embedding-aware encoder, the conditional decoder and the joint
        x/z/y discriminator.
    """
    # Encoder ---------------------------------------------------------------
    # Maps an image (plus label embedding, handled by EncoderMapping) to the
    # 2*NLAT parameters of q(z|x, y).
    encoder_stack = [
        conv_brick(2, 1, 64), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(5, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 2 * NLAT)]
    encoder_mapping = EncoderMapping(layers=encoder_stack,
                                     num_channels=NUM_CHANNELS,
                                     n_emb=NEMB,
                                     image_size=IMAGE_SIZE,
                                     weights_init=GAUSSIAN_INIT,
                                     biases_init=ZERO_INIT,
                                     use_bias=False)
    encoder = GaussianConditional(encoder_mapping, name='encoder')

    # Decoder ---------------------------------------------------------------
    # Input channels are NLAT + NEMB: the latent concatenated with the
    # label embedding.
    decoder_stack = [
        conv_transpose_brick(4, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(7, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(5, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(7, 2, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(2, 1, 64), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, NUM_CHANNELS), Logistic()]
    decoder = Decoder(
        layers=decoder_stack, num_channels=NLAT + NEMB, image_size=(1, 1),
        use_bias=False, name='decoder_mapping')

    # Discriminator ---------------------------------------------------------
    # x-branch; its first convolution is intentionally not batch-normed.
    disc_x_stack = [
        conv_brick(2, 1, 64), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(5, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(7, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK)]
    x_discriminator = ConvolutionalSequence(
        layers=disc_x_stack, num_channels=NUM_CHANNELS,
        image_size=IMAGE_SIZE, use_bias=False, name='x_discriminator')
    # Needed before querying get_dim('output') for the joint branch below.
    x_discriminator.push_allocation_config()

    # z-branch: 1x1 convolutions acting as fully-connected layers.
    disc_z_stack = [
        conv_brick(1, 1, 1024), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 1024), LeakyRectifier(leak=LEAK)]
    z_discriminator = ConvolutionalSequence(
        layers=disc_z_stack, num_channels=NLAT, image_size=(1, 1),
        use_bias=False, name='z_discriminator')
    z_discriminator.push_allocation_config()

    # Joint branch: consumes x-features, z-features and the NEMB-dim label
    # embedding, producing a single logit.
    disc_joint_stack = [
        conv_brick(1, 1, 2048), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 2048), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 1)]
    joint_discriminator = ConvolutionalSequence(
        layers=disc_joint_stack,
        num_channels=(x_discriminator.get_dim('output')[0] +
                      z_discriminator.get_dim('output')[0] +
                      NEMB),
        image_size=(1, 1),
        name='joint_discriminator')
    discriminator = XZYJointDiscriminator(
        x_discriminator, z_discriminator, joint_discriminator,
        name='discriminator')

    ali = ConditionalALI(encoder, decoder, discriminator,
                         n_cond=NCLASSES, n_emb=NEMB,
                         weights_init=GAUSSIAN_INIT, biases_init=ZERO_INIT,
                         name='ali')
    ali.push_allocation_config()

    # Override use_bias=False on the layers that should carry (untied) biases.
    encoder_mapping.layers[-1].use_bias = True
    encoder_mapping.layers[-1].tied_biases = False
    decoder.layers[-2].use_bias = True
    decoder.layers[-2].tied_biases = False
    x_discriminator.layers[0].use_bias = True
    x_discriminator.layers[0].tied_biases = True
    ali.initialize()

    # Seed the decoder's output bias with the data marginals' log-odds so
    # early reconstructions resemble the mean image.
    raw_marginals, = next(
        create_celeba_data_streams(500, 500)[0].get_epoch_iterator())
    marginal_bias = get_log_odds(raw_marginals)
    decoder.layers[-2].b.set_value(marginal_bias)

    return ali
def create_model_brick():
    """Assemble, configure and initialize the ALI brick for CIFAR-10.

    Returns
    -------
    ali
        A fully initialized ``ALI`` brick whose discriminator branches use
        ``ConvMaxout`` activations instead of batch-norm + LeakyReLU.
    """
    # Encoder: image -> 2*NLAT Gaussian parameters (mean, log-sigma).
    encoder_stack = [
        conv_brick(5, 1, 32), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 2, 64), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(4, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, 2 * NLAT)]
    encoder_mapping = ConvolutionalSequence(
        layers=encoder_stack, num_channels=NUM_CHANNELS,
        image_size=IMAGE_SIZE, use_bias=False, name='encoder_mapping')
    encoder = GaussianConditional(encoder_mapping, name='encoder')

    # Decoder: 1x1 latent -> image squashed into [0, 1] by the Logistic.
    decoder_stack = [
        conv_transpose_brick(4, 1, 256), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(4, 2, 128), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(4, 1, 64), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(4, 2, 32), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(5, 1, 32), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_transpose_brick(1, 1, 32), bn_brick(), LeakyRectifier(leak=LEAK),
        conv_brick(1, 1, NUM_CHANNELS), Logistic()]
    decoder_mapping = ConvolutionalSequence(
        layers=decoder_stack, num_channels=NLAT, image_size=(1, 1),
        use_bias=False, name='decoder_mapping')
    decoder = DeterministicConditional(decoder_mapping, name='decoder')

    # x-branch of the discriminator (ConvMaxout activations, biases kept).
    disc_x_stack = [
        conv_brick(5, 1, 32), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 2, 64), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 1, 128), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 2, 256), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(4, 1, 512), ConvMaxout(num_pieces=NUM_PIECES)]
    x_discriminator = ConvolutionalSequence(
        layers=disc_x_stack, num_channels=NUM_CHANNELS,
        image_size=IMAGE_SIZE, name='x_discriminator')
    # Push allocation config so get_dim('output') is available below.
    x_discriminator.push_allocation_config()

    # z-branch: 1x1 convolutions acting as fully-connected layers.
    disc_z_stack = [
        conv_brick(1, 1, 512), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(1, 1, 512), ConvMaxout(num_pieces=NUM_PIECES)]
    z_discriminator = ConvolutionalSequence(
        layers=disc_z_stack,
        num_channels=NLAT, image_size=(1, 1), use_bias=False,
        name='z_discriminator')
    z_discriminator.push_allocation_config()

    # Joint branch over concatenated x- and z-features -> single logit.
    disc_joint_stack = [
        conv_brick(1, 1, 1024), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(1, 1, 1024), ConvMaxout(num_pieces=NUM_PIECES),
        conv_brick(1, 1, 1)]
    joint_discriminator = ConvolutionalSequence(
        layers=disc_joint_stack,
        num_channels=(x_discriminator.get_dim('output')[0] +
                      z_discriminator.get_dim('output')[0]),
        image_size=(1, 1),
        name='joint_discriminator')
    discriminator = XZJointDiscriminator(
        x_discriminator, z_discriminator, joint_discriminator,
        name='discriminator')

    ali = ALI(encoder, decoder, discriminator,
              weights_init=GAUSSIAN_INIT, biases_init=ZERO_INIT,
              name='ali')
    ali.push_allocation_config()

    # The encoder's output conv and the decoder's final conv do carry
    # (untied) biases, overriding use_bias=False on their sequences.
    encoder_mapping.layers[-1].use_bias = True
    encoder_mapping.layers[-1].tied_biases = False
    decoder_mapping.layers[-2].use_bias = True
    decoder_mapping.layers[-2].tied_biases = False
    ali.initialize()

    # Initialize the decoder's output bias from the CIFAR-10 marginals'
    # log-odds so early samples approximate the mean image.
    raw_marginals, = next(
        create_cifar10_data_streams(500, 500)[0].get_epoch_iterator())
    marginal_bias = get_log_odds(raw_marginals)
    decoder_mapping.layers[-2].b.set_value(marginal_bias)

    return ali
# Smoke-test the label embedder on a batch of random one-hot labels, then
# sketch the encoder layer stack for a 32x32x3 input.
test_labels = np.zeros(shape=(5, 10))
# FIX: npr.randint's upper bound is exclusive, so the original (0, 9) could
# never draw class 9 even though each one-hot row has 10 slots; use 10.
idx = npr.randint(0, 10, size=5)
for n, label in enumerate(idx):  # renamed loop var: `id` shadowed the builtin
    test_labels[n, label] = 1
embeddings = embedder_test(test_labels)
print(embeddings)
print(embeddings.shape)

# Generate synthetic 4D tensor — assumed (batch, channels, height, width);
# TODO(review) confirm the layout expected by the encoder.
features = npr.random(size=(5, 3, 32, 32))

# Testing Encoder: the inline comments track the spatial/channel shape
# after each conv block.
layers = [
    # 32 X 32 X 3
    conv_brick(5, 1, 32), bn_brick(), LeakyRectifier(leak=LEAK),
    # 28 X 28 X 32
    conv_brick(4, 2, 64), bn_brick(), LeakyRectifier(leak=LEAK),
    # 13 X 13 X 64
    conv_brick(4, 1, 128), bn_brick(), LeakyRectifier(leak=LEAK),
    # 10 X 10 X 128
    conv_brick(4, 2, 256), bn_brick(), LeakyRectifier(leak=LEAK),
    # 4 X 4 X 256
    conv_brick(4, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK),
    # 1 X 1 X 512
    conv_brick(1, 1, 512), bn_brick(), LeakyRectifier(leak=LEAK),
    # 1 X 1 X 512
    conv_brick(1, 1, 2 * NLAT)
    # 1 X 1 X 2 * NLAT
]