def decoder(x):
    print
    print 'x: ', x

    # fully connected layer with a leaky ReLU activation
    d_fc1 = lrelu(fc_layer(x, 256, False, 'd_fc2'))
    print 'd_fc1: ', d_fc1

    # fully connected layer with a leaky ReLU activation
    d_fc2 = lrelu(fc_layer(d_fc1, 512, False, 'd_fc3'))
    print 'd_fc2: ', d_fc2

    # reshape for use in transpose convolution (deconvolution);
    # must match the conv layers in the encoder
    d_fc2 = tf.reshape(d_fc2, (batch_size, 4, 4, 32))
    print 'd_fc2: ', d_fc2

    # transpose convolution with a leaky ReLU activation
    e_transpose_conv1 = lrelu(conv2d_transpose(d_fc2, 2, 2, 32, 'e_transpose_conv1'))
    print 'e_transpose_conv1: ', e_transpose_conv1

    # transpose convolution with a leaky ReLU activation
    e_transpose_conv2 = lrelu(conv2d_transpose(e_transpose_conv1, 2, 2, 64, 'e_transpose_conv2'))
    print 'e_transpose_conv2: ', e_transpose_conv2

    # transpose convolution with a leaky ReLU activation
    e_transpose_conv3 = lrelu(conv2d_transpose(e_transpose_conv2, 2, 2, 1, 'e_transpose_conv3'))
    print 'e_transpose_conv3: ', e_transpose_conv3

    # the stride-2 transpose convs take the resolution 4 -> 8 -> 16 -> 32,
    # so crop back down to the original MNIST size of (28, 28)
    e_transpose_conv3 = e_transpose_conv3[:, :28, :28, :]

    return e_transpose_conv3
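# The decoder relies on a conv2d_transpose(x, kernel, stride, out_channels, name)
# helper defined elsewhere in the post. As a rough illustration of how the stride-2
# transpose convolutions double the spatial resolution (4 -> 8 -> 16 -> 32), here is
# a minimal TF 1.x sketch of what such a helper could look like -- an assumption for
# illustration, not the post's actual implementation (it assumes `tensorflow` is
# imported as `tf`, as in the code above).
def _example_conv2d_transpose(x, kernel, stride, out_channels, name):
    with tf.variable_scope(name):
        in_channels = x.get_shape().as_list()[-1]
        height = int(x.get_shape()[1])
        width = int(x.get_shape()[2])
        # note: conv2d_transpose filters are shaped [kernel, kernel, out_channels, in_channels]
        w = tf.get_variable('w', [kernel, kernel, out_channels, in_channels],
                            initializer=tf.truncated_normal_initializer(stddev=0.02))
        b = tf.get_variable('b', [out_channels], initializer=tf.zeros_initializer())
        # the output shape doubles height and width when stride == 2
        out_shape = tf.stack([tf.shape(x)[0], height * stride, width * stride, out_channels])
        return tf.nn.conv2d_transpose(x, w, out_shape,
                                      strides=[1, stride, stride, 1],
                                      padding='SAME') + b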
def encoder(x):
    # convolutional layer with a leaky ReLU activation
    e_conv1 = lrelu(conv2d(x, 2, 2, 32, 'e_conv1'))
    print
    print 'conv1: ', e_conv1

    # convolutional layer with a leaky ReLU activation
    e_conv2 = lrelu(conv2d(e_conv1, 2, 2, 64, 'e_conv2'))
    print 'conv2: ', e_conv2

    # convolutional layer with a leaky ReLU activation
    e_conv3 = lrelu(conv2d(e_conv2, 2, 2, 32, 'e_conv3'))
    print 'conv3: ', e_conv3

    # fully connected layer with a leaky ReLU activation
    # the 'True' here means that we are flattening the input
    e_fc1 = lrelu(fc_layer(e_conv3, 512, True, 'e_fc1'))
    print 'fc1: ', e_fc1

    # fully connected layer with a leaky ReLU activation
    # the output from the previous fully connected layer is
    # already flat, so no need to flatten, hence 'False'
    e_fc2 = lrelu(fc_layer(e_fc1, 256, False, 'e_fc2'))
    print 'fc2: ', e_fc2

    # final fully connected layer producing the 128-dimensional code
    e_fc3 = lrelu(fc_layer(e_fc2, 128, False, 'e_fc3'))
    print 'fc3: ', e_fc3

    return e_fc3
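# For reference, lrelu, conv2d(x, kernel, stride, out_channels, name) and
# fc_layer(x, hidden_units, flatten, name) are helpers defined earlier in the post.
# The sketch below is only an assumed, minimal TF 1.x version of what they might
# look like (note the `flatten` flag, which is the True/False argument the encoder
# and decoder pass to fc_layer):
def _example_lrelu(x, leak=0.2):
    # leaky ReLU: pass positives through, scale negatives by `leak`
    return tf.maximum(x, leak * x)

def _example_conv2d(x, kernel, stride, out_channels, name):
    with tf.variable_scope(name):
        in_channels = x.get_shape().as_list()[-1]
        w = tf.get_variable('w', [kernel, kernel, in_channels, out_channels],
                            initializer=tf.truncated_normal_initializer(stddev=0.02))
        b = tf.get_variable('b', [out_channels], initializer=tf.zeros_initializer())
        return tf.nn.conv2d(x, w, strides=[1, stride, stride, 1], padding='SAME') + b

def _example_fc_layer(x, hidden_units, flatten, name):
    with tf.variable_scope(name):
        if flatten:
            # collapse (batch, height, width, channels) to (batch, height*width*channels)
            h, w, c = x.get_shape().as_list()[1:]
            x = tf.reshape(x, [-1, h * w * c])
        in_dim = x.get_shape().as_list()[-1]
        weights = tf.get_variable('w', [in_dim, hidden_units],
                                  initializer=tf.truncated_normal_initializer(stddev=0.02))
        bias = tf.get_variable('b', [hidden_units], initializer=tf.zeros_initializer())
        return tf.matmul(x, weights) + bias

# With encoder and decoder defined, the full autoencoder graph is just
# decoder(encoder(x)) trained against an L2 reconstruction loss, for example
# (assuming MNIST batches are fed in as [batch_size, 28, 28, 1]):
#
#   x        = tf.placeholder(tf.float32, [batch_size, 28, 28, 1])
#   recon    = decoder(encoder(x))
#   loss     = tf.reduce_mean(tf.square(recon - x))
#   train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)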