def generator_2(self, z):
    """
    Generator: project z, reshape it to 4x4 feature maps and upsample to a 64x64 image.
    :param z: random noise input of shape (batch_size, z_dim)
    :return: generated image G(z)
    """
    # project the noise vector and reshape it into 4x4 feature maps with 64 * 8 = 512 channels
    z_, self.h0_w, self.h0_b = linear(z, 64 * 4 * 4 * 8, 'g_h0_lin', with_w=True)
    z_resize = tf.reshape(z_, [-1, 4, 4, 64 * 8])
    # add a ReLU after the projection
    z_resize = tf.nn.relu(z_resize)

    # up_1 = upsampling(z_resize, [self.batch_size, 8, 8], 512, 1024, 2, name='g_up2')
    up_2 = upsampling(z_resize, [self.batch_size, 8, 8], 256, 512, 2, name='g_up3')
    up_3 = upsampling(up_2, [self.batch_size, 16, 16], 128, 256, 2, name='g_up4')
    up_4 = upsampling(up_3, [self.batch_size, 32, 32], 32, 128, 2, name='g_up5')
    up_5 = upsampling(up_4, [self.batch_size, 64, 64], 1, 32, 2, name='g_up6')
    # up_6 = upsampling(up_5, [self.batch_size, 128, 128], 16, 32, 2, name='g_up7')
    # up_7 = upsampling(up_6, [self.batch_size, 256, 256], 1, 16, 2, name='g_up8')

    return tf.nn.tanh(up_5)
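# NOTE: `linear` and `upsampling` are project-specific helpers that are not part of this listing.
# Purely as an illustration, a minimal sketch of what `upsampling` could look like is given below,
# assuming it is a transposed convolution (TensorFlow 1.x API). The 4x4 kernel, the initialiser and
# the absence of an activation are assumptions inferred from how the helper is called, not the
# project's actual implementation.
import tensorflow as tf

def upsampling(x, output_shape, out_channels, in_channels, stride, name='upsampling'):
    """Sketch: stride-`stride` transposed convolution that enlarges the spatial resolution."""
    with tf.variable_scope(name):
        # conv2d_transpose expects the filter as [height, width, out_channels, in_channels]
        kernel = tf.get_variable('w', [stride * 2, stride * 2, out_channels, in_channels],
                                 initializer=tf.truncated_normal_initializer(stddev=0.02))
        # works both for a static [batch, h, w] list (generator_2) and for a dynamic
        # tf.shape(...) tensor (the U-Net functions below)
        out_shape = tf.stack([output_shape[0], output_shape[1], output_shape[2], out_channels])
        # the real helper may additionally add a bias, batch normalisation or an activation
        return tf.nn.conv2d_transpose(x, kernel, output_shape=out_shape,
                                      strides=[1, stride, stride, 1], padding='SAME')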
def create_conditional_u_net(input, y, num_classes, keep_prob):
    # Conditional U-Net generator: concatenate the normalised image with the conditioning map y
    # along the channel axis and pass it through an encoder-decoder with skip connections.
    input = tf.nn.lrn(input)  # local response normalisation for intensity scaling
    image = tf.concat([input, y], axis=3)
    print("Generator input image")
    print_shape(image)

    # contracting path (stride-2 convolutions halve the spatial resolution)
    conv1_1 = convolution_block(image, [3, 3, 2, 64], [1, 1, 1, 1], 'SAME', keep_prob, 'g_conv1_1', batch_normalisation=True, tanh=False)
    conv1_2 = convolution_block(conv1_1, [3, 3, 64, 64], [1, 2, 2, 1], 'SAME', keep_prob, name='g_conv1_2', batch_normalisation=True, tanh=False)
    conv2_1 = convolution_block(conv1_2, [3, 3, 64, 128], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv2_1', batch_normalisation=True, tanh=False)
    conv2_2 = convolution_block(conv2_1, [3, 3, 128, 128], [1, 2, 2, 1], 'SAME', keep_prob, name='g_conv2_2', batch_normalisation=True, tanh=False)
    conv3_1 = convolution_block(conv2_2, [3, 3, 128, 256], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv3_1', batch_normalisation=True, tanh=False)
    conv3_2 = convolution_block(conv3_1, [3, 3, 256, 256], [1, 2, 2, 1], 'SAME', keep_prob, name='g_conv3_2', batch_normalisation=True, tanh=False)
    conv4_1 = convolution_block(conv3_2, [3, 3, 256, 512], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv4_1', batch_normalisation=True, tanh=False)
    conv4_2 = convolution_block(conv4_1, [3, 3, 512, 512], [1, 2, 2, 1], 'SAME', keep_prob, name='g_conv4_2', batch_normalisation=True, tanh=False)
    conv5_1 = convolution_block(conv4_2, [3, 3, 512, 1024], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv5_1', batch_normalisation=True, tanh=False)
    conv5_2 = convolution_block(conv5_1, [3, 3, 1024, 1024], [1, 2, 2, 1], 'SAME', keep_prob, name='g_conv5_2', batch_normalisation=True, tanh=False)

    # expanding path with skip connections
    up_6 = upsampling(conv5_2, tf.shape(conv4_2), 512, 1024, 2, name='g_up6')
    concat_6 = tf.concat([up_6, conv4_2], axis=3)
    conv6_1 = convolution_block(concat_6, [3, 3, 1024, 512], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv6_1', batch_normalisation=True, tanh=False)
    conv6_2 = convolution_block(conv6_1, [3, 3, 512, 512], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv6_2', batch_normalisation=True, tanh=False)

    up_7 = upsampling(conv6_2, tf.shape(conv3_2), 256, 512, 2, name='g_up7')
    concat_7 = tf.concat([up_7, conv3_2], axis=3)
    conv7_1 = convolution_block(concat_7, [3, 3, 512, 256], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv7_1', batch_normalisation=True, tanh=False)
    conv7_2 = convolution_block(conv7_1, [3, 3, 256, 256], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv7_2', batch_normalisation=True, tanh=False)

    up_8 = upsampling(conv7_2, tf.shape(conv2_2), 128, 256, 2, name='g_up8')
    concat_8 = tf.concat([up_8, conv2_2], axis=3)
    conv8_1 = convolution_block(concat_8, [3, 3, 256, 128], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv8_1', batch_normalisation=True, tanh=False)
    conv8_2 = convolution_block(conv8_1, [3, 3, 128, 128], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv8_2', batch_normalisation=True, tanh=False)

    up_9 = upsampling(conv8_2, tf.shape(conv1_2), 64, 128, 2, name='g_up9')
    concat_9 = tf.concat([up_9, conv1_2], axis=3)
    conv9_1 = convolution_block(concat_9, [3, 3, 128, 64], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv9_1', batch_normalisation=True, tanh=False)
    conv9_2 = convolution_block(conv9_1, [3, 3, 64, 64], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv9_2', batch_normalisation=True, tanh=False)

    # 1x1 convolution with a tanh output mapping to num_classes channels
    conv_10 = convolution_block(conv9_2, [1, 1, 64, num_classes], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv10', batch_normalisation=False, tanh=True)

    print("Completed creating U-NET")
    print("Number of classes is: " + str(num_classes))
    # with tf.variable_scope('g_logits'):
    #     logits = tf.nn.softmax(conv_10, name='generator_output')  # get logits here
    return conv_10
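# Hypothetical usage of create_conditional_u_net (sketch only): the first convolution expects
# 2 input channels, so the conditioning tensor y is assumed to be a single-channel map with the
# same spatial size as the image. Placeholder names and the 256x256 size below are illustrative,
# not taken from the original project.
import tensorflow as tf

image_ph = tf.placeholder(tf.float32, [None, 256, 256, 1], name='input_image')
y_ph = tf.placeholder(tf.float32, [None, 256, 256, 1], name='condition_map')
keep_prob_ph = tf.placeholder(tf.float32, name='keep_prob')

with tf.variable_scope('generator'):
    generated = create_conditional_u_net(image_ph, y_ph, num_classes=1, keep_prob=keep_prob_ph)
# `generated` has shape [batch, 256, 256, num_classes], with values in (-1, 1) from the tanh output.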
def create_encorder(self):
    # create the encoder side: a full U-Net that maps the input image to a single-channel output
    input = tf.nn.lrn(self.X_train)  # local response normalisation step
    conv1_1 = convolution(input, [3, 3, 1, 64], [1, 1, 1, 1], 'SAME', self.keep_prob, 'conv1_1')
    conv1_2 = convolution(conv1_1, [3, 3, 64, 64], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv1_2')
    max1 = max_pooling(conv1_2, [1, 2, 2, 1], [1, 2, 2, 1], name='max1')
    conv2_1 = convolution(max1, [3, 3, 64, 128], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv2_1')
    conv2_2 = convolution(conv2_1, [3, 3, 128, 128], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv2_2')
    max2 = max_pooling(conv2_2, [1, 2, 2, 1], [1, 2, 2, 1], name='max2')
    conv3_1 = convolution(max2, [3, 3, 128, 256], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv3_1')
    conv3_2 = convolution(conv3_1, [3, 3, 256, 256], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv3_2')
    max3 = max_pooling(conv3_2, [1, 2, 2, 1], [1, 2, 2, 1], name='max3')
    conv4_1 = convolution(max3, [3, 3, 256, 512], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv4_1')
    conv4_2 = convolution(conv4_1, [3, 3, 512, 512], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv4_2')
    max4 = max_pooling(conv4_2, [1, 2, 2, 1], [1, 2, 2, 1], name='max4')
    conv5_1 = convolution(max4, [3, 3, 512, 1024], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv5_1')
    conv5_2 = convolution(conv5_1, [3, 3, 1024, 1024], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv5_2')

    up_6 = upsampling(conv5_2, tf.shape(conv4_2), 512, 1024, 2, name='up6')
    concat_6 = tf.concat([up_6, conv4_2], axis=3)
    conv6_1 = convolution(concat_6, [3, 3, 1024, 512], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv6_1')
    conv6_2 = convolution(conv6_1, [3, 3, 512, 512], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv6_2')
    up_7 = upsampling(conv6_2, tf.shape(conv3_2), 256, 512, 2, name='up7')
    concat_7 = tf.concat([up_7, conv3_2], axis=3)
    conv7_1 = convolution(concat_7, [3, 3, 512, 256], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv7_1')
    conv7_2 = convolution(conv7_1, [3, 3, 256, 256], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv7_2')
    up_8 = upsampling(conv7_2, tf.shape(conv2_2), 128, 256, 2, name='up8')
    concat_8 = tf.concat([up_8, conv2_2], axis=3)
    conv8_1 = convolution(concat_8, [3, 3, 256, 128], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv8_1')
    conv8_2 = convolution(conv8_1, [3, 3, 128, 128], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv8_2')
    up_9 = upsampling(conv8_2, tf.shape(conv1_2), 64, 128, 2, name='up9')
    concat_9 = tf.concat([up_9, conv1_2], axis=3)
    conv9_1 = convolution(concat_9, [3, 3, 128, 64], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv9_1')
    conv9_2 = convolution(conv9_1, [3, 3, 64, 64], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv9_2')
    conv_10 = convolution(conv9_2, [1, 1, 64, 1], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv10')

    print("Down-sampling")
    self.print_shape(conv1_2)
    self.print_shape(max1)
    self.print_shape(conv2_1)
    self.print_shape(max2)
    self.print_shape(conv3_2)
    self.print_shape(max3)
    self.print_shape(conv4_1)
    self.print_shape(max4)
    self.print_shape(conv5_2)

    print("Upsampling")
    self.print_shape(concat_6)
    self.print_shape(conv6_2)
    self.print_shape(concat_7)
    self.print_shape(conv7_2)
    self.print_shape(concat_8)
    self.print_shape(conv8_2)
    self.print_shape(concat_9)
    self.print_shape(conv9_2)
    self.print_shape(conv_10)

    logits = tf.nn.softmax(conv_10)  # get logits here
    return logits
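# `convolution` and `max_pooling` are also project helpers that are not shown in this listing.
# The sketches below are assumptions about plausible implementations (2-D convolution + bias +
# ReLU + dropout, and a plain max-pool); the real helpers may differ, for example in the
# initialisation or the choice of activation.
import tensorflow as tf

def convolution(x, filter_shape, strides, padding, keep_prob, name='conv'):
    """Sketch: conv2d followed by ReLU and dropout, matching the call pattern above."""
    with tf.variable_scope(name):
        w = tf.get_variable('w', filter_shape, initializer=tf.truncated_normal_initializer(stddev=0.02))
        b = tf.get_variable('b', [filter_shape[-1]], initializer=tf.zeros_initializer())
        conv = tf.nn.conv2d(x, w, strides=strides, padding=padding) + b
        return tf.nn.dropout(tf.nn.relu(conv), keep_prob)

def max_pooling(x, ksize, strides, name='max_pool'):
    """Sketch: standard 2x2 max pooling with 'SAME' padding."""
    return tf.nn.max_pool(x, ksize=ksize, strides=strides, padding='SAME', name=name)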
def create_u_net_small(input, num_classes, keep_prob):
    # smaller U-Net generator with 5 block layers (the deeper blocks are commented out below)
    # input = tf.nn.lrn(input)  # normalisation step for intensity scaling
    conv1_1 = convolution_block(input, [3, 3, 1, 64], [1, 1, 1, 1], 'SAME', keep_prob, 'g_conv1_1', batch_normalisation=True, tanh=False)
    conv1_2 = convolution_block(conv1_1, [3, 3, 64, 64], [1, 2, 2, 1], 'SAME', keep_prob, name='g_conv1_2', batch_normalisation=True, tanh=False)
    conv2_1 = convolution_block(conv1_2, [3, 3, 64, 128], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv2_1', batch_normalisation=True, tanh=False)
    conv2_2 = convolution_block(conv2_1, [3, 3, 128, 128], [1, 2, 2, 1], 'SAME', keep_prob, name='g_conv2_2', batch_normalisation=True, tanh=False)
    conv3_1 = convolution_block(conv2_2, [3, 3, 128, 256], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv3_1', batch_normalisation=True, tanh=False)
    conv3_2 = convolution_block(conv3_1, [3, 3, 256, 256], [1, 2, 2, 1], 'SAME', keep_prob, name='g_conv3_2', batch_normalisation=True, tanh=False)
    """
    conv4_1 = convolution_block(conv3_2, [3, 3, 256, 512], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv4_1')
    conv4_2 = convolution_block(conv4_1, [3, 3, 512, 512], [1, 2, 2, 1], 'SAME', keep_prob, name='g_conv4_2')
    conv5_1 = convolution_block(conv4_2, [3, 3, 512, 1024], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv5_1')
    conv5_2 = convolution_block(conv5_1, [3, 3, 1024, 1024], [1, 2, 2, 1], 'SAME', keep_prob, name='g_conv5_2')

    up_6 = upsampling(conv5_2, tf.shape(conv4_2), 512, 1024, 2, name='g_up6')
    concat_6 = tf.concat([up_6, conv4_2], axis=3)
    conv6_1 = convolution_block(concat_6, [3, 3, 1024, 512], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv6_1')
    conv6_2 = convolution_block(conv6_1, [3, 3, 512, 512], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv6_2')
    """
    up_7 = upsampling(conv3_2, tf.shape(conv2_2), 128, 256, 2, name='g_up7')
    concat_7 = tf.concat([up_7, conv2_2], axis=3)
    conv7_1 = convolution_block(concat_7, [3, 3, 256, 128], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv7_1', batch_normalisation=True, tanh=False)
    conv7_2 = convolution_block(conv7_1, [3, 3, 128, 128], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv7_2', batch_normalisation=True, tanh=False)

    up_8 = upsampling(conv7_2, tf.shape(conv1_2), 64, 128, 2, name='g_up8')
    concat_8 = tf.concat([up_8, conv1_2], axis=3)
    conv8_1 = convolution_block(concat_8, [3, 3, 128, 64], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv8_1', batch_normalisation=True, tanh=False)
    conv8_2 = convolution_block(conv8_1, [3, 3, 64, 64], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv8_2', batch_normalisation=True, tanh=False)

    # up_9 = upsampling(conv8_2, tf.shape(conv1_2), 32, 64, 2, name='g_up9')
    # concat_9 = tf.concat([up_9, conv1_2], axis=3)
    # conv9_1 = convolution_block(concat_9, [3, 3, 64, 32], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv9_1', batch_normalisation=True)
    # conv9_2 = convolution_block(conv9_1, [3, 3, 32, 32], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv9_2', batch_normalisation=True)

    conv_10 = convolution_block(conv8_2, [1, 1, 64, num_classes], [1, 1, 1, 1], 'SAME', keep_prob, name='g_conv10', batch_normalisation=False, tanh=True)

    print("Completed creating U-NET (small)")
    # with tf.variable_scope('g_logits'):
    #     logits = tf.nn.softmax(conv_10, name='g_output')  # get logits here
    return conv_10
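# `convolution_block` is the strided building block used by the U-Net generators above. The sketch
# below is an assumption inferred from its arguments (optional batch normalisation, tanh vs. ReLU
# activation, dropout via keep_prob); the hard-coded `training=True` flag and the initialiser are
# simplifications, and the project's actual helper may differ.
import tensorflow as tf

def convolution_block(x, filter_shape, strides, padding, keep_prob, name='conv_block',
                      batch_normalisation=True, tanh=False):
    """Sketch: conv2d -> (optional) batch norm -> tanh or ReLU -> dropout."""
    with tf.variable_scope(name):
        w = tf.get_variable('w', filter_shape, initializer=tf.truncated_normal_initializer(stddev=0.02))
        conv = tf.nn.conv2d(x, w, strides=strides, padding=padding)
        if batch_normalisation:
            conv = tf.layers.batch_normalization(conv, training=True)
        activated = tf.nn.tanh(conv) if tanh else tf.nn.relu(conv)
        return tf.nn.dropout(activated, keep_prob)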
def create_decoder(self, input_tensor):
    # create the decoder side: a second U-Net that maps a single-channel input to num_classes channels
    conv11_1 = convolution(input_tensor, [3, 3, 1, 64], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv11_1')
    max11 = max_pooling(conv11_1, [1, 2, 2, 1], [1, 2, 2, 1], name='max11')
    conv12_1 = convolution(max11, [3, 3, 64, 128], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv12_1')
    conv12_2 = convolution(conv12_1, [3, 3, 128, 128], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv12_2')
    max12 = max_pooling(conv12_2, [1, 2, 2, 1], [1, 2, 2, 1], name='max12')
    conv13_1 = convolution(max12, [3, 3, 128, 256], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv13_1')
    conv13_2 = convolution(conv13_1, [3, 3, 256, 256], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv13_2')
    max13 = max_pooling(conv13_2, [1, 2, 2, 1], [1, 2, 2, 1], name='max13')
    conv14_1 = convolution(max13, [3, 3, 256, 512], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv14_1')
    conv14_2 = convolution(conv14_1, [3, 3, 512, 512], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv14_2')
    max14 = max_pooling(conv14_2, [1, 2, 2, 1], [1, 2, 2, 1], name='max14')
    conv15_1 = convolution(max14, [3, 3, 512, 1024], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv15_1')
    conv15_2 = convolution(conv15_1, [3, 3, 1024, 1024], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv15_2')

    up_16 = upsampling(conv15_2, tf.shape(conv14_2), 512, 1024, 2, name='up16')
    concat_16 = tf.concat([up_16, conv14_2], axis=3)
    conv16_1 = convolution(concat_16, [3, 3, 1024, 512], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv16_1')
    conv16_2 = convolution(conv16_1, [3, 3, 512, 512], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv16_2')
    up_17 = upsampling(conv16_2, tf.shape(conv13_2), 256, 512, 2, name='up17')
    concat_17 = tf.concat([up_17, conv13_2], axis=3)
    conv17_1 = convolution(concat_17, [3, 3, 512, 256], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv17_1')
    conv17_2 = convolution(conv17_1, [3, 3, 256, 256], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv17_2')
    up_18 = upsampling(conv17_2, tf.shape(conv12_2), 128, 256, 2, name='up18')
    concat_18 = tf.concat([up_18, conv12_2], axis=3)
    conv18_1 = convolution(concat_18, [3, 3, 256, 128], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv18_1')
    conv18_2 = convolution(conv18_1, [3, 3, 128, 128], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv18_2')
    up_19 = upsampling(conv18_2, tf.shape(conv11_1), 64, 128, 2, name='up19')
    concat_19 = tf.concat([up_19, conv11_1], axis=3)
    conv19_1 = convolution(concat_19, [3, 3, 128, 64], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv19_1')
    conv19_2 = convolution(conv19_1, [3, 3, 64, 64], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv19_2')
    conv20 = convolution(conv19_2, [1, 1, 64, self.num_classes], [1, 1, 1, 1], 'SAME', self.keep_prob, name='conv20')

    logits = tf.nn.softmax(conv20)
    return logits
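# The two networks above appear to form a two-stage pipeline: `create_encorder` maps the input
# image to a single-channel map, which `create_decoder` then maps to `self.num_classes` channels.
# The wiring below is a hypothetical sketch; the method name `build_two_stage_network` is
# illustrative and not part of the original code.
def build_two_stage_network(self):
    intermediate = self.create_encorder()             # shape [batch, H, W, 1]
    segmentation = self.create_decoder(intermediate)  # shape [batch, H, W, self.num_classes]
    return segmentation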