def model_simple_upsampling__reshape(img_shape, class_n=None):
    """Build a 3D convolutional encoder-decoder for voxel-wise classification.

    The network downsamples the input volume twice with 2x2x2 max pooling,
    upsamples back to the original resolution, maps to ``class_n`` channels
    with a 1x1x1 convolution, flattens the spatial dimensions, and applies a
    softmax over classes at every voxel.

    Parameters
    ----------
    img_shape : sequence of int
        Spatial shape (x, y, z) of the input volume. Each dimension should be
        divisible by 4 so pooling followed by upsampling restores it exactly.
    class_n : int
        Number of output classes per voxel.

    Returns
    -------
    keras.models.Model
        Model mapping (x, y, z, 1) volumes to (prod(img_shape), class_n)
        per-voxel class probabilities.
    """
    # Import numpy locally, consistent with the function's other local
    # imports; the original body used N.prod without bringing N into scope.
    import numpy as N
    from keras.layers import Input, Convolution3D, MaxPooling3D, UpSampling3D, Reshape
    from keras.models import Model
    from keras.layers.core import Activation
    from aitom.classify.deep.unsupervised.autoencoder.seg_util import conv_block

    NUM_CHANNELS = 1
    input_shape = (None, img_shape[0], img_shape[1], img_shape[2], NUM_CHANNELS)

    # Use relu activation for hidden layers to guarantee non-negative outputs
    # are passed to the max pooling layer. In that case, as long as the output
    # layer has linear activation, the network can still accommodate negative
    # image intensities — just a matter of shifting back using the bias term.
    input_img = Input(shape=input_shape[1:])
    x = input_img

    # Encoder: two conv + pool stages; spatial size shrinks by a factor of 4.
    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), border_mode='same')(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), border_mode='same')(x)
    x = conv_block(x, 32, 3, 3, 3)

    # Decoder: two upsampling stages restore the original spatial size.
    x = UpSampling3D((2, 2, 2))(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = UpSampling3D((2, 2, 2))(x)
    x = conv_block(x, 32, 3, 3, 3)

    # 1x1x1 convolution produces one score per class at every voxel; reshape
    # flattens space so the softmax normalizes over the class axis per voxel.
    x = Convolution3D(class_n, 1, 1, 1, border_mode='same')(x)
    x = Reshape((N.prod(img_shape), class_n))(x)
    x = Activation('softmax')(x)

    model = Model(input=input_img, output=x)

    print('model layers:')
    for l in model.layers:
        print(l.output_shape, l.name)

    return model
def encoder_simple_conv(img_shape, encoding_dim=32, NUM_CHANNELS=1):
    """Build a 3D convolutional autoencoder with a dense sparse bottleneck.

    The encoder applies two conv + 2x2x2 max-pool stages, flattens, and maps
    to an ``encoding_dim``-dimensional code with an L1 activity regularizer
    (to encourage sparse codes). The decoder mirrors this: a dense layer back
    to the conv feature-map size, a reshape, and two upsampling + conv stages,
    finishing with a linear-activation conv so reconstructed intensities may
    be negative.

    Parameters
    ----------
    img_shape : sequence of int
        Spatial shape (x, y, z) of the input volume; each dimension should be
        divisible by 4 so the decoder restores it exactly.
    encoding_dim : int
        Size of the bottleneck code.
    NUM_CHANNELS : int
        Number of input channels (default 1).

    Returns
    -------
    dict
        {'autoencoder': keras.models.Sequential,
         'encoder': keras.models.Model,
         'decoder': keras.models.Model}
    """
    # Import numpy locally, consistent with the other local imports; the
    # original body used N.prod without bringing N into scope.
    import numpy as N
    from aitom.classify.deep.unsupervised.autoencoder.seg_util import conv_block
    from keras.layers import Input, Dense, Convolution3D, MaxPooling3D, UpSampling3D, Reshape, Flatten
    from keras.models import Sequential, Model
    from keras import regularizers

    input_shape = (None, img_shape[0], img_shape[1], img_shape[2], NUM_CHANNELS)

    # Use relu activation for hidden layers to guarantee non-negative outputs
    # are passed to the max pooling layer. In that case, as long as the output
    # layer has linear activation, the network can still accommodate negative
    # image intensities — just a matter of shifting back using the bias term.
    input_img = Input(shape=input_shape[1:])
    x = input_img
    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), border_mode='same')(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), border_mode='same')(x)

    # x.get_shape() yields tensorflow Dimension objects; record the concrete
    # conv feature-map shape so the decoder can reshape back to it.
    encoder_conv_shape = [_.value for _ in x.get_shape()]

    x = Flatten()(x)
    # Dense bottleneck with L1 activity regularization (sparse encoding).
    x = Dense(encoding_dim, activation='relu',
              activity_regularizer=regularizers.l1(10e-5))(x)
    encoded = x

    encoder = Model(input=input_img, output=encoded)
    print('encoder', 'input shape', encoder.input_shape, 'output shape', encoder.output_shape)

    # Decoder: dense layer back to the conv feature-map size (batch dim
    # excluded via [1:]), reshape, then two upsampling + conv stages.
    input_img_decoder = Input(shape=encoder.output_shape[1:])
    x = input_img_decoder
    x = Dense(N.prod(encoder_conv_shape[1:]), activation='relu')(x)
    x = Reshape(encoder_conv_shape[1:])(x)
    x = UpSampling3D((2, 2, 2))(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = UpSampling3D((2, 2, 2))(x)
    x = conv_block(x, 32, 3, 3, 3)
    # Keep the output layer linear so reconstructed intensities can be negative.
    x = Convolution3D(1, 3, 3, 3, activation='linear', border_mode='same')(x)
    decoded = x
    decoder = Model(input=input_img_decoder, output=decoded)

    # Assemble the autoencoder from the expanded individual layers (rather
    # than nesting encoder/decoder as sub-models), hopefully allowing
    # parallel training.
    autoencoder = Sequential()
    for l in encoder.layers:
        autoencoder.add(l)
    for l in decoder.layers:
        autoencoder.add(l)

    print('autoencoder layers:')
    for l in autoencoder.layers:
        print(l.output_shape)

    return {'autoencoder': autoencoder, 'encoder': encoder, 'decoder': decoder}