def self(x_train, y_train, x_test, y_test):
    """Rebuild the 3-D conv autoencoder graph, load trained weights and export features.

    Recreates the same network as the training routine, loads the checkpoint
    'my_model_self.tflearn', predicts on x_train and hands the result to
    save_hdf5.

    NOTE(review): the name "self" shadows the conventional method-receiver
    name, and y_train/x_test/y_test are unused — confirm intended interface.
    """
    # Input: assumes samples shaped (224, 5, 5, 1) — TODO confirm with caller.
    int_put = input_data(shape=[None, 224, 5, 5, 1], )
    conv1 = conv_3d(
        int_put,
        24,
        [24, 3, 3],
        padding='VALID',
        strides=[1, 1, 1, 1, 1],
        activation='prelu',
    )
    print('conv1', conv1.get_shape().as_list())
    batch_norm = batch_normalization(conv1)
    conv2 = conv_3d(
        batch_norm,
        12,
        [24, 3, 3],
        padding='VALID',
        strides=[1, 1, 1, 1, 1],
        activation='prelu',
    )
    print('conv2', conv2.get_shape().as_list())
    batch_norm_con = batch_normalization(conv2)
    # Decoder: two transposed convolutions back to the input shape.
    decon2 = conv_3d_transpose(batch_norm_con, 24, [24, 3, 3], padding='VALID',
                               output_shape=[201, 3, 3, 24])
    batch_norm = batch_normalization(decon2)
    print('a')
    decon2 = conv_3d_transpose(batch_norm, 1, [24, 3, 3], padding='VALID',
                               output_shape=[224, 5, 5, 1])
    batch_norm = batch_normalization(decon2)
    # A regression layer is required by tflearn.DNN even for inference-only use.
    network = regression(batch_norm, optimizer='Adagrad', loss='mean_square',
                         learning_rate=0.01, metric='R2')
    feature_model = tflearn.DNN(network)
    feature_model.load('my_model_self.tflearn')
    x_feature = feature_model.predict(x_train)
    save_hdf5(x_feature)
    print('asd')
def conv_transpose_layer(input, n_filters, stride, output_shape):
    """Thin wrapper around conv.conv_3d_transpose with the project defaults.

    Fixed 3x3x3 kernel, 'same' padding, ELU activation and zero bias init;
    only the input, filter count, stride and output shape vary per call.
    """
    layer_kwargs = dict(
        output_shape=output_shape,
        strides=stride,
        padding='same',
        activation='elu',
        bias_init='zeros',
        scope=None,
        name='Conv3D',
    )
    return conv.conv_3d_transpose(input, n_filters, 3, **layer_kwargs)
def up_block(input_tensor, concat, reuse, scope, init):
    """Upsample `input_tensor` 2x with a transposed conv, then apply PReLU.

    The target spatial size is read from `concat` (a cubic volume is
    assumed: its dim-1 size is used for all three axes) and the channel
    count of the input is preserved.
    """
    channels = int(input_tensor.get_shape()[-1])
    target_dim = concat.get_shape().as_list()[1]
    upsampled = conv_3d_transpose(
        input_tensor,
        channels,
        filter_size=2,
        strides=2,
        output_shape=[target_dim] * 3,
        activation='linear',
        padding='same',
        weights_init=init,
        scope=scope + "_1",
        reuse=reuse,
    )
    # Batch normalization was deliberately disabled in the original code.
    return tflearn.activation(upsampled, "prelu")
def arch_fusionnet_translator_3d_iso_tflearn(img, feats=[None, None, None], last_dim=1, nl=INLReLU3D, nb_filters=32):
    """FusionNet-style 3-D encoder/decoder translator built with tflearn.

    Args:
        img: 5-D input tensor (batch, z, y, x, channels) — dims read from
            its static shape below.
        feats: unused in the current implementation; kept for interface
            compatibility. NOTE(review): mutable default — harmless only
            while it stays unused.
        last_dim: number of output channels of the final layer.
        nl: unused in the current implementation.
        nb_filters: base filter count; deeper stages use multiples of it.

    Returns:
        Output tensor with tanh activation at the input's spatial size.

    Fixes vs. the original:
      * the `dimx` output-shape terms used `/` (true division) while the
        `dimz`/`dimy` terms used `//`; under Python 3 that yields floats in
        `output_shape` — all terms now use ceil division `-(-n // d)`.
      * Python 2 `print` statements converted to `print()` calls, matching
        the rest of this file.
    """
    # tflearn's conv_3d / conv_3d_transpose must be decorated with
    # @tf.contrib.framework.add_arg_scope (in tflearn/layers/conv.py) for
    # these arg_scope defaults to apply.
    with tf.contrib.framework.arg_scope([conv_3d], filter_size=4,
                                        strides=[1, 2, 2, 2, 1],
                                        activation='leaky_relu'):
        with tf.contrib.framework.arg_scope([conv_3d_transpose], filter_size=4,
                                            strides=[1, 2, 2, 2, 1],
                                            activation='leaky_relu'):
            shape = img.get_shape().as_list()
            dimb, dimz, dimy, dimx, dimc = shape

            # ---- Encoder: five stride-2 conv + bottleneck + dropout stages.
            e1a = conv_3d(incoming=img, name="e1a", nb_filter=nb_filters * 1, bias=False)
            r1a = tf_bottleneck(e1a, name="r1a", nb_filter=nb_filters * 1)
            r1a = tf.nn.dropout(r1a, keep_prob=0.5)
            e2a = conv_3d(incoming=r1a, name="e2a", nb_filter=nb_filters * 1, bias=False)
            r2a = tf_bottleneck(e2a, name="r2a", nb_filter=nb_filters * 1)
            r2a = tf.nn.dropout(r2a, keep_prob=0.5)
            e3a = conv_3d(incoming=r2a, name="e3a", nb_filter=nb_filters * 2, bias=False)
            r3a = tf_bottleneck(e3a, name="r3a", nb_filter=nb_filters * 2)
            r3a = tf.nn.dropout(r3a, keep_prob=0.5)
            e4a = conv_3d(incoming=r3a, name="e4a", nb_filter=nb_filters * 2, bias=False)
            r4a = tf_bottleneck(e4a, name="r4a", nb_filter=nb_filters * 2)
            r4a = tf.nn.dropout(r4a, keep_prob=0.5)
            e5a = conv_3d(incoming=r4a, name="e5a", nb_filter=nb_filters * 4, bias=False)
            r5a = tf_bottleneck(e5a, name="r5a", nb_filter=nb_filters * 4)
            r5a = tf.nn.dropout(r5a, keep_prob=0.5)

            print("In1 :", img.get_shape().as_list())
            print("E1a :", e1a.get_shape().as_list())
            print("R1a :", r1a.get_shape().as_list())
            print("E2a :", e2a.get_shape().as_list())
            print("R2a :", r2a.get_shape().as_list())
            print("E3a :", e3a.get_shape().as_list())
            print("R3a :", r3a.get_shape().as_list())
            print("E4a :", e4a.get_shape().as_list())
            print("R4a :", r4a.get_shape().as_list())
            print("E5a :", e5a.get_shape().as_list())
            print("R5a :", r5a.get_shape().as_list())

            # ---- Decoder: transposed convs with additive skip connections.
            # Output sizes use ceil division: -(-n // d) == ceil(n / d).
            r5b = tf_bottleneck(r5a, name="r5b", nb_filter=nb_filters * 4)
            d4b = conv_3d_transpose(incoming=r5b, name="d4b", nb_filter=nb_filters * 2,
                                    output_shape=[-(-dimz // (2 ** 4)),
                                                  -(-dimy // (2 ** 4)),
                                                  -(-dimx // (2 ** 4))],
                                    bias=False)
            a4b = tf.add(d4b, r4a, name="a4b")
            r4b = tf_bottleneck(a4b, name="r4b", nb_filter=nb_filters * 2)
            d3b = conv_3d_transpose(incoming=r4b, name="d3b", nb_filter=nb_filters * 2,
                                    output_shape=[-(-dimz // (2 ** 3)),
                                                  -(-dimy // (2 ** 3)),
                                                  -(-dimx // (2 ** 3))],
                                    bias=False)
            a3b = tf.add(d3b, r3a, name="a3b")
            r3b = tf_bottleneck(a3b, name="r3b", nb_filter=nb_filters * 2)
            d2b = conv_3d_transpose(incoming=r3b, name="d2b", nb_filter=nb_filters * 1,
                                    output_shape=[-(-dimz // (2 ** 2)),
                                                  -(-dimy // (2 ** 2)),
                                                  -(-dimx // (2 ** 2))],
                                    bias=False)
            a2b = tf.add(d2b, r2a, name="a2b")
            r2b = tf_bottleneck(a2b, name="r2b", nb_filter=nb_filters * 1)
            d1b = conv_3d_transpose(incoming=r2b, name="d1b", nb_filter=nb_filters * 1,
                                    output_shape=[-(-dimz // (2 ** 1)),
                                                  -(-dimy // (2 ** 1)),
                                                  -(-dimx // (2 ** 1))],
                                    bias=False)
            a1b = tf.add(d1b, r1a, name="a1b")
            out = conv_3d_transpose(incoming=a1b, name="out", nb_filter=last_dim,
                                    activation='tanh',
                                    output_shape=[-(-dimz // (2 ** 0)),
                                                  -(-dimy // (2 ** 0)),
                                                  -(-dimx // (2 ** 0))])

            print("R5b :", r5b.get_shape().as_list())
            print("D4b :", d4b.get_shape().as_list())
            print("A4b :", a4b.get_shape().as_list())
            print("R4b :", r4b.get_shape().as_list())
            print("D3b :", d3b.get_shape().as_list())
            print("A3b :", a3b.get_shape().as_list())
            print("R3b :", r3b.get_shape().as_list())
            print("D2b :", d2b.get_shape().as_list())
            print("A2b :", a2b.get_shape().as_list())
            print("R2b :", r2b.get_shape().as_list())
            print("D1b :", d1b.get_shape().as_list())
            print("A1b :", a1b.get_shape().as_list())
            print("Out :", out.get_shape().as_list())
            return out
def train(X=None, gpu_id=0, sparsity=False, latent=64, num_filters=32,
          filter_size=5, sparsity_level=DEFAULT_SPARSITY_LEVEL,
          sparsity_weight=DEFAULT_SPARSITY_WEIGHT, epochs=10, conv=False,
          checkpoint=None, is_training=True):
    """Train (or restore) a 3-D voxel autoencoder and return its models.

    Args:
        X: training data; required when no checkpoint is given.
        gpu_id: GPU index for tf.device placement (None for default device).
        sparsity: if True, add a KL-divergence sparsity penalty to the loss.
        latent: size of the latent (bottleneck) layer.
        num_filters, filter_size: conv layer hyperparameters (conv=True only).
        sparsity_level, sparsity_weight: sparsity penalty hyperparameters.
        epochs: number of training epochs.
        conv: use a convolutional encoder/decoder instead of dense layers.
        checkpoint: checkpoint id to restore/save; defaults to ID_.
        is_training: train and save when True, restore when False.

    Returns:
        dict with 'model' (full autoencoder) and 'encoding_model' (encoder
        sharing the same session).

    Fix vs. the original: saver.restore() was passed the nonexistent
    attribute `encoding_model.models`; tf.train.Saver.restore requires a
    Session, so it now gets `encoding_model.session` (mirroring the
    saver.save call in the training branch).
    """
    assert checkpoint is not None or X is not None,\
        'Either data to train on or model to restore is required.'
    print(' * [INFO] Using GPU %s' % gpu_id)
    print(' * [INFO]', 'Using' if sparsity else 'Not using', 'sparsity')
    print(' * [INFO] Latent dimensions: %d' % latent)
    print(' * [INFO]', 'Using' if conv else 'Not using', 'convolutional layers.')
    with tf.device(None if gpu_id is None else '/gpu:%s' % gpu_id):
        # ---- Encoder.
        if conv:
            # Assumes 30x30x30 single-channel voxel grids — TODO confirm.
            encoder = tflearn.input_data(shape=[None, 30, 30, 30, 1])
            encoder = conv_3d(encoder, num_filters, filter_size,
                              activation=tf.nn.sigmoid)
            encoder = tflearn.fully_connected(encoder, latent,
                                              activation=tf.nn.sigmoid)
        else:
            encoder = tflearn.input_data(shape=[None, 27000])
            encoder = tflearn.fully_connected(encoder, 256, activation=tf.nn.relu)
            encoder = tflearn.fully_connected(encoder, 64, activation=tf.nn.relu)
            encoder = tflearn.fully_connected(encoder, latent, activation=tf.nn.relu)
        if sparsity:
            avg_activations = tf.reduce_mean(encoder, axis=1)
            div = tf.reduce_mean(kl_divergence(avg_activations, sparsity_level))
        # ---- Decoder.
        if conv:
            decoder = tflearn.fully_connected(encoder, (30 ** 3) * num_filters,
                                              activation=tf.nn.sigmoid)
            decoder = tflearn.reshape(decoder, [-1, 30, 30, 30, num_filters])
            decoder = conv_3d_transpose(decoder, 1, filter_size, [30, 30, 30],
                                        activation=tf.nn.sigmoid)
        else:
            decoder = tflearn.fully_connected(encoder, 64, activation=tf.nn.relu)
            decoder = tflearn.fully_connected(decoder, 256, activation=tf.nn.relu)
            decoder = tflearn.fully_connected(decoder, 27000, activation=tf.nn.relu)

        def sparsity_loss(y_pred, y_true):
            # MSE plus weighted KL sparsity penalty; `div` exists only when
            # sparsity=True, and this closure is only selected in that case.
            return tf.reduce_mean(tf.square(y_pred - y_true)) + \
                sparsity_weight * div

        # Regression head with mean squared error (optionally sparsity-penalized).
        net = tflearn.regression(decoder, optimizer='adam',
                                 learning_rate=1e-4,
                                 loss=sparsity_loss if sparsity else 'mean_square',
                                 metric=None)
        # ---- Build DNN wrappers sharing one session, then train or restore.
        model = tflearn.DNN(net, tensorboard_verbose=0)
        encoding_model = tflearn.DNN(encoder, session=model.session)
        saver = tf.train.Saver()
        checkpoint_path = CKPT_FORMAT.format(id=checkpoint or ID_)
        if is_training:
            model.fit(X, X, n_epoch=epochs, run_id="auto_encoder", batch_size=256)
            saver.save(encoding_model.session, checkpoint_path)
        else:
            # BUG FIX: restore() takes a tf.Session, not `.models`.
            saver.restore(encoding_model.session, checkpoint_path)
        return {'model': model, 'encoding_model': encoding_model}
def generator_fusionnet(images, name='generator'):
    """FusionNet generator: 8-level 3-D conv encoder, mirrored transposed-conv
    decoder with element-wise-sum skip connections.

    Args:
        images: 5-D input tensor — assumed (batch, DIMZ, DIMY, DIMX, C);
            TODO confirm against caller.
        name: variable scope for all layers.

    Returns:
        (out, e8): the tanh-activated reconstruction and the innermost
        encoder feature map.

    Fix vs. the original: `dimz, dimy, dimx = dimz/2, ...` used true
    division, which produces floats under Python 3 — changed to `//`.
    (These values are currently unused afterwards; the decoder's
    output_shape values are hard-coded below.)
    """
    dimx = DIMX
    dimy = DIMY
    dimz = DIMZ
    with tf.variable_scope(name):
        # ---- Encoder: stride-1 stem, then seven stride-2 downsampling convs.
        e1 = conv_3d(incoming=images, nb_filter=NB_FILTERS * 1, filter_size=4,
                     strides=[1, 1, 1, 1, 1],  # DIMZ/1, DIMY/2, DIMX/2,
                     regularizer='L1', activation='elu')
        e1 = batch_normalization(incoming=e1)
        ###
        e2 = conv_3d(incoming=e1, nb_filter=NB_FILTERS * 1, filter_size=4,
                     strides=[1, 2, 2, 2, 1],  # DIMZ/2, DIMY/4, DIMX/4,
                     regularizer='L1', activation='elu')
        e2 = batch_normalization(incoming=e2)
        ###
        e3 = conv_3d(incoming=e2, nb_filter=NB_FILTERS * 2, filter_size=4,
                     strides=[1, 2, 2, 2, 1],  # DIMZ/4, DIMY/8, DIMX/8,
                     regularizer='L1', activation='elu')
        e3 = batch_normalization(incoming=e3)
        ###
        e4 = conv_3d(incoming=e3, nb_filter=NB_FILTERS * 2, filter_size=4,
                     strides=[1, 2, 2, 2, 1],  # DIMZ/8, DIMY/16, DIMX/16,
                     regularizer='L1', activation='elu')
        e4 = batch_normalization(incoming=e4)
        ###
        e5 = conv_3d(incoming=e4, nb_filter=NB_FILTERS * 4, filter_size=4,
                     strides=[1, 2, 2, 2, 1],  # DIMZ/16, DIMY/32, DIMX/32,
                     regularizer='L1', activation='elu')
        e5 = batch_normalization(incoming=e5)
        ###
        e6 = conv_3d(incoming=e5, nb_filter=NB_FILTERS * 4, filter_size=4,
                     strides=[1, 2, 2, 2, 1],  # DIMZ/32, DIMY/64, DIMX/64,
                     regularizer='L1', activation='elu')
        e6 = batch_normalization(incoming=e6)
        ###
        e7 = conv_3d(incoming=e6, nb_filter=NB_FILTERS * 8, filter_size=4,
                     strides=[1, 2, 2, 2, 1],  # DIMZ/64, DIMY/128, DIMX/128,
                     regularizer='L1', activation='elu')
        e7 = batch_normalization(incoming=e7)
        ### Middle (bottleneck).
        e8 = conv_3d(incoming=e7, nb_filter=NB_FILTERS * 8, filter_size=4,
                     strides=[1, 2, 2, 2, 1],  # DIMZ/128, DIMY/256, DIMX/256,
                     regularizer='L1', activation='elu')
        # Integer halving (was true division `/`, a float under Python 3).
        dimz, dimy, dimx = dimz // 2, dimy // 2, dimx // 2
        e8 = batch_normalization(incoming=e8)

        # ---- Decoder: upsample and fuse with the matching encoder level.
        d7 = conv_3d_transpose(incoming=e8, nb_filter=NB_FILTERS * 8, filter_size=4,
                               strides=[1, 2, 2, 2, 1],  # DIMZ/64, DIMY/128, DIMX/128,
                               regularizer='L1', activation='elu',
                               output_shape=[2, 4, 4])
        d7 = batch_normalization(incoming=d7)
        d7 = dropout(incoming=d7, keep_prob=0.5)
        d7 = merge(tensors_list=[d7, e7], mode='elemwise_sum')  # d7 = d7+e7
        ###
        d6 = conv_3d_transpose(incoming=d7, nb_filter=NB_FILTERS * 4, filter_size=4,
                               strides=[1, 2, 2, 2, 1],  # DIMZ/32, DIMY/64, DIMX/64,
                               regularizer='L1', activation='elu',
                               output_shape=[4, 8, 8])
        d6 = batch_normalization(incoming=d6)
        d6 = dropout(incoming=d6, keep_prob=0.5)
        d6 = merge(tensors_list=[d6, e6], mode='elemwise_sum')  # d6 = d6+e6
        ###
        d5 = conv_3d_transpose(incoming=d6, nb_filter=NB_FILTERS * 4, filter_size=4,
                               strides=[1, 2, 2, 2, 1],  # DIMZ/16, DIMY/32, DIMX/32,
                               regularizer='L1', activation='elu',
                               output_shape=[8, 16, 16])
        d5 = batch_normalization(incoming=d5)
        d5 = dropout(incoming=d5, keep_prob=0.5)
        d5 = merge(tensors_list=[d5, e5], mode='elemwise_sum')  # d5 = d5+e5
        ### No dropout from here down (matches original).
        d4 = conv_3d_transpose(incoming=d5, nb_filter=NB_FILTERS * 2, filter_size=4,
                               strides=[1, 2, 2, 2, 1],  # DIMZ/8, DIMY/16, DIMX/16,
                               regularizer='L1', activation='elu',
                               output_shape=[16, 32, 32])
        d4 = batch_normalization(incoming=d4)
        d4 = merge(tensors_list=[d4, e4], mode='elemwise_sum')  # d4 = d4+e4
        ###
        d3 = conv_3d_transpose(incoming=d4, nb_filter=NB_FILTERS * 2, filter_size=4,
                               strides=[1, 2, 2, 2, 1],  # DIMZ/4, DIMY/8, DIMX/8,
                               regularizer='L1', activation='elu',
                               output_shape=[32, 64, 64])
        d3 = batch_normalization(incoming=d3)
        d3 = merge(tensors_list=[d3, e3], mode='elemwise_sum')  # d3 = d3+e3
        ###
        d2 = conv_3d_transpose(incoming=d3, nb_filter=NB_FILTERS * 1, filter_size=4,
                               strides=[1, 2, 2, 2, 1],  # DIMZ/2, DIMY/4, DIMX/4,
                               regularizer='L1', activation='elu',
                               output_shape=[64, 128, 128])
        d2 = batch_normalization(incoming=d2)
        d2 = merge(tensors_list=[d2, e2], mode='elemwise_sum')  # d2 = d2+e2
        ###
        d1 = conv_3d_transpose(incoming=d2, nb_filter=NB_FILTERS * 1, filter_size=4,
                               strides=[1, 2, 2, 2, 1],  # DIMZ/1, DIMY/2, DIMX/2,
                               regularizer='L1', activation='elu',
                               output_shape=[128, 256, 256])
        d1 = batch_normalization(incoming=d1)
        d1 = merge(tensors_list=[d1, e1], mode='elemwise_sum')  # d1 = d1+e1
        ### Output head: stride 1, tanh range.
        out = conv_3d_transpose(incoming=d1, nb_filter=1, filter_size=4,
                                strides=[1, 1, 1, 1, 1],  # DIMZ/1, DIMY/1, DIMX/1,
                                regularizer='L1', activation='tanh',
                                output_shape=[128, 256, 256])
        return out, e8
def vol3d_decoder(self, x, name='Vol3D_Decoder'):
    """Decode a latent tensor back into an image volume via stacked 3-D
    transposed convolutions with bottleneck blocks.

    Args:
        x: input tensor; reshaped to (-1, 4, 4, 4, 3) below, so its total
           size must be compatible — TODO confirm caller's shape.
        name: unused in the current implementation (kept for interface).

    Returns:
        Decoded tensor mapped back to image range via tf_2imag.

    Fixes vs. the original:
      * one DIMX output-shape term used `/` (true division): int() then
        truncated the float instead of taking the intended ceiling —
        changed to `//` so -(-n // d) is exact ceil division.
      * the d2b layer was scoped name="d1b" (clear copy-paste typo) —
        renamed to "d2b". NOTE(review): this changes the variable scope,
        so checkpoints saved under the old "d1b" name will not load.
    """
    with argscope([Conv3DTranspose], kernel_shape=4, padding='SAME', nl=tf.nn.elu):
        # x = x - VGG19_MEAN_TENSOR
        # x = BNLReLU(x)
        x = tf_2tanh(x)
        # x = x/255.0
        x = tf.space_to_batch(x, paddings=[[0, 0], [0, 0]], block_size=64, name='s2b')
        x = tf.reshape(x, [-1, 4, 4, 4, 3])
        # Swap batch and channel axes so each channel decodes as a volume;
        # the inverse transpose is applied before returning.
        x = tf.transpose(x, [4, 1, 2, 3, 0])
        # (A commented-out tensorpack LinearWrap decoder was removed here.)
        with tf.contrib.framework.arg_scope([conv_3d], filter_size=4,
                                            strides=[1, 2, 2, 2, 1],
                                            activation='relu', reuse=False):
            with tf.contrib.framework.arg_scope([conv_3d_transpose], filter_size=4,
                                                strides=[1, 2, 2, 2, 1],
                                                activation='relu', reuse=False):
                # Each stage doubles spatial size toward the full volume;
                # target sizes are ceil(DIM / 2**k) via -(-n // d).
                d6b = conv_3d_transpose(incoming=x, name="d6b", nb_filter=256,
                                        output_shape=[int(s) for s in [-(-DIMZ // (2 ** 5)), -(-DIMY // (2 ** 5)), -(-DIMX // (2 ** 5))]],
                                        bias=False)
                r6b = tf_bottleneck(d6b, name="r6b", nb_filter=256)
                d5b = conv_3d_transpose(incoming=r6b, name="d5b", nb_filter=128,
                                        output_shape=[int(s) for s in [-(-DIMZ // (2 ** 4)), -(-DIMY // (2 ** 4)), -(-DIMX // (2 ** 4))]],
                                        bias=False)
                r5b = tf_bottleneck(d5b, name="r5b", nb_filter=128)
                d4b = conv_3d_transpose(incoming=r5b, name="d4b", nb_filter=64,
                                        output_shape=[int(s) for s in [-(-DIMZ // (2 ** 3)), -(-DIMY // (2 ** 3)), -(-DIMX // (2 ** 3))]],
                                        bias=False)
                r4b = tf_bottleneck(d4b, name="r4b", nb_filter=64)
                d3b = conv_3d_transpose(incoming=r4b, name="d3b", nb_filter=32,
                                        output_shape=[int(s) for s in [-(-DIMZ // (2 ** 2)), -(-DIMY // (2 ** 2)), -(-DIMX // (2 ** 2))]],
                                        bias=False)
                r3b = tf_bottleneck(d3b, name="r3b", nb_filter=32)
                d2b = conv_3d_transpose(incoming=r3b, name="d2b", nb_filter=16,
                                        output_shape=[int(s) for s in [-(-DIMZ // (2 ** 1)), -(-DIMY // (2 ** 1)), -(-DIMX // (2 ** 1))]],
                                        bias=False)
                r2b = tf_bottleneck(d2b, name="r2b", nb_filter=16)
                out = conv_3d_transpose(incoming=r2b, name="out", nb_filter=1,
                                        activation='tanh',
                                        output_shape=[int(s) for s in [-(-DIMZ // (2 ** 0)), -(-DIMY // (2 ** 0)), -(-DIMX // (2 ** 0))]])
                print("D6b :", d6b.get_shape().as_list())
                print("R6b :", r6b.get_shape().as_list())
                print("D5b :", d5b.get_shape().as_list())
                print("R5b :", r5b.get_shape().as_list())
                print("D4b :", d4b.get_shape().as_list())
                print("R4b :", r4b.get_shape().as_list())
                print("D3b :", d3b.get_shape().as_list())
                print("R3b :", r3b.get_shape().as_list())
                print("D2b :", d2b.get_shape().as_list())
                print("R2b :", r2b.get_shape().as_list())
                print("Out :", out.get_shape().as_list())
                x = out
                # Undo the channel/batch swap applied above.
                x = tf.transpose(x, [4, 1, 2, 3, 0])
                # x = tf.squeeze(x)
                # x = x*255.0
                x = tf_2imag(x)
                # x = x + VGG19_MEAN_TENSOR
                return x
def self(x_train, y_train, x_test, y_test): int_put = input_data(shape=[None, 224, 5, 5, 1], ) conv1 = conv_3d( int_put, 24, [24, 3, 3], padding='VALID', strides=[1, 1, 1, 1, 1], activation='prelu', ) print('conv1', conv1.get_shape().as_list()) batch_norm = batch_normalization(conv1) conv2 = conv_3d( batch_norm, 12, [24, 3, 3], padding='VALID', strides=[1, 1, 1, 1, 1], activation='prelu', ) print('conv2', conv2.get_shape().as_list()) batch_norm_con = batch_normalization(conv2) decon2 = conv_3d_transpose(batch_norm_con, 24, [24, 3, 3], padding='VALID', output_shape=[201, 3, 3, 24]) batch_norm = batch_normalization(decon2) print('a') decon2 = conv_3d_transpose(batch_norm, 1, [24, 3, 3], padding='VALID', output_shape=[224, 5, 5, 1]) batch_norm = batch_normalization(decon2) network = regression(batch_norm, optimizer='Adagrad', loss='mean_square', learning_rate=0.01, metric='R2') model = tflearn.DNN(network, tensorboard_verbose=0, tensorboard_dir="./tflearn_logs/") for i in range(10): model.fit(x_train, x_train, n_epoch=20, shuffle=True, show_metric=True, validation_set=(x_test, x_test), batch_size=32, run_id='3d_net_self') x_pre = model.predict(x_train) x_pre = np.array(x_pre) x_true = np.array(x_train) psnr(x_true, x_pre) model.save('my_model_self.tflearn') '''