# Example #1
def input_block(bmode, pd, num_filters, reuse, scope, init):
    """Fuse B-mode and power-Doppler volumes into one feature map.

    Each modality goes through its own 5x5x5 linear conv producing
    num_filters // 2 channels; the two results are concatenated along
    the channel axis (4), convolved to num_filters channels, and
    passed through a PReLU activation.
    """
    def modality_conv(volume, suffix):
        # Per-modality linear projection to half the target channel count.
        return conv_3d(volume,
                       num_filters // 2,
                       5,
                       activation='linear',
                       padding='same',
                       reuse=reuse,
                       scope=scope + suffix,
                       weights_init=init)

    merged = tflearn.layers.merge_ops.merge(
        [modality_conv(bmode, "_1_bmode"), modality_conv(pd, "_1_pd")],
        axis=4, mode='concat')

    fused = conv_3d(merged,
                    num_filters,
                    5,
                    activation='linear',
                    padding='same',
                    reuse=reuse,
                    scope=scope + "_1",
                    weights_init=init)
    return tflearn.activation(fused, "prelu")
# Example #2
def sentnet_v0(width, height, frame_count, lr, output=9):
    """Build an AlexNet-style 3D conv net wrapped in a tflearn.DNN.

    Args:
        width, height, frame_count: Input volume dimensions (single channel).
        lr: Learning rate for the momentum optimizer.
        output: Number of softmax output classes.

    Returns:
        tflearn.DNN: Model checkpointing to 'model_alexnet', logging to 'log'.
    """
    network = input_data(shape=[None, width, height, frame_count, 1], name='input')
    network = conv_3d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_3d(network, 3, strides=2)

    #network = local_response_normalization(network)

    network = conv_3d(network, 256, 5, activation='relu')
    network = max_pool_3d(network, 3, strides=2)

    #network = local_response_normalization(network)

    # NOTE(review): the fourth positional argument to conv_3d is `strides`,
    # so these 3x3 convs run with stride 3; classic AlexNet uses stride 1
    # here — confirm this is intentional.
    network = conv_3d(network, 384, 3, 3, activation='relu')
    network = conv_3d(network, 384, 3, 3, activation='relu')
    network = conv_3d(network, 256, 3, 3, activation='relu')

    network = max_pool_3d(network, 3, strides=2)

    #network = local_response_normalization(network)

    # Two tanh fully connected layers with 0.5 dropout, then softmax head.
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
# Example #3
def sentnet_v0(width, height, frame_count, lr, output=9):
    """AlexNet-style 3D convolutional network returned as a tflearn.DNN.

    Args:
        width, height, frame_count: Input volume dimensions (one channel).
        lr: Learning rate for the momentum optimizer.
        output: Number of softmax classes.

    Returns:
        tflearn.DNN: Model with checkpointing to 'model_alexnet'.
    """
    net = input_data(shape=[None, width, height, frame_count, 1], name='input')

    net = conv_3d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_3d(net, 3, strides=2)

    net = conv_3d(net, 256, 5, activation='relu')
    net = max_pool_3d(net, 3, strides=2)

    # Three stacked conv stages (filter size 3, fourth positional arg is
    # strides=3, as in the original snippet).
    for nb in (384, 384, 256):
        net = conv_3d(net, nb, 3, 3, activation='relu')

    net = max_pool_3d(net, 3, strides=2)

    # Two tanh fully connected layers, each followed by 0.5 dropout.
    for _ in range(2):
        net = fully_connected(net, 4096, activation='tanh')
        net = dropout(net, 0.5)

    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')

    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=0,
                       tensorboard_dir='log')
def get_network(do_dropout=True):
    """Build the 3-class 3D patch-classification network.

    Four conv+batch-norm blocks (dropout after each pair when enabled),
    then two regularized fully connected layers and a softmax head wired
    into an Adam regression op.

    Args:
        do_dropout (bool): If True, insert 0.5 dropout after each pair of
            conv blocks and after each fully connected layer.

    Returns:
        The tflearn regression op wrapping the softmax output.
    """
    def _conv_bn(net):
        # 3x3x3 L2-regularized ReLU conv followed by batch normalization
        # (max pooling was deliberately disabled in the original design).
        net = conv_3d(net, 100, 3, activation='relu', regularizer="L2")
        return batch_normalization(net)

    # Input: two-channel cubic patches of side config.patch_size.
    p_sz = config.patch_size
    network = input_data(shape=[None, p_sz, p_sz, p_sz, 2], name='input')

    network = _conv_bn(network)
    network = _conv_bn(network)
    if do_dropout:
        network = dropout(network, 0.5)

    network = _conv_bn(network)
    network = _conv_bn(network)
    if do_dropout:
        network = dropout(network, 0.5)

    # Fully connected head.
    network = fully_connected(network, 100, activation='relu', regularizer="L2")
    if do_dropout:
        network = dropout(network, 0.5)
    network = batch_normalization(network)

    network = fully_connected(network, 50, activation='relu', regularizer="L2")
    if do_dropout:
        network = dropout(network, 0.5)
    network = batch_normalization(network)

    # Softmax + output layers.
    network = fully_connected(network, 3, activation='softmax', name='soft')
    network = regression(
        network,
        optimizer='adam',
        learning_rate=0.0001,
        loss='categorical_crossentropy',
        name='target',
        batch_size=75)
    return network
# Example #5
def conv_block2(input, feature, num_convs, reuse, init):
    """Concat `input` and `feature` on the channel axis, then apply one or
    more 5x5x5 PReLU conv layers with an (apparently intended) residual add.

    NOTE(review): `input` shadows the builtin-style name and is reassigned
    inside both branches, which breaks the residual pattern — see notes below.

    Args:
        input: 5D tensor; channel count is read from its last dimension.
        feature: 5D tensor concatenated with `input` along axis 4.
        num_convs (int): Number of conv layers to apply.
        reuse: TF variable-reuse flag forwarded to conv_3d.
        init: Weight initializer forwarded to conv_3d.

    Returns:
        The PReLU-activated output tensor.
    """
    x = tflearn.layers.merge_ops.merge([input, feature], axis=4, mode='concat')
    n_channels = int(input.get_shape()[-1])
    if num_convs == 1:
        with tf.variable_scope("conv_" + str(1)):
            x = conv_3d(x,
                        n_channels * 2,
                        filter_size=5,
                        strides=1,
                        activation='linear',
                        padding='same',
                        weights_init=init,
                        scope="conv_" + str(1),
                        reuse=reuse)
            # x = tflearn.layers.normalization.batch_normalization(x, reuse=reuse, scope="bn_" + str(1))
            # input = tflearn.layers.normalization.batch_normalization(x, reuse=reuse, scope="bn_inp_" + str(1))
            # NOTE(review): `input = x` followed by `x = x + input` computes
            # 2*x, NOT a skip connection from the pre-conv tensor — likely a
            # bug; confirm whether the merged input was meant to be added.
            input = x
            x = x + input
            # x = tflearn.layers.normalization.batch_normalization(x, reuse=reuse, scope="bn_merged_" + str(1))
            x = tflearn.activation(x, "prelu")
        return x

    # First conv of the multi-conv path doubles the channel count.
    with tf.variable_scope("conv_" + str(1)):
        x = conv_3d(x,
                    n_channels * 2,
                    filter_size=5,
                    strides=1,
                    activation='linear',
                    padding='same',
                    weights_init=init,
                    scope="conv_" + str(1),
                    reuse=reuse)
        # x = tflearn.layers.normalization.batch_normalization(x, reuse=reuse, scope="bn_" + str(1))
        x = tflearn.activation(x, "prelu")

    # Remaining convs keep n_channels filters; only the last one adds the
    # "residual".
    for i in range(1, num_convs):
        with tf.variable_scope("conv_" + str(i + 1)):
            x = conv_3d(x,
                        n_channels,
                        filter_size=5,
                        strides=1,
                        activation='linear',
                        padding='same',
                        weights_init=init,
                        scope="conv2_" + str(i + 1),
                        reuse=reuse)
            # x = tflearn.layers.normalization.batch_normalization(x, reuse=reuse, scope="bn_" + str(i+1))
            # input = tflearn.layers.normalization.batch_normalization(x, reuse=reuse, scope="bn_inp_" + str(i+1))
            # NOTE(review): same doubling pattern as above — `x + input` after
            # `input = x` is just 2*x on the final iteration.
            input = x
            if i == num_convs - 1:
                x = x + input
            # x = tflearn.layers.normalization.batch_normalization(x, reuse=reuse, scope="bn_merged_" + str(i + 1))
            x = tflearn.activation(x, "prelu")
    return x
def tf_bottleneck(inputs, nb_filter, name="bottleneck"):
    """Residual bottleneck: two 4x4x4 convs plus an identity skip.

    The first conv uses the arg_scope's 'relu' activation; the second is
    linear (tf.identity) so no nonlinearity is applied before the addition.

    Args:
        inputs: Input tensor; its shape must match the conv output for the
            final element-wise add.
        nb_filter: Number of filters for both conv layers.
        name: Variable scope name.

    Returns:
        Tensor: inputs + conv(conv(inputs)).
    """
    with tf.variable_scope(name):
        original = tf.identity(inputs, name="identity")

        with tf.contrib.framework.arg_scope(
                [conv_3d, conv_3d_transpose],
                strides=[1, 1, 1, 1, 1], activation='relu', reuse=False):
            conv_4x4i = conv_3d(incoming=original, name="conv_4x4i",
                                filter_size=4, nb_filter=nb_filter, bias=False)
            conv_4x4o = conv_3d(incoming=conv_4x4i, name="conv_4x4o",
                                filter_size=4, nb_filter=nb_filter, bias=False,
                                activation=tf.identity)
        summation = tf.add(original, conv_4x4o, name="summation")
        return summation
def create_cnn_3d_alex():
    """Build an AlexNet-like 3D CNN over IMG_SIZE_PX cubes.

    Every conv / fully connected layer is followed by batch normalization
    and a ReLU; the head is a num_class softmax wired into an Adam
    regression op. Returns the regression op.
    """
    def bn_relu(net):
        # Batch-normalize then ReLU, matching the original layer pairs.
        return tflearn.activation(tflearn.batch_normalization(net),
                                  activation='relu')

    #img_prep = ImagePreprocessing()
    #img_prep.add_featurewise_zero_center(mean=0.25)

    network = input_data(
        shape=[None, IMG_SIZE_PX, IMG_SIZE_PX, IMG_SIZE_PX, 1])

    network = bn_relu(conv_3d(network, 96, 11, strides=4, regularizer='L2'))
    network = max_pool_3d(network, 3, strides=2)

    network = bn_relu(conv_3d(network, 256, 5, regularizer='L2'))
    network = max_pool_3d(network, 3, strides=2)

    # Three 3x3x3 conv stages back to back, then a final pool.
    for nb in (384, 384, 256):
        network = bn_relu(conv_3d(network, nb, 3, regularizer='L2'))
    network = max_pool_3d(network, 3, strides=2)

    # Two 4096-unit fully connected stages with dropout.
    for _ in range(2):
        network = bn_relu(fully_connected(network, 4096, regularizer='L2'))
        network = dropout(network, keep_rate)

    output = fully_connected(network, num_class, activation='softmax')

    return regression(output,
                      optimizer='adam',
                      loss='categorical_crossentropy',
                      learning_rate=0.0001)
# Example #8
def self(x_train, y_train, x_test, y_test):
    """Load a pretrained 3D conv autoencoder and dump features for x_train.

    NOTE(review): the function name `self` shadows the conventional method
    receiver name — consider renaming. Only `x_train` is used; the other
    three parameters are ignored, and nothing is returned (features are
    persisted via save_hdf5).
    """
    # Input: presumably 224 spectral bands x 5x5 spatial, one channel —
    # TODO confirm against the data pipeline.
    int_put = input_data(shape=[None, 224, 5, 5, 1], )

    conv1 = conv_3d(
        int_put,
        24,
        [24, 3, 3],
        padding='VALID',
        strides=[1, 1, 1, 1, 1],
        activation='prelu',
    )
    print('conv1', conv1.get_shape().as_list())
    batch_norm = batch_normalization(conv1)

    conv2 = conv_3d(
        batch_norm,
        12,
        [24, 3, 3],
        padding='VALID',
        strides=[1, 1, 1, 1, 1],
        activation='prelu',
    )
    print('conv2', conv2.get_shape().as_list())
    batch_norm_con = batch_normalization(conv2)

    # Decoder: two transposed convs reconstructing the input volume shape.
    decon2 = conv_3d_transpose(batch_norm_con,
                               24, [24, 3, 3],
                               padding='VALID',
                               output_shape=[201, 3, 3, 24])
    batch_norm = batch_normalization(decon2)
    print('a')
    decon2 = conv_3d_transpose(batch_norm,
                               1, [24, 3, 3],
                               padding='VALID',
                               output_shape=[224, 5, 5, 1])
    batch_norm = batch_normalization(decon2)

    network = regression(batch_norm,
                         optimizer='Adagrad',
                         loss='mean_square',
                         learning_rate=0.01,
                         metric='R2')

    # Restore previously trained weights and run inference on x_train only.
    feature_model = tflearn.DNN(network)
    feature_model.load('my_model_self.tflearn')
    x_feature = feature_model.predict(x_train)
    save_hdf5(x_feature)
    print('asd')
def create_cnn_3d_network():
    """Build a compact AlexNet-style 3D CNN over IMG_SIZE_PX cubes.

    Each conv / fully connected layer is followed by batch normalization
    plus ReLU; the num_class head feeds an Adam regression op with a
    softmax-categorical-crossentropy loss. Returns the regression op.
    """
    def bn_relu(net):
        # Batch-normalize then ReLU, matching the original layer pairs.
        return tflearn.activation(tflearn.batch_normalization(net),
                                  activation='relu')

    network = input_data(
        shape=[None, IMG_SIZE_PX, IMG_SIZE_PX, IMG_SIZE_PX, 1])

    network = bn_relu(conv_3d(network, 32, 3))
    network = max_pool_3d(network, 3, strides=2)

    network = bn_relu(conv_3d(network, 64, 3))
    network = max_pool_3d(network, 3, strides=2)

    network = bn_relu(conv_3d(network, 128, 3))

    network = bn_relu(conv_3d(network, 256, 3))
    network = max_pool_3d(network, 3, strides=2)

    # Two 2048-unit fully connected stages with dropout.
    for _ in range(2):
        network = bn_relu(fully_connected(network, 2048))
        network = dropout(network, keep_rate)

    network = fully_connected(network, num_class)

    return regression(network,
                      optimizer='adam',
                      loss='softmax_categorical_crossentropy',
                      learning_rate=0.0001)
# Example #10
def tf_bottleneck(inputs, nb_filter, name="bottleneck"):
    """Residual bottleneck: three 4x4x4 leaky-ReLU convs plus identity skip,
    followed by instance normalization.

    NOTE(review): the InstanceNorm/squeeze/expand_dims sequence only works
    when the batch dimension is exactly 1 (tf.squeeze(axis=0) fails
    otherwise) — confirm before reuse. `shape` is computed but never used.
    """
    with tf.variable_scope(name):
            original  = tf.identity(inputs, name="identity")

            with tf.contrib.framework.arg_scope([conv_3d, conv_3d_transpose], strides=[1, 1, 1, 1, 1], activation='leaky_relu'):
                    shape = original.get_shape().as_list()
                    conv_4x4i = original #conv_3d(incoming=original,    name="conv_4x4i", filter_size=4, nb_filter=nb_filter) # From 256 to 64 in Residual pape, bias=Falser
                    # original  = tf.nn.dropout(original, keep_prob=0.5)
                    conv_4x4i = conv_3d(incoming=conv_4x4i, name="conv_4x4i", filter_size=4, nb_filter=nb_filter, bias=False) # From 256 to 64 in Residual paper
                    # conv_4x4i = tf.nn.dropout(conv_4x4i, keep_prob=0.5)
                    conv_4x4m = conv_3d(incoming=conv_4x4i, name="conv_4x4m", filter_size=4, nb_filter=nb_filter, bias=False)
                    # conv_4x4o = tf.nn.dropout(conv_4x4o, keep_prob=0.5)
                    # Final conv is linear (tf.identity) so no activation is
                    # applied before the residual addition.
                    conv_4x4o = conv_3d(incoming=conv_4x4m, name="conv_4x4o", filter_size=4, nb_filter=nb_filter, bias=False, activation=tf.identity,
                                                                              # output_shape=[shape[1], shape[2], shape[3]]
                                                                              )
            summation = tf.add(original, conv_4x4o, name="summation")
            # summation = elu(summation)
            # return batch_normalization(summation)
            # Normalize per-instance: batch dim squeezed away, then restored.
            ret = InstanceNorm('bn', tf.squeeze(summation, axis=0))
            ret = tf.expand_dims(ret, axis=0)
            return ret
# Example #11
 def add_conv_3d_layers(self, num_filters, filter_size, activation, num_layers=1):
     """Append one or more identical 3D conv layers to self.network.

     Args:
         num_filters (int): Number of convolutional filters per layer.
         filter_size (int): Spatial size of each filter.
         activation (str): Activation function name.
         num_layers (int): How many conv_3d layers to stack (default 1).
     """
     for _ in range(num_layers):
         self.network = conv_3d(self.network, num_filters, filter_size,
                                activation=activation)
# Example #12
def down_block(input_tensor, reuse, scope, init):
    """Downsample by 2 with a strided 2x2x2 linear conv that doubles the
    channel count, then apply a PReLU activation.
    """
    doubled_channels = 2 * int(input_tensor.get_shape()[-1])
    conv = conv_3d(input_tensor,
                   doubled_channels,
                   filter_size=2,
                   strides=2,
                   activation='linear',
                   padding='same',
                   weights_init=init,
                   scope=scope + "_1_ds",
                   reuse=reuse)
    # Batch norm intentionally disabled in the original design.
    return tflearn.activation(conv, "prelu")
# Example #13
def conv_block(input, num_convs, reuse, scope, init):
    """Stack of channel-preserving 5x5x5 PReLU convs; the final conv adds a
    residual skip from the block's original input before activation.

    NOTE(review): every iteration passes the same conv scope
    (scope + "_2") with the given `reuse` flag — confirm variable sharing
    across iterations is intended.
    """
    n_channels = int(input.get_shape()[-1])
    x = input
    for idx in range(num_convs):
        with tf.variable_scope("conv_" + str(idx + 1)):
            x = conv_3d(x,
                        n_channels,
                        filter_size=5,
                        strides=1,
                        activation='linear',
                        padding='same',
                        weights_init=init,
                        scope=scope + "_2",
                        reuse=reuse)
            # Residual connection on the last conv only.
            if idx == num_convs - 1:
                x = x + input
            x = tflearn.activation(x, "prelu")
    return x
# Example #14
# Script fragment: builds the convolutional trunk of a VGG-style 3D network.
# NOTE(review): relies on `Y` and `ImagePreprocessing` being defined/imported
# earlier; `img_prep` is constructed but never attached to input_data here —
# confirm it is wired into the estimator elsewhere.
from tflearn.data_utils import shuffle, to_categorical
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_3d, max_pool_3d, avg_pool_3d
from tflearn.layers.estimator import regression

# One-hot encode labels; class count inferred from the maximum label value.
num_of_categories = int(max(Y)+1)
Y = to_categorical(Y, num_of_categories)

img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()

# Building 'VGG Network'
network = input_data(shape=[None, 197, 233, 189, 1])

# Four VGG-style stages: two 3x3x3 ReLU convs, then 2x2x2 max pooling.
network = conv_3d(network, 4, 3, activation='relu')
network = conv_3d(network, 4, 3, activation='relu')
network = max_pool_3d(network, [1, 2, 2, 2, 1], strides=2)

network = conv_3d(network, 8, 3, activation='relu')
network = conv_3d(network, 8, 3, activation='relu')
network = max_pool_3d(network, [1, 2, 2, 2, 1], strides=2)

network = conv_3d(network, 64, 3, activation='relu')
network = conv_3d(network, 64, 3, activation='relu')
network = max_pool_3d(network, [1, 2, 2, 2, 1], strides=2)

network = conv_3d(network, 128, 3, activation='relu')
network = conv_3d(network, 128, 3, activation='relu')
network = max_pool_3d(network, [1, 2, 2, 2, 1], strides=2)
# Example #15
def arch_fusionnet_translator_3d_iso_tflearn(img,
                                             feats=[None, None, None],
                                             last_dim=1,
                                             nl=INLReLU3D,
                                             nb_filters=32):

    # Add decorator to tflearn source code
    # sudo nano /usr/local/lib/python2.7/dist-packages/tflearn/layers/conv.py
    # @tf.contrib.framework.add_arg_scope
    with tf.contrib.framework.arg_scope([conv_3d],
                                        filter_size=4,
                                        strides=[1, 2, 2, 2, 1],
                                        activation='leaky_relu'):
        with tf.contrib.framework.arg_scope([conv_3d_transpose],
                                            filter_size=4,
                                            strides=[1, 2, 2, 2, 1],
                                            activation='leaky_relu'):
            shape = img.get_shape().as_list()
            dimb, dimz, dimy, dimx, dimc = shape
            e1a = conv_3d(incoming=img,
                          name="e1a",
                          nb_filter=nb_filters * 1,
                          bias=False)
            r1a = tf_bottleneck(e1a, name="r1a", nb_filter=nb_filters * 1)
            r1a = tf.nn.dropout(r1a, keep_prob=0.5)

            e2a = conv_3d(incoming=r1a,
                          name="e2a",
                          nb_filter=nb_filters * 1,
                          bias=False)
            r2a = tf_bottleneck(e2a, name="r2a", nb_filter=nb_filters * 1)
            r2a = tf.nn.dropout(r2a, keep_prob=0.5)

            e3a = conv_3d(incoming=r2a,
                          name="e3a",
                          nb_filter=nb_filters * 2,
                          bias=False)
            r3a = tf_bottleneck(e3a, name="r3a", nb_filter=nb_filters * 2)
            r3a = tf.nn.dropout(r3a, keep_prob=0.5)

            e4a = conv_3d(incoming=r3a,
                          name="e4a",
                          nb_filter=nb_filters * 2,
                          bias=False)
            r4a = tf_bottleneck(e4a, name="r4a", nb_filter=nb_filters * 2)
            r4a = tf.nn.dropout(r4a, keep_prob=0.5)

            e5a = conv_3d(incoming=r4a,
                          name="e5a",
                          nb_filter=nb_filters * 4,
                          bias=False)
            r5a = tf_bottleneck(e5a, name="r5a", nb_filter=nb_filters * 4)
            r5a = tf.nn.dropout(r5a, keep_prob=0.5)

            # e6a  = conv_3d(incoming=r5a,           name="e6a", nb_filter=nb_filters*4, bias=False)
            # r6a  = tf_bottleneck(e6a,              name="r6a", nb_filter=nb_filters*4)

            # e7a  = conv_3d(incoming=r6a,           name="e7a", nb_filter=nb_filters*8)           , bias=False
            # r7a  = tf_bottleneck(e7a,              name="r7a", nb_filter=nb_filters*8)
            # r7a  = dropout(incoming=r7a, keep_prob=0.5)
            print "In1 :", img.get_shape().as_list()
            print "E1a :", e1a.get_shape().as_list()
            print "R1a :", r1a.get_shape().as_list()
            print "E2a :", e2a.get_shape().as_list()
            print "R2a :", r2a.get_shape().as_list()
            print "E3a :", e3a.get_shape().as_list()
            print "R3a :", r3a.get_shape().as_list()
            print "E4a :", e4a.get_shape().as_list()
            print "R4a :", r4a.get_shape().as_list()
            print "E5a :", e5a.get_shape().as_list()
            print "R5a :", r5a.get_shape().as_list()

            r5b = tf_bottleneck(r5a, name="r5b", nb_filter=nb_filters * 4)
            d4b = conv_3d_transpose(incoming=r5b,
                                    name="d4b",
                                    nb_filter=nb_filters * 2,
                                    output_shape=[
                                        -(-dimz // (2**4)), -(-dimy // (2**4)),
                                        -(-dimx / (2**4))
                                    ],
                                    bias=False)
            a4b = tf.add(d4b, r4a, name="a4b")

            r4b = tf_bottleneck(a4b, name="r4b", nb_filter=nb_filters * 2)
            d3b = conv_3d_transpose(incoming=r4b,
                                    name="d3b",
                                    nb_filter=nb_filters * 2,
                                    output_shape=[
                                        -(-dimz // (2**3)), -(-dimy // (2**3)),
                                        -(-dimx / (2**3))
                                    ],
                                    bias=False)
            a3b = tf.add(d3b, r3a, name="a3b")

            r3b = tf_bottleneck(a3b, name="r3b", nb_filter=nb_filters * 2)
            d2b = conv_3d_transpose(incoming=r3b,
                                    name="d2b",
                                    nb_filter=nb_filters * 1,
                                    output_shape=[
                                        -(-dimz // (2**2)), -(-dimy // (2**2)),
                                        -(-dimx / (2**2))
                                    ],
                                    bias=False)
            a2b = tf.add(d2b, r2a, name="a2b")

            r2b = tf_bottleneck(a2b, name="r2b", nb_filter=nb_filters * 1)
            d1b = conv_3d_transpose(incoming=r2b,
                                    name="d1b",
                                    nb_filter=nb_filters * 1,
                                    output_shape=[
                                        -(-dimz // (2**1)), -(-dimy // (2**1)),
                                        -(-dimx / (2**1))
                                    ],
                                    bias=False)
            a1b = tf.add(d1b, r1a, name="a1b")

            out = conv_3d_transpose(incoming=a1b,
                                    name="out",
                                    nb_filter=last_dim,
                                    activation='tanh',
                                    output_shape=[
                                        -(-dimz // (2**0)), -(-dimy // (2**0)),
                                        -(-dimx / (2**0))
                                    ])

            # print "R7b :", r7b.get_shape().as_list()
            # print "D6b :", d6b.get_shape().as_list()
            # print "A6b :", a6b.get_shape().as_list()

            # print "R6b :", r6b.get_shape().as_list()
            # print "D5b :", d5b.get_shape().as_list()
            # print "A5b :", a5b.get_shape().as_list()

            print "R5b :", r5b.get_shape().as_list()
            print "D4b :", d4b.get_shape().as_list()
            print "A4b :", a4b.get_shape().as_list()

            print "R4b :", r4b.get_shape().as_list()
            print "D3b :", d3b.get_shape().as_list()
            print "A3b :", a3b.get_shape().as_list()

            print "R3b :", r3b.get_shape().as_list()
            print "D2b :", d2b.get_shape().as_list()
            print "A2b :", a2b.get_shape().as_list()

            print "R2b :", r2b.get_shape().as_list()
            print "D1b :", d1b.get_shape().as_list()
            print "A1b :", a1b.get_shape().as_list()

            print "Out :", out.get_shape().as_list()

            return out
# Example #16
def inception_v3_3d(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
    """Build a GoogLeNet (Inception-v1 topology) classifier using 3-D convolutions.

    Arguments:
        width, height: spatial dimensions of the input volume.
        frame_count: unused here — the depth axis is hard-coded to 3 below.
            NOTE(review): presumably intended to replace the literal 3; confirm.
        lr: learning rate for the momentum optimizer.
        output: number of output classes (softmax width). Default: 9.
        model_name: checkpoint path prefix for the returned DNN.

    Returns:
        A compiled `tflearn.DNN` wrapping the regression layer.
    """
    network = input_data(shape=[None, width, height,3, 1], name='input')

    # Stem: 7x7x7/2 conv -> 3x3x3/2 pool -> 1x1 reduce -> 3x3x3 conv -> pool.
    conv1_7_7 = conv_3d(network, 64, 7, strides=2, activation='relu', name = 'conv1_7_7_s2')
    pool1_3_3 = max_pool_3d(conv1_7_7, 3,strides=2)
    #pool1_3_3 = local_response_normalization(pool1_3_3)
    conv2_3_3_reduce = conv_3d(pool1_3_3, 64,1, activation='relu',name = 'conv2_3_3_reduce')
    conv2_3_3 = conv_3d(conv2_3_3_reduce, 192,3, activation='relu', name='conv2_3_3')
    #conv2_3_3 = local_response_normalization(conv2_3_3)
    pool2_3_3 = max_pool_3d(conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2')

    # Inception 3a: four parallel branches (1x1, 3x3, 5x5, pooled 1x1).
    inception_3a_1_1 = conv_3d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1')
    inception_3a_3_3_reduce = conv_3d(pool2_3_3, 96,1, activation='relu', name='inception_3a_3_3_reduce')
    inception_3a_3_3 = conv_3d(inception_3a_3_3_reduce, 128,filter_size=3,  activation='relu', name = 'inception_3a_3_3')
    inception_3a_5_5_reduce = conv_3d(pool2_3_3,16, filter_size=1,activation='relu', name ='inception_3a_5_5_reduce' )
    inception_3a_5_5 = conv_3d(inception_3a_5_5_reduce, 32, filter_size=5, activation='relu', name= 'inception_3a_5_5')
    inception_3a_pool = max_pool_3d(pool2_3_3, kernel_size=3, strides=1, )
    inception_3a_pool_1_1 = conv_3d(inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1')

    # merge the inception_3a__ branches along the channel axis (axis 4 of NDHWC).
    inception_3a_output = merge([inception_3a_1_1, inception_3a_3_3, inception_3a_5_5, inception_3a_pool_1_1], mode='concat', axis=4)

    # Inception 3b.
    inception_3b_1_1 = conv_3d(inception_3a_output, 128,filter_size=1,activation='relu', name= 'inception_3b_1_1' )
    inception_3b_3_3_reduce = conv_3d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce')
    inception_3b_3_3 = conv_3d(inception_3b_3_3_reduce, 192, filter_size=3,  activation='relu',name='inception_3b_3_3')
    inception_3b_5_5_reduce = conv_3d(inception_3a_output, 32, filter_size=1, activation='relu', name = 'inception_3b_5_5_reduce')
    # BUG FIX: activation was omitted here (conv_3d defaults to 'linear');
    # every other branch conv in this network uses 'relu'.
    inception_3b_5_5 = conv_3d(inception_3b_5_5_reduce, 96, filter_size=5, activation='relu', name = 'inception_3b_5_5')
    inception_3b_pool = max_pool_3d(inception_3a_output, kernel_size=3, strides=1,  name='inception_3b_pool')
    inception_3b_pool_1_1 = conv_3d(inception_3b_pool, 64, filter_size=1,activation='relu', name='inception_3b_pool_1_1')

    #merge the inception_3b_*
    inception_3b_output = merge([inception_3b_1_1, inception_3b_3_3, inception_3b_5_5, inception_3b_pool_1_1], mode='concat',axis=4,name='inception_3b_output')

    pool3_3_3 = max_pool_3d(inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3')

    # Inception 4a.
    inception_4a_1_1 = conv_3d(pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1')
    inception_4a_3_3_reduce = conv_3d(pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce')
    inception_4a_3_3 = conv_3d(inception_4a_3_3_reduce, 208, filter_size=3,  activation='relu', name='inception_4a_3_3')
    inception_4a_5_5_reduce = conv_3d(pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce')
    inception_4a_5_5 = conv_3d(inception_4a_5_5_reduce, 48, filter_size=5,  activation='relu', name='inception_4a_5_5')
    inception_4a_pool = max_pool_3d(pool3_3_3, kernel_size=3, strides=1,  name='inception_4a_pool')
    inception_4a_pool_1_1 = conv_3d(inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1')

    inception_4a_output = merge([inception_4a_1_1, inception_4a_3_3, inception_4a_5_5, inception_4a_pool_1_1], mode='concat', axis=4, name='inception_4a_output')

    # Inception 4b.
    # BUG FIX: layer name was 'inception_4a_1_1' (copy-paste duplicate of the
    # 4a branch), which collides with the layer above.
    inception_4b_1_1 = conv_3d(inception_4a_output, 160, filter_size=1, activation='relu', name='inception_4b_1_1')
    inception_4b_3_3_reduce = conv_3d(inception_4a_output, 112, filter_size=1, activation='relu', name='inception_4b_3_3_reduce')
    inception_4b_3_3 = conv_3d(inception_4b_3_3_reduce, 224, filter_size=3, activation='relu', name='inception_4b_3_3')
    inception_4b_5_5_reduce = conv_3d(inception_4a_output, 24, filter_size=1, activation='relu', name='inception_4b_5_5_reduce')
    inception_4b_5_5 = conv_3d(inception_4b_5_5_reduce, 64, filter_size=5,  activation='relu', name='inception_4b_5_5')

    inception_4b_pool = max_pool_3d(inception_4a_output, kernel_size=3, strides=1,  name='inception_4b_pool')
    inception_4b_pool_1_1 = conv_3d(inception_4b_pool, 64, filter_size=1, activation='relu', name='inception_4b_pool_1_1')

    inception_4b_output = merge([inception_4b_1_1, inception_4b_3_3, inception_4b_5_5, inception_4b_pool_1_1], mode='concat', axis=4, name='inception_4b_output')

    # Inception 4c.
    inception_4c_1_1 = conv_3d(inception_4b_output, 128, filter_size=1, activation='relu',name='inception_4c_1_1')
    inception_4c_3_3_reduce = conv_3d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_3_3_reduce')
    inception_4c_3_3 = conv_3d(inception_4c_3_3_reduce, 256,  filter_size=3, activation='relu', name='inception_4c_3_3')
    inception_4c_5_5_reduce = conv_3d(inception_4b_output, 24, filter_size=1, activation='relu', name='inception_4c_5_5_reduce')
    inception_4c_5_5 = conv_3d(inception_4c_5_5_reduce, 64,  filter_size=5, activation='relu', name='inception_4c_5_5')

    inception_4c_pool = max_pool_3d(inception_4b_output, kernel_size=3, strides=1)
    inception_4c_pool_1_1 = conv_3d(inception_4c_pool, 64, filter_size=1, activation='relu', name='inception_4c_pool_1_1')

    inception_4c_output = merge([inception_4c_1_1, inception_4c_3_3, inception_4c_5_5, inception_4c_pool_1_1], mode='concat', axis=4,name='inception_4c_output')

    # Inception 4d.
    inception_4d_1_1 = conv_3d(inception_4c_output, 112, filter_size=1, activation='relu', name='inception_4d_1_1')
    inception_4d_3_3_reduce = conv_3d(inception_4c_output, 144, filter_size=1, activation='relu', name='inception_4d_3_3_reduce')
    inception_4d_3_3 = conv_3d(inception_4d_3_3_reduce, 288, filter_size=3, activation='relu', name='inception_4d_3_3')
    inception_4d_5_5_reduce = conv_3d(inception_4c_output, 32, filter_size=1, activation='relu', name='inception_4d_5_5_reduce')
    inception_4d_5_5 = conv_3d(inception_4d_5_5_reduce, 64, filter_size=5,  activation='relu', name='inception_4d_5_5')
    inception_4d_pool = max_pool_3d(inception_4c_output, kernel_size=3, strides=1,  name='inception_4d_pool')
    inception_4d_pool_1_1 = conv_3d(inception_4d_pool, 64, filter_size=1, activation='relu', name='inception_4d_pool_1_1')

    inception_4d_output = merge([inception_4d_1_1, inception_4d_3_3, inception_4d_5_5, inception_4d_pool_1_1], mode='concat', axis=4, name='inception_4d_output')

    # Inception 4e.
    inception_4e_1_1 = conv_3d(inception_4d_output, 256, filter_size=1, activation='relu', name='inception_4e_1_1')
    inception_4e_3_3_reduce = conv_3d(inception_4d_output, 160, filter_size=1, activation='relu', name='inception_4e_3_3_reduce')
    inception_4e_3_3 = conv_3d(inception_4e_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_4e_3_3')
    inception_4e_5_5_reduce = conv_3d(inception_4d_output, 32, filter_size=1, activation='relu', name='inception_4e_5_5_reduce')
    inception_4e_5_5 = conv_3d(inception_4e_5_5_reduce, 128,  filter_size=5, activation='relu', name='inception_4e_5_5')
    inception_4e_pool = max_pool_3d(inception_4d_output, kernel_size=3, strides=1,  name='inception_4e_pool')
    inception_4e_pool_1_1 = conv_3d(inception_4e_pool, 128, filter_size=1, activation='relu', name='inception_4e_pool_1_1')

    inception_4e_output = merge([inception_4e_1_1, inception_4e_3_3, inception_4e_5_5,inception_4e_pool_1_1],axis=4, mode='concat')

    pool4_3_3 = max_pool_3d(inception_4e_output, kernel_size=3, strides=2, name='pool_3_3')

    # Inception 5a.
    inception_5a_1_1 = conv_3d(pool4_3_3, 256, filter_size=1, activation='relu', name='inception_5a_1_1')
    inception_5a_3_3_reduce = conv_3d(pool4_3_3, 160, filter_size=1, activation='relu', name='inception_5a_3_3_reduce')
    inception_5a_3_3 = conv_3d(inception_5a_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_5a_3_3')
    inception_5a_5_5_reduce = conv_3d(pool4_3_3, 32, filter_size=1, activation='relu', name='inception_5a_5_5_reduce')
    inception_5a_5_5 = conv_3d(inception_5a_5_5_reduce, 128, filter_size=5,  activation='relu', name='inception_5a_5_5')
    inception_5a_pool = max_pool_3d(pool4_3_3, kernel_size=3, strides=1,  name='inception_5a_pool')
    inception_5a_pool_1_1 = conv_3d(inception_5a_pool, 128, filter_size=1,activation='relu', name='inception_5a_pool_1_1')

    inception_5a_output = merge([inception_5a_1_1, inception_5a_3_3, inception_5a_5_5, inception_5a_pool_1_1], axis=4,mode='concat')

    # Inception 5b.
    inception_5b_1_1 = conv_3d(inception_5a_output, 384, filter_size=1,activation='relu', name='inception_5b_1_1')
    inception_5b_3_3_reduce = conv_3d(inception_5a_output, 192, filter_size=1, activation='relu', name='inception_5b_3_3_reduce')
    inception_5b_3_3 = conv_3d(inception_5b_3_3_reduce, 384,  filter_size=3,activation='relu', name='inception_5b_3_3')
    inception_5b_5_5_reduce = conv_3d(inception_5a_output, 48, filter_size=1, activation='relu', name='inception_5b_5_5_reduce')
    inception_5b_5_5 = conv_3d(inception_5b_5_5_reduce,128, filter_size=5,  activation='relu', name='inception_5b_5_5' )
    inception_5b_pool = max_pool_3d(inception_5a_output, kernel_size=3, strides=1,  name='inception_5b_pool')
    inception_5b_pool_1_1 = conv_3d(inception_5b_pool, 128, filter_size=1, activation='relu', name='inception_5b_pool_1_1')
    inception_5b_output = merge([inception_5b_1_1, inception_5b_3_3, inception_5b_5_5, inception_5b_pool_1_1], axis=4, mode='concat')

    # Head: global average pool, dropout, softmax classifier.
    pool5_7_7 = avg_pool_3d(inception_5b_output, kernel_size=7, strides=1)
    pool5_7_7 = dropout(pool5_7_7, 0.4)

    loss = fully_connected(pool5_7_7, output,activation='softmax')

    network = regression(loss, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path=model_name,
                        max_checkpoints=1, tensorboard_verbose=0,tensorboard_dir='log')

    return model
# Build one-hot training labels from the (image, label) pairs in `train`.
# NOTE(review): `train` and `vald` are assumed to be lists of
# (volume, label) pairs produced earlier in the file — confirm against caller.
Y = [i[1] for i in train]
y1=tflearn.data_utils.to_categorical(Y, nb_classes=2)
# Validation volumes reshaped to (N, depth=20, height=50, width=50, channels=1).
v1 = np.array([i[0] for i in vald]).reshape(-1,20,50,50,1)
V = [i[1] for i in vald]
v2=tflearn.data_utils.to_categorical(V, nb_classes=2)
num_classes = 2
# Input preprocessing: per-dataset zero-centering and std normalization.
img_prep = tflearn.ImagePreprocessing()
img_prep.add_featurewise_zero_center()
img_prep.add_featurewise_stdnorm()
# Augmentation: random blur, flips, and up-to-25-degree rotations.
img_aug = tflearn.ImageAugmentation()
img_aug.add_random_blur(sigma_max=3.0)
img_aug.add_random_flip_leftright()
img_aug.add_random_flip_updown()
img_aug.add_random_rotation(max_angle=25.)
# Network: two conv_3d + max_pool_3d stages (further stages left commented out).
network = input_data(shape=[None, 20,50,50,1],data_preprocessing=img_prep,data_augmentation=img_aug,name='input')
network = conv_3d(network, 32, 3,3, activation='relu')
network = max_pool_3d(network, 2,2)
#network = dropout(network, 0.25)
#network = conv_3d(network, 64, 2,2, activation='relu')
network = conv_3d(network, 64, 3,3, activation='relu')
network = max_pool_3d(network, 2,2)
#network = dropout(network, 0.25)
#network = conv_3d(network, 128, 3,3, activation='relu')
#network = conv_3d(network, 128, 2,2, activation='relu')
#network = max_pool_3d(network, 2,2)
#network = dropout(network, 0.25)
#network = conv_3d(network, 256, 3,3, activation='relu')
#network = conv_3d(network, 256, 1,1, activation='relu')
#network = max_pool_3d(network, 2,2)
#network = dropout(network, 0.25)
#network = conv_3d(network, 512, 1,1, activation='relu')
Example #18
0

def one_hot(v, num_classes=2):
    """Return a one-hot encoding of the label vector *v*.

    Generalized from a hard-coded two-class encoding: `num_classes`
    defaults to 2, so existing callers are unaffected.

    Arguments:
        v: array-like of (castable-to-int) class indices in [0, num_classes).
        num_classes: width of the one-hot rows. Default: 2.

    Returns:
        Float ndarray of shape (len(v), num_classes) with a single 1.0 per row.
    """
    return np.eye(num_classes)[v.astype(int)].reshape((-1, num_classes))


# Flatten label matrices to 1-D vectors, then one-hot encode them.
Y = np.ravel(Y.T)
Y_test = np.ravel(Y_test.T)

Y_oh = one_hot(Y)
Y_test_oh = one_hot(Y_test)

# Convolutional network building.
# NOTE(review): `arguments` looks like a docopt-style CLI dict — confirm.
with tf.device('/gpu:%s' % arguments['--gpu-id']):
    # 30x30x30 single-channel volumes; two conv/pool stages, then two FC layers.
    network = input_data(shape=[None, 30, 30, 30, 1])
    network = conv_3d(network, 16, 5, activation=tf.nn.relu)
    network = max_pool_3d(network, 2)
    network = conv_3d(network, 32, 5, activation=tf.nn.relu)
    network = max_pool_3d(network, 2)
    network = fully_connected(network, 128, activation=tf.nn.relu)
    # NOTE(review): relu on the 2-unit output layer is unusual for a
    # categorical_crossentropy head (softmax is conventional) — verify intent.
    network = fully_connected(network, 2, activation=tf.nn.relu)
    network = regression(network,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=1e-7)

# Train using classifier
model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit(X,
          Y_oh,
          n_epoch=epochs,
Example #19
0
def train(X=None,
          gpu_id=0,
          sparsity=False,
          latent=64,
          num_filters=32,
          filter_size=5,
          sparsity_level=DEFAULT_SPARSITY_LEVEL,
          sparsity_weight=DEFAULT_SPARSITY_WEIGHT,
          epochs=10,
          conv=False,
          checkpoint=None,
          is_training=True):
    """Build and train (or restore) an autoencoder over 30x30x30 volumes.

    Arguments:
        X: training data; required unless restoring from `checkpoint`.
        gpu_id: GPU to place the graph on (None for default placement).
        sparsity: if True, add a KL-divergence sparsity penalty to the loss.
        latent: size of the latent (bottleneck) layer.
        num_filters, filter_size: conv_3d parameters (used when `conv=True`).
        sparsity_level, sparsity_weight: sparsity target and loss weight.
        epochs: training epochs.
        conv: use a convolutional encoder/decoder instead of dense layers.
        checkpoint: checkpoint id to restore from (or to save under).
        is_training: True to fit and save; False to restore only.

    Returns:
        dict with 'model' (full autoencoder DNN) and 'encoding_model'
        (encoder-only DNN sharing the same session).
    """
    assert checkpoint is not None or X is not None,\
        'Either data to train on or model to restore is required.'
    print(' * [INFO] Using GPU %s' % gpu_id)
    print(' * [INFO]', 'Using' if sparsity else 'Not using', 'sparsity')
    print(' * [INFO] Latent dimensions: %d' % latent)
    print(' * [INFO]', 'Using' if conv else 'Not using',
          'convolutional layers.')
    with tf.device(None if gpu_id is None else '/gpu:%s' % gpu_id):
        # Building the encoder
        if conv:
            encoder = tflearn.input_data(shape=[None, 30, 30, 30, 1])
            encoder = conv_3d(encoder,
                              num_filters,
                              filter_size,
                              activation=tf.nn.sigmoid)
            encoder = tflearn.fully_connected(encoder,
                                              latent,
                                              activation=tf.nn.sigmoid)
        else:
            # Dense path operates on flattened 30^3 = 27000-dim vectors.
            encoder = tflearn.input_data(shape=[None, 27000])
            encoder = tflearn.fully_connected(encoder,
                                              256,
                                              activation=tf.nn.relu)
            encoder = tflearn.fully_connected(encoder,
                                              64,
                                              activation=tf.nn.relu)
            encoder = tflearn.fully_connected(encoder,
                                              latent,
                                              activation=tf.nn.relu)

        if sparsity:
            # Mean activation per unit, penalized toward `sparsity_level`.
            avg_activations = tf.reduce_mean(encoder, axis=1)
            div = tf.reduce_mean(kl_divergence(avg_activations,
                                               sparsity_level))

        # Building the decoder
        if conv:
            decoder = tflearn.fully_connected(encoder, (30**3) * num_filters,
                                              activation=tf.nn.sigmoid)
            decoder = tflearn.reshape(decoder, [-1, 30, 30, 30, num_filters])
            decoder = conv_3d_transpose(decoder,
                                        1,
                                        filter_size, [30, 30, 30],
                                        activation=tf.nn.sigmoid)
        else:
            decoder = tflearn.fully_connected(encoder,
                                              64,
                                              activation=tf.nn.relu)
            decoder = tflearn.fully_connected(decoder,
                                              256,
                                              activation=tf.nn.relu)
            decoder = tflearn.fully_connected(decoder,
                                              27000,
                                              activation=tf.nn.relu)

    def sparsity_loss(y_pred, y_true):
        # MSE reconstruction loss plus weighted KL sparsity penalty.
        # `div` is only defined (and only referenced) when sparsity=True.
        return tf.reduce_mean(tf.square(y_pred - y_true)) + \
               sparsity_weight * div

    # Regression, with mean square error
    net = tflearn.regression(decoder,
                             optimizer='adam',
                             learning_rate=1e-4,
                             loss=sparsity_loss if sparsity else 'mean_square',
                             metric=None)

    # Training the auto encoder
    model = tflearn.DNN(net, tensorboard_verbose=0)
    encoding_model = tflearn.DNN(encoder, session=model.session)
    saver = tf.train.Saver()
    checkpoint_path = CKPT_FORMAT.format(id=checkpoint or ID_)

    if is_training:
        model.fit(X, X, n_epoch=epochs, run_id="auto_encoder", batch_size=256)
        saver.save(encoding_model.session, checkpoint_path)
    else:
        # BUG FIX: was `encoding_model.models` — tflearn DNN exposes
        # `.session` (as used by saver.save above); `.models` would raise
        # AttributeError on restore.
        saver.restore(encoding_model.session, checkpoint_path)

    return {'model': model, 'encoding_model': encoding_model}
Example #20
0
def residual_block_concat(incoming,
                          nb_blocks,
                          out_channels,
                          downsample=False,
                          downsample_strides=2,
                          activation='relu',
                          batch_norm=True,
                          bias=True,
                          weights_init='variance_scaling',
                          bias_init='zeros',
                          regularizer='L2',
                          weight_decay=0.0001,
                          trainable=True,
                          restore=True,
                          reuse=False,
                          scope=None,
                          name="ResidualBlock"):
    """ Residual Block (concat variant, 3-D).

    A residual block as described in MSRA's Deep Residual Network paper,
    adapted to 3-D convolutions. Full pre-activation architecture is used
    (BN -> activation -> conv). Unlike the standard residual block, the
    shortcut is CONCATENATED with the residual path rather than added.

    Input:
        5-D Tensor [batch, depth, height, width, in_channels]
        (the convolutions here are `conv_3d`).

    Output:
        5-D Tensor; note the concatenation below grows the tensor each
        block iteration.

    Arguments:
        incoming: `Tensor`. Incoming 5-D Layer.
        nb_blocks: `int`. Number of layer blocks.
        out_channels: `int`. The number of convolutional filters of the
            convolution layers.
        downsample: `bool`. If True, apply downsampling using
            'downsample_strides' for strides.
        downsample_strides: `int`. The strides to use when downsampling.
        activation: `str` (name) or `function` (returning a `Tensor`).
            Activation applied to this layer (see tflearn.activations).
            Default: 'relu'.
        batch_norm: `bool`. If True, apply batch normalization.
        bias: `bool`. If True, a bias is used.
        weights_init: `str` (name) or `Tensor`. Weights initialization.
            (see tflearn.initializations) Default: 'variance_scaling'.
        bias_init: `str` (name) or `tf.Tensor`. Bias initialization.
            (see tflearn.initializations) Default: 'zeros'.
        regularizer: `str` (name) or `Tensor`. Add a regularizer to this
            layer weights (see tflearn.regularizers). Default: 'L2'.
        weight_decay: `float`. Regularizer decay parameter. Default: 0.0001.
        trainable: `bool`. If True, weights will be trainable.
        restore: `bool`. If True, this layer weights will be restored when
            loading a model.
        reuse: `bool`. If True and 'scope' is provided, this layer variables
            will be reused (shared).
        scope: `str`. Define this layer scope (optional). A scope can be
            used to share variables between layers. Note that scope will
            override name.
        name: A name for this layer (optional). Default: 'ResidualBlock'.

    """
    resnet = incoming
    in_channels = incoming.get_shape().as_list()[-1]

    # Variable Scope fix for older TF: tf.variable_scope grew its keyword
    # signature over versions; fall back to variable_op_scope if needed.
    try:
        vscope = tf.variable_scope(scope,
                                   default_name=name,
                                   values=[incoming],
                                   reuse=reuse)
    except Exception:
        vscope = tf.variable_op_scope([incoming], scope, name, reuse=reuse)

    with vscope as scope:
        name = scope.name  #TODO: `name` is overwritten but never used after this

        for i in range(nb_blocks):

            # Shortcut branch starts from the current block input.
            identity = resnet

            # Only the downsampling configuration strides; otherwise stride 1.
            # NOTE(review): this mutates the parameter, so once downsample is
            # False the original downsample_strides is lost for all blocks.
            if not downsample:
                downsample_strides = 1

            # Pre-activation: BN -> activation -> conv (first conv may stride).
            if batch_norm:
                resnet = tflearn.batch_normalization(resnet)
            resnet = tflearn.activation(resnet, activation)

            resnet = conv_3d(resnet, out_channels, 3, downsample_strides,
                             'same', 'linear', bias, weights_init, bias_init,
                             regularizer, weight_decay, trainable, restore)

            if batch_norm:
                resnet = tflearn.batch_normalization(resnet)
            resnet = tflearn.activation(resnet, activation)

            resnet = conv_3d(resnet, out_channels, 3, 1, 'same', 'linear',
                             bias, weights_init, bias_init, regularizer,
                             weight_decay, trainable, restore)

            # Downsampling: average-pool the shortcut to match spatial dims.
            if downsample_strides > 1:
                identity = tflearn.avg_pool_3d(identity, 1, downsample_strides)

            # Projection to new dimension: zero-pad the channel (last) axis
            # so the shortcut has out_channels channels.
            if in_channels != out_channels:
                ch = (out_channels - in_channels) // 2
                identity = tf.pad(identity,
                                  [[0, 0], [0, 0], [0, 0], [0, 0], [ch, ch]])
                in_channels = out_channels

            # NOTE(review): legacy pre-TF-1.0 signature tf.concat(axis, values);
            # axis=1 concatenates along the DEPTH axis of an NDHWC tensor, not
            # the channel axis padded above — verify this is intended.
            resnet = tf.concat(1, [resnet, identity])
    return resnet
Example #21
0
def self(x_train, y_train, x_test, y_test):
    """Train a 3-D convolutional autoencoder on 224x5x5x1 volumes.

    The network compresses the input through two VALID-padded PReLU
    convolutions and reconstructs it with two transposed convolutions.
    `y_train` and `y_test` are accepted for interface symmetry but unused:
    the model is fit with x as both input and target. After each of ten
    20-epoch rounds, PSNR is reported on the training set; the final model
    is saved to 'my_model_self.tflearn'.
    """
    inputs = input_data(shape=[None, 224, 5, 5, 1])

    # Encoder: 24 then 12 filters of shape (24, 3, 3), each followed by BN.
    enc = conv_3d(inputs, 24, [24, 3, 3], padding='VALID',
                  strides=[1, 1, 1, 1, 1], activation='prelu')
    print('conv1', enc.get_shape().as_list())
    enc = batch_normalization(enc)

    enc = conv_3d(enc, 12, [24, 3, 3], padding='VALID',
                  strides=[1, 1, 1, 1, 1], activation='prelu')
    print('conv2', enc.get_shape().as_list())
    enc = batch_normalization(enc)

    # Decoder: mirror the encoder back up to the input shape.
    dec = conv_3d_transpose(enc, 24, [24, 3, 3], padding='VALID',
                            output_shape=[201, 3, 3, 24])
    dec = batch_normalization(dec)
    print('a')
    dec = conv_3d_transpose(dec, 1, [24, 3, 3], padding='VALID',
                            output_shape=[224, 5, 5, 1])
    dec = batch_normalization(dec)

    net = regression(dec,
                     optimizer='Adagrad',
                     loss='mean_square',
                     learning_rate=0.01,
                     metric='R2')
    model = tflearn.DNN(net,
                        tensorboard_verbose=0,
                        tensorboard_dir="./tflearn_logs/")

    # Ten rounds of 20 epochs; after each, measure reconstruction PSNR.
    for _ in range(10):
        model.fit(x_train,
                  x_train,
                  n_epoch=20,
                  shuffle=True,
                  show_metric=True,
                  validation_set=(x_test, x_test),
                  batch_size=32,
                  run_id='3d_net_self')
        reconstructed = np.array(model.predict(x_train))
        originals = np.array(x_train)
        psnr(originals, reconstructed)

    model.save('my_model_self.tflearn')
    '''
Example #22
0
def inception_v3_3d(width, height, frame_count, lr, output=9, model_name = 'sentnet_color.model'):
    """Build a 3D GoogLeNet-style (Inception-v1 layout) classifier with tflearn.

    Two copy-paste defects fixed relative to the previous revision:
      * ``inception_4b_1_1`` was registered under name='inception_4a_1_1',
        colliding with the real 4a branch of the same name.
      * ``inception_3b_5_5`` omitted activation='relu' (left linear) while
        every sibling inception branch is ReLU-activated.

    Args:
        width, height: spatial extent of the input volume.
        frame_count: unused -- the depth axis is hard-coded to 3 below;
            presumably a leftover parameter, confirm with callers.
        lr: learning rate for the momentum optimizer.
        output: number of output classes (softmax width).
        model_name: checkpoint path passed to ``tflearn.DNN``.

    Returns:
        A compiled ``tflearn.DNN`` model ready for ``fit``.
    """
    network = input_data(shape=[None, width, height, 3, 1], name='input')

    # Stem: 7x7x7/2 conv -> pool -> 1x1 reduce -> 3x3 conv -> pool.
    conv1_7_7 = conv_3d(network, 64, 7, strides=2, activation='relu', name='conv1_7_7_s2')
    pool1_3_3 = max_pool_3d(conv1_7_7, 3, strides=2)
    conv2_3_3_reduce = conv_3d(pool1_3_3, 64, 1, activation='relu', name='conv2_3_3_reduce')
    conv2_3_3 = conv_3d(conv2_3_3_reduce, 192, 3, activation='relu', name='conv2_3_3')
    pool2_3_3 = max_pool_3d(conv2_3_3, kernel_size=3, strides=2, name='pool2_3_3_s2')

    # Each inception block: 1x1, 1x1->3x3, 1x1->5x5 and pool->1x1 branches,
    # concatenated along the channel axis (axis=4 for 5-D tensors).
    inception_3a_1_1 = conv_3d(pool2_3_3, 64, 1, activation='relu', name='inception_3a_1_1')
    inception_3a_3_3_reduce = conv_3d(pool2_3_3, 96, 1, activation='relu', name='inception_3a_3_3_reduce')
    inception_3a_3_3 = conv_3d(inception_3a_3_3_reduce, 128, filter_size=3, activation='relu', name='inception_3a_3_3')
    inception_3a_5_5_reduce = conv_3d(pool2_3_3, 16, filter_size=1, activation='relu', name='inception_3a_5_5_reduce')
    inception_3a_5_5 = conv_3d(inception_3a_5_5_reduce, 32, filter_size=5, activation='relu', name='inception_3a_5_5')
    inception_3a_pool = max_pool_3d(pool2_3_3, kernel_size=3, strides=1, )
    inception_3a_pool_1_1 = conv_3d(inception_3a_pool, 32, filter_size=1, activation='relu', name='inception_3a_pool_1_1')

    inception_3a_output = merge([inception_3a_1_1, inception_3a_3_3, inception_3a_5_5, inception_3a_pool_1_1], mode='concat', axis=4)

    inception_3b_1_1 = conv_3d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_1_1')
    inception_3b_3_3_reduce = conv_3d(inception_3a_output, 128, filter_size=1, activation='relu', name='inception_3b_3_3_reduce')
    inception_3b_3_3 = conv_3d(inception_3b_3_3_reduce, 192, filter_size=3, activation='relu', name='inception_3b_3_3')
    inception_3b_5_5_reduce = conv_3d(inception_3a_output, 32, filter_size=1, activation='relu', name='inception_3b_5_5_reduce')
    # FIX: activation='relu' was missing here (every sibling branch has it).
    inception_3b_5_5 = conv_3d(inception_3b_5_5_reduce, 96, filter_size=5, activation='relu', name='inception_3b_5_5')
    inception_3b_pool = max_pool_3d(inception_3a_output, kernel_size=3, strides=1, name='inception_3b_pool')
    inception_3b_pool_1_1 = conv_3d(inception_3b_pool, 64, filter_size=1, activation='relu', name='inception_3b_pool_1_1')

    inception_3b_output = merge([inception_3b_1_1, inception_3b_3_3, inception_3b_5_5, inception_3b_pool_1_1], mode='concat', axis=4, name='inception_3b_output')

    pool3_3_3 = max_pool_3d(inception_3b_output, kernel_size=3, strides=2, name='pool3_3_3')
    inception_4a_1_1 = conv_3d(pool3_3_3, 192, filter_size=1, activation='relu', name='inception_4a_1_1')
    inception_4a_3_3_reduce = conv_3d(pool3_3_3, 96, filter_size=1, activation='relu', name='inception_4a_3_3_reduce')
    inception_4a_3_3 = conv_3d(inception_4a_3_3_reduce, 208, filter_size=3, activation='relu', name='inception_4a_3_3')
    inception_4a_5_5_reduce = conv_3d(pool3_3_3, 16, filter_size=1, activation='relu', name='inception_4a_5_5_reduce')
    inception_4a_5_5 = conv_3d(inception_4a_5_5_reduce, 48, filter_size=5, activation='relu', name='inception_4a_5_5')
    inception_4a_pool = max_pool_3d(pool3_3_3, kernel_size=3, strides=1, name='inception_4a_pool')
    inception_4a_pool_1_1 = conv_3d(inception_4a_pool, 64, filter_size=1, activation='relu', name='inception_4a_pool_1_1')

    inception_4a_output = merge([inception_4a_1_1, inception_4a_3_3, inception_4a_5_5, inception_4a_pool_1_1], mode='concat', axis=4, name='inception_4a_output')

    # FIX: this layer was mistakenly named 'inception_4a_1_1' (duplicate).
    inception_4b_1_1 = conv_3d(inception_4a_output, 160, filter_size=1, activation='relu', name='inception_4b_1_1')
    inception_4b_3_3_reduce = conv_3d(inception_4a_output, 112, filter_size=1, activation='relu', name='inception_4b_3_3_reduce')
    inception_4b_3_3 = conv_3d(inception_4b_3_3_reduce, 224, filter_size=3, activation='relu', name='inception_4b_3_3')
    inception_4b_5_5_reduce = conv_3d(inception_4a_output, 24, filter_size=1, activation='relu', name='inception_4b_5_5_reduce')
    inception_4b_5_5 = conv_3d(inception_4b_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4b_5_5')
    inception_4b_pool = max_pool_3d(inception_4a_output, kernel_size=3, strides=1, name='inception_4b_pool')
    inception_4b_pool_1_1 = conv_3d(inception_4b_pool, 64, filter_size=1, activation='relu', name='inception_4b_pool_1_1')

    inception_4b_output = merge([inception_4b_1_1, inception_4b_3_3, inception_4b_5_5, inception_4b_pool_1_1], mode='concat', axis=4, name='inception_4b_output')

    inception_4c_1_1 = conv_3d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_1_1')
    inception_4c_3_3_reduce = conv_3d(inception_4b_output, 128, filter_size=1, activation='relu', name='inception_4c_3_3_reduce')
    inception_4c_3_3 = conv_3d(inception_4c_3_3_reduce, 256, filter_size=3, activation='relu', name='inception_4c_3_3')
    inception_4c_5_5_reduce = conv_3d(inception_4b_output, 24, filter_size=1, activation='relu', name='inception_4c_5_5_reduce')
    inception_4c_5_5 = conv_3d(inception_4c_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4c_5_5')
    inception_4c_pool = max_pool_3d(inception_4b_output, kernel_size=3, strides=1)
    inception_4c_pool_1_1 = conv_3d(inception_4c_pool, 64, filter_size=1, activation='relu', name='inception_4c_pool_1_1')

    inception_4c_output = merge([inception_4c_1_1, inception_4c_3_3, inception_4c_5_5, inception_4c_pool_1_1], mode='concat', axis=4, name='inception_4c_output')

    inception_4d_1_1 = conv_3d(inception_4c_output, 112, filter_size=1, activation='relu', name='inception_4d_1_1')
    inception_4d_3_3_reduce = conv_3d(inception_4c_output, 144, filter_size=1, activation='relu', name='inception_4d_3_3_reduce')
    inception_4d_3_3 = conv_3d(inception_4d_3_3_reduce, 288, filter_size=3, activation='relu', name='inception_4d_3_3')
    inception_4d_5_5_reduce = conv_3d(inception_4c_output, 32, filter_size=1, activation='relu', name='inception_4d_5_5_reduce')
    inception_4d_5_5 = conv_3d(inception_4d_5_5_reduce, 64, filter_size=5, activation='relu', name='inception_4d_5_5')
    inception_4d_pool = max_pool_3d(inception_4c_output, kernel_size=3, strides=1, name='inception_4d_pool')
    inception_4d_pool_1_1 = conv_3d(inception_4d_pool, 64, filter_size=1, activation='relu', name='inception_4d_pool_1_1')

    inception_4d_output = merge([inception_4d_1_1, inception_4d_3_3, inception_4d_5_5, inception_4d_pool_1_1], mode='concat', axis=4, name='inception_4d_output')

    inception_4e_1_1 = conv_3d(inception_4d_output, 256, filter_size=1, activation='relu', name='inception_4e_1_1')
    inception_4e_3_3_reduce = conv_3d(inception_4d_output, 160, filter_size=1, activation='relu', name='inception_4e_3_3_reduce')
    inception_4e_3_3 = conv_3d(inception_4e_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_4e_3_3')
    inception_4e_5_5_reduce = conv_3d(inception_4d_output, 32, filter_size=1, activation='relu', name='inception_4e_5_5_reduce')
    inception_4e_5_5 = conv_3d(inception_4e_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_4e_5_5')
    inception_4e_pool = max_pool_3d(inception_4d_output, kernel_size=3, strides=1, name='inception_4e_pool')
    inception_4e_pool_1_1 = conv_3d(inception_4e_pool, 128, filter_size=1, activation='relu', name='inception_4e_pool_1_1')

    inception_4e_output = merge([inception_4e_1_1, inception_4e_3_3, inception_4e_5_5, inception_4e_pool_1_1], axis=4, mode='concat')

    pool4_3_3 = max_pool_3d(inception_4e_output, kernel_size=3, strides=2, name='pool_3_3')

    inception_5a_1_1 = conv_3d(pool4_3_3, 256, filter_size=1, activation='relu', name='inception_5a_1_1')
    inception_5a_3_3_reduce = conv_3d(pool4_3_3, 160, filter_size=1, activation='relu', name='inception_5a_3_3_reduce')
    inception_5a_3_3 = conv_3d(inception_5a_3_3_reduce, 320, filter_size=3, activation='relu', name='inception_5a_3_3')
    inception_5a_5_5_reduce = conv_3d(pool4_3_3, 32, filter_size=1, activation='relu', name='inception_5a_5_5_reduce')
    inception_5a_5_5 = conv_3d(inception_5a_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5a_5_5')
    inception_5a_pool = max_pool_3d(pool4_3_3, kernel_size=3, strides=1, name='inception_5a_pool')
    inception_5a_pool_1_1 = conv_3d(inception_5a_pool, 128, filter_size=1, activation='relu', name='inception_5a_pool_1_1')

    inception_5a_output = merge([inception_5a_1_1, inception_5a_3_3, inception_5a_5_5, inception_5a_pool_1_1], axis=4, mode='concat')

    inception_5b_1_1 = conv_3d(inception_5a_output, 384, filter_size=1, activation='relu', name='inception_5b_1_1')
    inception_5b_3_3_reduce = conv_3d(inception_5a_output, 192, filter_size=1, activation='relu', name='inception_5b_3_3_reduce')
    inception_5b_3_3 = conv_3d(inception_5b_3_3_reduce, 384, filter_size=3, activation='relu', name='inception_5b_3_3')
    inception_5b_5_5_reduce = conv_3d(inception_5a_output, 48, filter_size=1, activation='relu', name='inception_5b_5_5_reduce')
    inception_5b_5_5 = conv_3d(inception_5b_5_5_reduce, 128, filter_size=5, activation='relu', name='inception_5b_5_5')
    inception_5b_pool = max_pool_3d(inception_5a_output, kernel_size=3, strides=1, name='inception_5b_pool')
    inception_5b_pool_1_1 = conv_3d(inception_5b_pool, 128, filter_size=1, activation='relu', name='inception_5b_pool_1_1')
    inception_5b_output = merge([inception_5b_1_1, inception_5b_3_3, inception_5b_5_5, inception_5b_pool_1_1], axis=4, mode='concat')

    # Head: global 7x7 average pool, dropout, softmax classifier.
    pool5_7_7 = avg_pool_3d(inception_5b_output, kernel_size=7, strides=1)
    pool5_7_7 = dropout(pool5_7_7, 0.4)
    loss = fully_connected(pool5_7_7, output, activation='softmax')

    network = regression(loss, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')

    model = tflearn.DNN(network, checkpoint_path=model_name,
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')

    return model
	def vol3d_encoder(self, x, name='Vol3D_Encoder'):
		"""Encode a 3-D volume into a 2-D feature map with strided 3D convs.

		The input is squashed into tanh range, given a batch axis, and
		transposed so the channel axis becomes the batch axis; six
		stride-2 conv+bottleneck stages (16 -> 32 -> 64 -> 128 -> 256 ->
		1024 filters) then compress it, and the code is unfolded back to
		a 2-D image via batch_to_space.

		NOTE(review): the `name` parameter is accepted but never used in
		this body -- confirm whether a scope was intended.
		"""
		with argscope([Conv3D], kernel_shape=4, padding='SAME', nl=tf.nn.elu):
			# x = x - VGG19_MEAN_TENSOR
			# Map raw intensities into tanh range before encoding.
			x = tf_2tanh(x)
			# x = x/255.0
			x = tf.expand_dims(x, axis=0) # to 1 256 256 256 3
			x = tf.transpose(x, [4, 1, 2, 3, 0]) # 
			"""
			# x = (LinearWrap(x)
			# 	.Conv3D('conv1a',   16, strides = 2, padding='SAME') #
			# 	.Conv3D('conv2a',   32, strides = 2, padding='SAME') #
			# 	.Conv3D('conv3a',   64, strides = 2, padding='SAME') #
			# 	.Conv3D('conv4a',  128, strides = 2, padding='SAME') #
			# 	.Conv3D('conv5a',  256, strides = 2, padding='SAME') #
			# 	.Conv3D('conv6a', 1024, strides = 2, padding='SAME', use_bias=True, nl=tf.tanh) # 4x4x4x1024
			# 	()) 
			"""
			# Each stage: stride-2 conv_3d halves D/H/W, then a residual
			# bottleneck at the same width.
			with tf.contrib.framework.arg_scope([conv_3d], filter_size=4, strides=[1, 2, 2, 2, 1], activation='relu', reuse=False):
				with tf.contrib.framework.arg_scope([conv_3d_transpose], filter_size=4, strides=[1, 2, 2, 2, 1], activation='relu', reuse=False):
					# Encoder
					e1a  = conv_3d(incoming=x,        name="e1a", nb_filter=16, bias=False)
					r1a  = tf_bottleneck(e1a,              name="r1a", nb_filter=16)
					# r1a  = tf.nn.dropout(r1a,     keep_prob=0.5)

					e2a  = conv_3d(incoming=r1a,           name="e2a", nb_filter=32, bias=False)
					r2a  = tf_bottleneck(e2a,              name="r2a", nb_filter=32)
					# r2a  = tf.nn.dropout(r2a,     keep_prob=0.5)

					e3a  = conv_3d(incoming=r2a,           name="e3a", nb_filter=64, bias=False)
					r3a  = tf_bottleneck(e3a,              name="r3a", nb_filter=64)
					# r3a  = tf.nn.dropout(r3a,     keep_prob=0.5)

					e4a  = conv_3d(incoming=r3a,           name="e4a", nb_filter=128, bias=False)
					r4a  = tf_bottleneck(e4a,              name="r4a", nb_filter=128)
					# r4a  = tf.nn.dropout(r4a,     keep_prob=0.5)

					e5a  = conv_3d(incoming=r4a,           name="e5a", nb_filter=256, bias=False)
					r5a  = tf_bottleneck(e5a,              name="r5a", nb_filter=256)
					#r5a  = tf.nn.dropout(r5a,     	keep_prob=0.5)

					e6a  = conv_3d(incoming=r5a,           name="e6a", nb_filter=1024, bias=False)
					r6a  = tf_bottleneck(e6a,              name="r6a", nb_filter=1024)

					# e7a  = conv_3d(incoming=r6a,           name="e7a", nb_filter=NB_FILTERS*8)           , bias=False 
					# r7a  = tf_bottleneck(e7a,              name="r7a", nb_filter=NB_FILTERS*8)
					# r7a  = dropout(incoming=r7a, keep_prob=0.5)
					print("In1 :", x.get_shape().as_list())
					print("E1a :", e1a.get_shape().as_list())
					print("R1a :", r1a.get_shape().as_list())
					print("E2a :", e2a.get_shape().as_list())
					print("R2a :", r2a.get_shape().as_list())
					print("E3a :", e3a.get_shape().as_list())
					print("R3a :", r3a.get_shape().as_list())
					print("E4a :", e4a.get_shape().as_list())
					print("R4a :", r4a.get_shape().as_list())
					print("E5a :", e5a.get_shape().as_list())
					print("R5a :", r5a.get_shape().as_list())
					print("E6a :", e6a.get_shape().as_list())
					print("R6a :", r6a.get_shape().as_list())

					x = r6a

			# Undo the channel<->batch transpose, then fold the coded
			# volume into a single 2-D image (block_size=64); the layout
			# math assumes the shapes printed above -- TODO confirm.
			x = tf.transpose(x, [4, 1, 2, 3, 0]) ##
			x = tf.reshape(x, [-1, 4, 4, 3]) #
			x = tf.batch_to_space(x, crops=[[0,0],[0,0]], block_size=64,name='b2s')
			# x = x*255.0
			x = tf_2imag(x)
			x = INLReLU(x)
			# x = x + VGG19_MEAN_TENSOR
			return x
Example #24
0
def test1(x_train, y_train, x_test, y_test):
    """Train a small 3D-conv + residual classifier on labelled volumes.

    Pipeline: (N, 224, 5, 5, 1) input -> two valid-padded PReLU conv_3d
    layers (with batch norm) -> a width-16 concat residual stage -> two
    fully-connected layers (100 -> 20, softmax) -> Adagrad training for
    200 epochs with a held-out validation set.
    """
    inp = input_data(shape=[None, 224, 5, 5, 1], )

    # First feature extractor: 24 filters, [24,3,3] kernel, no padding.
    c1 = conv_3d(inp,
                 24, [24, 3, 3],
                 padding='VALID',
                 strides=[1, 1, 1, 1, 1],
                 activation='prelu',
                 weight_decay=0.05)
    print('conv1', c1.get_shape().as_list())
    bn1 = batch_normalization(c1)

    # Second feature extractor narrows to 12 filters.
    c2 = conv_3d(bn1,
                 12, [24, 3, 3],
                 padding='VALID',
                 strides=[1, 1, 1, 1, 1],
                 activation='prelu',
                 weight_decay=0.05)
    print('conv2', c2.get_shape().as_list())
    bn2 = batch_normalization(c2)

    # Residual stage: 2 concat residual blocks of width 16, then BN + ReLU.
    res = residual_block_concat(bn2,
                                2,
                                16,
                                batch_norm=None,
                                downsample_strides=1,
                                weight_decay=0.05)
    res = tflearn.batch_normalization(res)
    res = tflearn.activation(res, 'relu')

    # Classifier head.
    flat = flatten(res)
    print('flat', flat.get_shape().as_list())
    hidden = fully_connected(flat, 100, activation='prelu')
    hidden = dropout(hidden, 0.9)
    probs = fully_connected(hidden, 20, activation='softmax')

    net = regression(probs,
                     optimizer='Adagrad',
                     loss='categorical_crossentropy',
                     learning_rate=0.01)

    clf = tflearn.DNN(net,
                      tensorboard_verbose=0,
                      tensorboard_dir="./tflearn_logs/")
    clf.fit(x_train,
            y_train,
            n_epoch=200,
            shuffle=True,
            validation_set=(x_test, y_test),
            show_metric=True,
            batch_size=32,
            run_id='3d_net')
Example #25
0
def generator_fusionnet(images, name='generator'):
	"""FusionNet-style 3D encoder/decoder generator.

	Seven strided 3D-conv encoder stages compress the input, then seven
	3D transposed-conv decoder stages upsample it back, each decoder
	stage element-wise summed with its mirrored encoder activation
	(skip connections).  Dropout (keep_prob=0.5) is applied on the three
	deepest decoder stages only.

	Args:
		images: 5-D input batch (presumably N x D x H x W x C -- confirm
			against callers).
		name: variable scope wrapping every layer of the generator.

	Returns:
		Tuple ``(out, bottleneck)``: the tanh-activated reconstruction and
		the deepest encoder activation (``enc8``).
	"""
	# NOTE(review): these locals (and the halving below) are never
	# consumed -- leftovers from removed shape-tracking prints.  Kept so
	# the module-level DIMX/DIMY/DIMZ lookups still occur as before.
	dimx = DIMX
	dimy = DIMY
	dimz = DIMZ

	with tf.variable_scope(name):
		# ---------------- Encoder ----------------
		enc1 = conv_3d(incoming=images, nb_filter=NB_FILTERS * 1,
					   filter_size=4, strides=[1, 1, 1, 1, 1],
					   regularizer='L1', activation='elu')
		enc1 = batch_normalization(incoming=enc1)

		enc2 = conv_3d(incoming=enc1, nb_filter=NB_FILTERS * 1,
					   filter_size=4, strides=[1, 2, 2, 2, 1],
					   regularizer='L1', activation='elu')
		enc2 = batch_normalization(incoming=enc2)

		enc3 = conv_3d(incoming=enc2, nb_filter=NB_FILTERS * 2,
					   filter_size=4, strides=[1, 2, 2, 2, 1],
					   regularizer='L1', activation='elu')
		enc3 = batch_normalization(incoming=enc3)

		enc4 = conv_3d(incoming=enc3, nb_filter=NB_FILTERS * 2,
					   filter_size=4, strides=[1, 2, 2, 2, 1],
					   regularizer='L1', activation='elu')
		enc4 = batch_normalization(incoming=enc4)

		enc5 = conv_3d(incoming=enc4, nb_filter=NB_FILTERS * 4,
					   filter_size=4, strides=[1, 2, 2, 2, 1],
					   regularizer='L1', activation='elu')
		enc5 = batch_normalization(incoming=enc5)

		enc6 = conv_3d(incoming=enc5, nb_filter=NB_FILTERS * 4,
					   filter_size=4, strides=[1, 2, 2, 2, 1],
					   regularizer='L1', activation='elu')
		enc6 = batch_normalization(incoming=enc6)

		enc7 = conv_3d(incoming=enc6, nb_filter=NB_FILTERS * 8,
					   filter_size=4, strides=[1, 2, 2, 2, 1],
					   regularizer='L1', activation='elu')
		enc7 = batch_normalization(incoming=enc7)

		# ---------------- Bottleneck ----------------
		enc8 = conv_3d(incoming=enc7, nb_filter=NB_FILTERS * 8,
					   filter_size=4, strides=[1, 2, 2, 2, 1],
					   regularizer='L1', activation='elu')
		dimz, dimy, dimx = dimz/2, dimy/2, dimx/2
		enc8 = batch_normalization(incoming=enc8)

		# ------------- Decoder (mirrors encoder, skip-summed) -------------
		dec7 = conv_3d_transpose(incoming=enc8, nb_filter=NB_FILTERS * 8,
								 filter_size=4, strides=[1, 2, 2, 2, 1],
								 regularizer='L1', activation='elu',
								 output_shape=[2, 4, 4])
		dec7 = batch_normalization(incoming=dec7)
		dec7 = dropout(incoming=dec7, keep_prob=0.5)
		dec7 = merge(tensors_list=[dec7, enc7], mode='elemwise_sum')

		dec6 = conv_3d_transpose(incoming=dec7, nb_filter=NB_FILTERS * 4,
								 filter_size=4, strides=[1, 2, 2, 2, 1],
								 regularizer='L1', activation='elu',
								 output_shape=[4, 8, 8])
		dec6 = batch_normalization(incoming=dec6)
		dec6 = dropout(incoming=dec6, keep_prob=0.5)
		dec6 = merge(tensors_list=[dec6, enc6], mode='elemwise_sum')

		dec5 = conv_3d_transpose(incoming=dec6, nb_filter=NB_FILTERS * 4,
								 filter_size=4, strides=[1, 2, 2, 2, 1],
								 regularizer='L1', activation='elu',
								 output_shape=[8, 16, 16])
		dec5 = batch_normalization(incoming=dec5)
		dec5 = dropout(incoming=dec5, keep_prob=0.5)
		dec5 = merge(tensors_list=[dec5, enc5], mode='elemwise_sum')

		dec4 = conv_3d_transpose(incoming=dec5, nb_filter=NB_FILTERS * 2,
								 filter_size=4, strides=[1, 2, 2, 2, 1],
								 regularizer='L1', activation='elu',
								 output_shape=[16, 32, 32])
		dec4 = batch_normalization(incoming=dec4)
		dec4 = merge(tensors_list=[dec4, enc4], mode='elemwise_sum')

		dec3 = conv_3d_transpose(incoming=dec4, nb_filter=NB_FILTERS * 2,
								 filter_size=4, strides=[1, 2, 2, 2, 1],
								 regularizer='L1', activation='elu',
								 output_shape=[32, 64, 64])
		dec3 = batch_normalization(incoming=dec3)
		dec3 = merge(tensors_list=[dec3, enc3], mode='elemwise_sum')

		dec2 = conv_3d_transpose(incoming=dec3, nb_filter=NB_FILTERS * 1,
								 filter_size=4, strides=[1, 2, 2, 2, 1],
								 regularizer='L1', activation='elu',
								 output_shape=[64, 128, 128])
		dec2 = batch_normalization(incoming=dec2)
		dec2 = merge(tensors_list=[dec2, enc2], mode='elemwise_sum')

		dec1 = conv_3d_transpose(incoming=dec2, nb_filter=NB_FILTERS * 1,
								 filter_size=4, strides=[1, 2, 2, 2, 1],
								 regularizer='L1', activation='elu',
								 output_shape=[128, 256, 256])
		dec1 = batch_normalization(incoming=dec1)
		dec1 = merge(tensors_list=[dec1, enc1], mode='elemwise_sum')

		# Final projection back to one channel in tanh range.
		out = conv_3d_transpose(incoming=dec1, nb_filter=1,
								filter_size=4, strides=[1, 1, 1, 1, 1],
								regularizer='L1', activation='tanh',
								output_shape=[128, 256, 256])
		return out, enc8