def residual_block_3d(input,
                      block_function,
                      filters,
                      repetitions,
                      kernel_regularizer,
                      is_first_layer=False,
                      scope=''):
    """Stack ``repetitions`` applications of ``block_function``.

    The first repetition downsamples spatially with stride (1, 2, 2),
    except when ``is_first_layer`` is True (presumably because the input
    was already pooled upstream — confirm against the caller).

    Args:
        input: 5-D input tensor fed to the first block.
        block_function: callable building one residual block; must accept
            ``(tensor, filters=, strides=, kernel_regularizer=)``.
        filters: number of filters passed to every block.
        repetitions: how many blocks to stack.
        kernel_regularizer: regularizer forwarded to each block.
        is_first_layer: if True, skip the initial spatial downsampling.
        scope: name-scope prefix; each block lives in ``<scope>_<index>``.

    Returns:
        The output tensor of the last stacked block.
    """
    out = input
    for rep in range(repetitions):
        # Only the first block of the stack reduces height/width.
        downsample = rep == 0 and not is_first_layer
        stride = (1, 2, 2) if downsample else (1, 1, 1)
        with tf.name_scope('{}_{}'.format(scope, rep)):
            out = block_function(out,
                                 filters=filters,
                                 strides=stride,
                                 kernel_regularizer=kernel_regularizer)
            print_layer_details('{}_{}'.format(scope, rep), out.get_shape())
    return out
def inception_v4(x_):
    """Build an Inception-v4-style 3D graph with residual shortcuts.

    Pipeline: max-pool + stem -> Inception-A (+shortcut) -> Reduction-A
    -> Inception-B (+shortcut) -> Reduction-B -> Inception-C (+shortcut)
    -> average pool -> dense(64, relu) -> dense(3).

    Args:
        x_: 5-D input tensor; assumed layout (batch, depth, height,
            width, channels) — TODO confirm against the caller.

    Returns:
        Logits tensor of shape (batch, 3).
    """
    with tf.name_scope("Stem"):
        # Halve height/width up front to cut the cost of the stem.
        x_max = tf.layers.max_pooling3d(x_,
                                        pool_size=[1, 2, 2],
                                        strides=(1, 2, 2))
        x_ = stem(x_max, "Stem")
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            x_.get_shape())
    with tf.name_scope("Inception_A"):
        shortcut = x_
        for i in range(1):
            x_ = inception_a(x_, "Inception_A" + str(i))
            # Project the identity branch with a 1x1x1 conv so its channel
            # count matches the inception output before the add.
            shortcut = tf.layers.conv3d(
                shortcut,
                filters=x_.get_shape()[4],
                kernel_size=[1, 1, 1],
                strides=(1, 1, 1),
                padding='Valid',
                kernel_regularizer=tf.keras.regularizers.l2(1e-4),
                activation=tf.nn.relu)
            # Residual merge: [shortcut matrix, residual matrix]
            x_ = tf.keras.layers.add([shortcut, x_])
            x_ = tf.nn.relu(x_)
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            x_.get_shape())
    with tf.name_scope("Reduction_A"):
        x_ = reduction_a(x_, "Reduction_A")
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            x_.get_shape())
    with tf.name_scope("Inception_B"):
        shortcut = x_
        for i in range(1):
            x_ = inception_b(x_, "Inception_B")
            # Same 1x1x1 projection trick as in Inception_A.
            shortcut = tf.layers.conv3d(
                shortcut,
                filters=x_.get_shape()[4],
                kernel_size=[1, 1, 1],
                strides=(1, 1, 1),
                padding='Valid',
                kernel_regularizer=tf.keras.regularizers.l2(1e-4),
                activation=tf.nn.relu)
            x_ = tf.keras.layers.add([shortcut, x_])
            x_ = tf.nn.relu(x_)
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            x_.get_shape())
    with tf.name_scope("Reduction_B"):
        x_ = reduction_b(x_, "Reduction_B")
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            x_.get_shape())
    with tf.name_scope("Inception_C"):
        shortcut = x_
        for i in range(1):
            x_ = inception_c(x_, "Inception_C")
            shortcut = tf.layers.conv3d(
                shortcut,
                filters=x_.get_shape()[4],
                kernel_size=[1, 1, 1],
                strides=(1, 1, 1),
                padding='Valid',
                kernel_regularizer=tf.keras.regularizers.l2(1e-4),
                activation=tf.nn.relu)
            x_ = tf.keras.layers.add([shortcut, x_])
            x_ = tf.nn.relu(x_)
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            x_.get_shape())
    with tf.name_scope("Final_Layer"):
        # Pool over half the remaining spatial extent, then classify.
        width = int(int(x_.get_shape()[2]) / 2)
        height = int(int(x_.get_shape()[3]) / 2)
        x_avg = tf.layers.average_pooling3d(x_,
                                            pool_size=[1, width, height],
                                            strides=(1, width, height))
        x_flat = tf.layers.flatten(inputs=x_avg)
        x_dense = tf.layers.dense(
            inputs=x_flat,
            units=64,
            activation=tf.nn.relu,
            kernel_regularizer=tf.keras.regularizers.l2(1e-4))

        # Final logits — 3 output classes, no activation (raw logits).
        y_ = tf.layers.dense(inputs=x_dense,
                             units=3,
                             kernel_regularizer=tf.keras.regularizers.l2(1e-4))
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            y_.get_shape())
    return y_
def inception_v4_se(x_):
    """Build an Inception-v4-style 3D graph with squeeze-and-excitation.

    Same stage layout as ``inception_v4`` but each inception stage is
    followed by an SE block instead of a residual shortcut.

    Args:
        x_: 5-D input tensor; assumed layout (batch, depth, height,
            width, channels) — TODO confirm against the caller.

    Returns:
        Logits tensor of shape (batch, 3).
    """
    # Channel-reduction ratio shared by every SE block.
    se_ratio = 4

    with tf.name_scope("Stem"):
        pooled = tf.layers.max_pooling3d(x_,
                                         pool_size=[1, 2, 2],
                                         strides=(1, 2, 2))
        x_ = stem(pooled, "Stem")
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            x_.get_shape())

    with tf.name_scope("Inception_A"):
        for block_idx in range(1):
            x_ = inception_a(x_, "Inception_A")
            n_channels = int(np.shape(x_)[-1])
            x_ = se_layer(x_,
                          out_dim=n_channels,
                          ratio=se_ratio,
                          scope="SE_A" + str(block_idx))
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            x_.get_shape())

    with tf.name_scope("Reduction_A"):
        x_ = reduction_a(x_, "Reduction_A")
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            x_.get_shape())

    with tf.name_scope("Inception_B"):
        for block_idx in range(1):
            x_ = inception_b(x_, "Inception_B")
            n_channels = int(np.shape(x_)[-1])
            x_ = se_layer(x_,
                          out_dim=n_channels,
                          ratio=se_ratio,
                          scope="SE_B" + str(block_idx))
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            x_.get_shape())

    with tf.name_scope("Reduction_B"):
        x_ = reduction_b(x_, "Reduction_B")
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            x_.get_shape())

    with tf.name_scope("Inception_C"):
        for block_idx in range(1):
            x_ = inception_c(x_, "Inception_C")
            n_channels = int(np.shape(x_)[-1])
            x_ = se_layer(x_,
                          out_dim=n_channels,
                          ratio=se_ratio,
                          scope="SE_C" + str(block_idx))
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            x_.get_shape())

    with tf.name_scope("Final_Layer"):
        # Average-pool over half the remaining spatial extent.
        pool_w = int(x_.get_shape()[2]) // 2
        pool_h = int(x_.get_shape()[3]) // 2
        x_avg = tf.layers.average_pooling3d(x_,
                                            pool_size=[1, pool_w, pool_h],
                                            strides=(1, pool_w, pool_h))
        x_flat = tf.layers.flatten(inputs=x_avg)
        # NOTE: unlike inception_v4, this dense layer has no activation.
        x_dense = tf.layers.dense(
            inputs=x_flat,
            units=64,
            kernel_regularizer=tf.keras.regularizers.l2(1e-4))
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            x_dense.get_shape())
        y_ = tf.layers.dense(inputs=x_dense,
                             units=3,
                             kernel_regularizer=tf.keras.regularizers.l2(1e-4))

    return y_
def all_conv(x_):
    """Build an all-convolutional 3D classifier (no pooling except final).

    Downsampling is done by strided convolutions (layers B and D) rather
    than pooling, in the spirit of the "All Convolutional Net". Per-layer
    comments below record the expected shapes and parameter counts.

    Args:
        x_: 5-D input tensor; the shape comments assume
            (batch, 8, 84, 84, 1) — TODO confirm against the caller.

    Returns:
        Logits tensor of shape (batch, 3).
    """
    num_classes = 3
    # # # Summary
    # Total Number of Vars = 1.370.817
    # # # Layer A
    # input  8 x 84 x 84 x 1
    # output 8 x 84 x 84 x 96
    # number of variables = 84192
    ### Conv1 = 1 x 3 x 3 x 96 + 96 = 960
    ### Conv2 = 96 x 3 x 3 x 96 + 96 = 83040
    ### Batchnorm = Filter * 2 = 192
    with tf.name_scope("Layer_A"):
        x_ = Conv(x_,
                  filters=96,
                  kernel_size=[1, 3, 3],
                  strides=(1, 1, 1),
                  padding='same')
        x_ = Conv(x_,
                  filters=96,
                  kernel_size=[1, 3, 3],
                  strides=(1, 1, 1),
                  padding='same')
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            x_.get_shape())

    # # # Layer B
    # input  8 x 84 x 84 x 96
    # output 8 x 41 x 41 x 96
    # number of variables = 83232
    ### Conv1 = 96 x 3 x 3 x 96 + 96 = 83040
    ### Batchnorm = Filter * 2 = 192
    with tf.name_scope("Layer_B"):
        # Strided conv replaces a pooling layer for spatial downsampling.
        x_ = Conv(x_, filters=96, kernel_size=[1, 3, 3], strides=(1, 2, 2))
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            x_.get_shape())

    # # # Layer C_1 & C_2
    # input  8 x 41 x 41 x 96
    # output 8 x 41 x 41 x 192
    # number of variables = 498.816
    ### Conv1 = 96 x 3 x 3 x 192 + 192 = 166080
    ### Conv2 = 192 x 3 x 3 x 192 + 192 = 331968
    ### Batchnorm = Filter * 4 = 768
    for n in range(2):
        with tf.name_scope("Layer_C_" + str(n)):
            x_ = Conv(x_,
                      filters=192,
                      kernel_size=[1, 3, 3],
                      strides=(1, 1, 1),
                      padding='same')
            print_layer_details(tf.contrib.framework.get_name_scope(),
                                x_.get_shape())

    # # # Layer D
    # input  8 x 41 x 41 x 192
    # output 8 x 20 x 20 x 192
    # number of variables = 332352
    ### Conv1 = 192 x 3 x 3 x 192 + 192 = 331968
    ### Batchnorm = Filter * 2 = 384
    with tf.name_scope("Layer_D"):
        # Second strided-conv downsampling stage.
        x_ = Conv(x_, filters=192, kernel_size=[1, 3, 3], strides=(1, 2, 2))
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            x_.get_shape())

    # # # Layer E
    # input  8 x 20 x 20 x 192
    # output 8 x 19 x 19 x 192
    # number of variables = 332352
    ### Conv1 = 192 x 3 x 3 x 192 + 192 = 331968
    ### Batchnorm = Filter * 2 = 384
    with tf.name_scope("Layer_E"):
        x_ = Conv(x_,
                  filters=192,
                  kernel_size=[1, 3, 3],
                  strides=(1, 1, 1),
                  padding='same')
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            x_.get_shape())

    # # # Layer F
    # input  8 x 19 x 19 x 192
    # output 8 x 19 x 19 x 192
    # number of variables = 37440
    ### Conv1 = 192 x 1 x 1 x 192 + 192 = 37056
    ### Batchnorm = Filter * 2 = 384
    with tf.name_scope("Layer_F"):
        # 1x1x1 conv mixes channels without touching spatial dims.
        x_ = Conv(x_, filters=192, kernel_size=[1, 1, 1], strides=(1, 1, 1))
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            x_.get_shape())
    # # # Dim Red Layer
    # input  8 x 19 x 19 x 192
    # output 8 x 19 x 19 x 10
    # number of variables = 1950
    ### Conv1 = 192 x 1 x 1 x 10 + 10 = 1930
    ### Batchnorm = Filter * 2 = 20
    with tf.name_scope("Dimensional_Red_Layer"):
        # Shrink to 10 channels so the final dense layer stays tiny.
        x_ = Conv(x_,
                  filters=10,
                  kernel_size=[1, 1, 1],
                  strides=(1, 1, 1),
                  padding='same')
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            x_.get_shape())
    # # # Final Layer
    # output (?, 3)
    ### Dense Input = 8 * 2 * 1 * 10 = 160 Nodes
    ### FCL = 160 * 3 + 3 = 483
    with tf.name_scope("Final_Layer"):
        width = int(int(x_.get_shape()[2]) / 2)
        height = int(int(x_.get_shape()[3]) / 2)
        x_avg = tf.layers.average_pooling3d(x_,
                                            pool_size=[1, width, height],
                                            strides=(1, width, height))
        x_ = tf.layers.flatten(inputs=x_avg)
        y_ = tf.layers.dense(inputs=x_,
                             units=num_classes,
                             kernel_regularizer=tf.keras.regularizers.l2(1e-4))
        print_layer_details(tf.contrib.framework.get_name_scope(),
                            y_.get_shape())
    return y_