Example #1
    def __build(self):
        self.__init_global_epoch()
        self.__init_global_step()
        self.__init_input()

        with tf.name_scope('Preprocessing'):
            red, green, blue = tf.split(self.X, num_or_size_splits=3, axis=3)
            preprocessed_input = tf.concat([
                tf.subtract(blue, ShuffleNet.MEAN[0]) * ShuffleNet.NORMALIZER,
                tf.subtract(green, ShuffleNet.MEAN[1]) * ShuffleNet.NORMALIZER,
                tf.subtract(red, ShuffleNet.MEAN[2]) * ShuffleNet.NORMALIZER,
            ], 3)
        x_padded = tf.pad(preprocessed_input, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
        conv1 = conv2d('conv1', x=x_padded, w=None, num_filters=self.output_channels['conv1'], kernel_size=(3, 3),
                       stride=(2, 2), l2_strength=self.args.l2_strength, bias=self.args.bias,
                       batchnorm_enabled=self.args.batchnorm_enabled, is_training=self.is_training,
                       activation=tf.nn.relu, padding='VALID')
        padded = tf.pad(conv1, [[0, 0], [0, 1], [0, 1], [0, 0]], "CONSTANT")
        max_pool = max_pool_2d(padded, size=(3, 3), stride=(2, 2), name='max_pool')
        stage2 = self.__stage(max_pool, stage=2, repeat=3)
        stage3 = self.__stage(stage2, stage=3, repeat=7)
        stage4 = self.__stage(stage3, stage=4, repeat=3)
        global_pool = avg_pool_2d(stage4, size=(7, 7), stride=(1, 1), name='global_pool', padding='VALID')

        logits_unflattened = conv2d('fc', global_pool, w=None, num_filters=self.args.num_classes,
                                    kernel_size=(1, 1),
                                    l2_strength=self.args.l2_strength,
                                    bias=self.args.bias,
                                    is_training=self.is_training)
        self.logits = flatten(logits_unflattened)

        self.__init_output()
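
A note on the padding arithmetic above: the explicit tf.pad followed by VALID 3x3/stride-2 ops reproduces the usual 224 -> 112 -> 56 downsampling. A minimal shape check with plain TF 1.x ops (the 224x224 input size and the dummy kernel are assumptions for illustration, not from the source):

import tensorflow as tf  # TF 1.x, as in the example above

x = tf.placeholder(tf.float32, [None, 224, 224, 3])   # assumed input size
w = tf.get_variable('w_demo', [3, 3, 3, 24])          # hypothetical conv1 kernel
x_padded = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
conv1 = tf.nn.conv2d(x_padded, w, strides=[1, 2, 2, 1], padding='VALID')
print(conv1.get_shape())  # (?, 112, 112, 24): (224 + 2 - 3) // 2 + 1 = 112
padded = tf.pad(conv1, [[0, 0], [0, 1], [0, 1], [0, 0]], "CONSTANT")
pool = tf.nn.max_pool(padded, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
print(pool.get_shape())   # (?, 56, 56, 24): (112 + 1 - 3) // 2 + 1 = 56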
Example #2
    def __build(self):
        self.__init_global_epoch()
        self.__init_global_step()
        self.__init_input()

        with tf.name_scope('Preprocessing'):
            red, green, blue = tf.split(self.X, num_or_size_splits=3, axis=3)
            preprocessed_input = tf.concat([
                tf.subtract(blue, ShuffleNet.MEAN[0]) * ShuffleNet.NORMALIZER,
                tf.subtract(green, ShuffleNet.MEAN[1]) * ShuffleNet.NORMALIZER,
                tf.subtract(red, ShuffleNet.MEAN[2]) * ShuffleNet.NORMALIZER,
            ], 3)
        x_padded = tf.pad(preprocessed_input, [[0, 0], [1, 1], [1, 1], [0, 0]],
                          "CONSTANT")
        conv1 = conv2d('conv1',
                       x=x_padded,
                       w=None,
                       num_filters=self.output_channels['conv1'],
                       kernel_size=(3, 3),
                       stride=(2, 2),
                       l2_strength=self.args.l2_strength,
                       bias=self.args.bias,
                       batchnorm_enabled=self.args.batchnorm_enabled,
                       is_training=self.is_training,
                       activation=tf.nn.relu,
                       padding='VALID')
        padded = tf.pad(conv1, [[0, 0], [0, 1], [0, 1], [0, 0]], "CONSTANT")
        max_pool = max_pool_2d(padded,
                               size=(3, 3),
                               stride=(2, 2),
                               name='max_pool')
        stage2 = self.__stage(max_pool, stage=2, repeat=3)
        stage3 = self.__stage(stage2, stage=3, repeat=7)
        stage4 = self.__stage(stage3, stage=4, repeat=3)
        global_pool = avg_pool_2d(stage4,
                                  size=(7, 7),
                                  stride=(1, 1),
                                  name='global_pool',
                                  padding='VALID')

        logits_unflattened = conv2d('fc',
                                    global_pool,
                                    w=None,
                                    num_filters=self.args.num_classes,
                                    kernel_size=(1, 1),
                                    l2_strength=self.args.l2_strength,
                                    bias=self.args.bias,
                                    is_training=self.is_training)
        self.logits = flatten(logits_unflattened)

        self.__init_output()
Example #3
    def __build(self):
        self.__init_global_epoch()
        self.__init_global_step()
        self.__init_input()
        # 0. Image preprocessing: subtract the channel means, multiply by the normalizer #######
        with tf.name_scope('Preprocessing'):
            # Split into the three color channels
            red, green, blue = tf.split(self.X, num_or_size_splits=3, axis=3)
            # Subtract each channel's mean, scale by the normalizer, then concat the channels back together
            preprocessed_input = tf.concat([
                tf.subtract(blue, ShuffleNet.MEAN[0]) * ShuffleNet.NORMALIZER,
                tf.subtract(green, ShuffleNet.MEAN[1]) * ShuffleNet.NORMALIZER,
                tf.subtract(red, ShuffleNet.MEAN[2]) * ShuffleNet.NORMALIZER,
            ], 3)
        # 1. conv1: 3x3x3x24 convolution, stride 2, BN, ReLU ###################################
        # Pad the borders
        x_padded = tf.pad(preprocessed_input, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
        # Convolution
        conv1 = conv2d('conv1', x=x_padded, w=None, num_filters=self.output_channels['conv1'], kernel_size=(3, 3),
                       stride=(2, 2), l2_strength=self.args.l2_strength, bias=self.args.bias,
                       batchnorm_enabled=self.args.batchnorm_enabled, is_training=self.is_training,
                       activation=tf.nn.relu, padding='VALID')
        # 2. Max pooling: 3x3, stride 2 #########################################################
        padded = tf.pad(conv1, [[0, 0], [0, 1], [0, 1], [0, 0]], "CONSTANT")
        max_pool = max_pool_2d(padded, size=(3, 3), stride=(2, 2), name='max_pool')
        # 3. One stride-2 unit (non-grouped pointwise conv, concat channel fusion), then 3 stride-1 units with add fusion
        stage2 = self.__stage(max_pool, stage=2, repeat=3)
        # 4. One stride-2 unit (grouped pointwise conv, concat channel fusion), then 7 stride-1 units with add fusion
        stage3 = self.__stage(stage2, stage=3, repeat=7)
        # 5. One stride-2 unit (grouped pointwise conv, concat channel fusion), then 3 stride-1 units with add fusion
        stage4 = self.__stage(stage3, stage=4, repeat=3)
        # 6. Global average pooling: 7x7 kernel, stride 1
        global_pool = avg_pool_2d(stage4, size=(7, 7), stride=(1, 1), name='global_pool', padding='VALID')
        # 7. 1x1 pointwise conv outputting one feature map per class
        logits_unflattened = conv2d('fc', global_pool, w=None, num_filters=self.args.num_classes,
                                    kernel_size=(1, 1),  # 1x1 pointwise conv
                                    l2_strength=self.args.l2_strength,
                                    bias=self.args.bias,
                                    is_training=self.is_training)
        # 8. Flatten to one dimension
        self.logits = flatten(logits_unflattened)
        # 9. Compute the loss
        self.__init_output()
Example #4
    def __build(self):
        self.__init_global_epoch()
        self.__init_global_step()
        self.__init_input()

        x_resized = self.__resize(self.X)
        conv1 = conv2d('conv1', x=x_resized, w=None, num_filters=self.output_channels['conv1'], kernel_size=(3, 3),
                       stride=(2, 2), l2_strength=self.args.l2_strength, bias=self.args.bias,
                       batchnorm_enabled=self.args.batchnorm_enabled, is_training=self.is_training)
        conv1_padded = tf.pad(conv1, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
        max_pool = max_pool_2d(conv1_padded, size=(3, 3), stride=(2, 2), name='max_pool')
        stage2 = self.__stage(max_pool, stage=2, repeat=3)
        stage3 = self.__stage(stage2, stage=3, repeat=7)
        stage4 = self.__stage(stage3, stage=4, repeat=3)
        global_pool = avg_pool_2d(stage4, size=(7, 7), stride=(1, 1), name='global_pool')
        flattened = flatten(global_pool)

        self.logits = dense('fc', flattened, w=None, output_dim=self.args.num_classes,
                            l2_strength=self.args.l2_strength,
                            bias=self.args.bias,
                            is_training=self.is_training)
        self.__init_output()
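
The only structural change from Example #1 is the classifier head: a dense layer over the flattened global-pool output instead of a 1x1 convolution. On a 1x1 spatial map the two are the same linear transform; a minimal sketch (the shapes and class count are hypothetical) shows the equivalence:

import numpy as np
import tensorflow as tf

gp = tf.constant(np.random.rand(1, 1, 1, 1024).astype(np.float32))  # fake global-pool output
w = tf.constant(np.random.rand(1024, 10).astype(np.float32))        # hypothetical class weights

dense_logits = tf.matmul(tf.reshape(gp, [1, 1024]), w)              # dense head (this example)
conv_logits = tf.nn.conv2d(gp, tf.reshape(w, [1, 1, 1024, 10]),
                           strides=[1, 1, 1, 1], padding='VALID')   # 1x1-conv head (Example #1)
# tf.reshape(conv_logits, [1, 10]) equals dense_logits element-wise.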
Example #5
    def __init_network(self):
        with tf.variable_scope('mobilenet_encoder'):
            # Preprocessing as done in the paper
            with tf.name_scope('pre_processing'):
                preprocessed_input = (self.X - self.mean_img) / 255.0

            # Model is here!
            conv1_1 = conv2d('conv_1', preprocessed_input, num_filters=int(round(32 * self.args.width_multiplier)),
                             kernel_size=(3, 3),
                             padding='SAME', stride=(2, 2), activation=tf.nn.relu6,
                             batchnorm_enabled=self.args.batchnorm_enabled,
                             is_training=self.is_training, l2_strength=self.args.l2_strength, bias=self.args.bias)
            self.__add_to_nodes([conv1_1])
            ############################################################################################
            conv2_1_dw, conv2_1_pw = depthwise_separable_conv2d('conv_ds_2', conv1_1,
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=64, kernel_size=(3, 3), padding='SAME',
                                                                stride=(1, 1),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv2_1_dw, conv2_1_pw])

            conv2_2_dw, conv2_2_pw = depthwise_separable_conv2d('conv_ds_3', conv2_1_pw,
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=128, kernel_size=(3, 3), padding='SAME',
                                                                stride=(2, 2),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv2_2_dw, conv2_2_pw])
            ############################################################################################
            conv3_1_dw, conv3_1_pw = depthwise_separable_conv2d('conv_ds_4', conv2_2_pw,
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=128, kernel_size=(3, 3), padding='SAME',
                                                                stride=(1, 1),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv3_1_dw, conv3_1_pw])

            conv3_2_dw, conv3_2_pw = depthwise_separable_conv2d('conv_ds_5', conv3_1_pw,
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=256, kernel_size=(3, 3), padding='SAME',
                                                                stride=(2, 2),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv3_2_dw, conv3_2_pw])
            ############################################################################################
            conv4_1_dw, conv4_1_pw = depthwise_separable_conv2d('conv_ds_6', conv3_2_pw,
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=256, kernel_size=(3, 3), padding='SAME',
                                                                stride=(1, 1),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv4_1_dw, conv4_1_pw])

            conv4_2_dw, conv4_2_pw = depthwise_separable_conv2d('conv_ds_7', conv4_1_pw,
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=512, kernel_size=(3, 3), padding='SAME',
                                                                stride=(2, 2),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv4_2_dw, conv4_2_pw])
            ############################################################################################
            conv5_1_dw, conv5_1_pw = depthwise_separable_conv2d('conv_ds_8', conv4_2_pw,
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=512, kernel_size=(3, 3), padding='SAME',
                                                                stride=(1, 1),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv5_1_dw, conv5_1_pw])

            conv5_2_dw, conv5_2_pw = depthwise_separable_conv2d('conv_ds_9', conv5_1_pw,
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=512, kernel_size=(3, 3), padding='SAME',
                                                                stride=(1, 1),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv5_2_dw, conv5_2_pw])

            conv5_3_dw, conv5_3_pw = depthwise_separable_conv2d('conv_ds_10', conv5_2_pw,
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=512, kernel_size=(3, 3), padding='SAME',
                                                                stride=(1, 1),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv5_3_dw, conv5_3_pw])

            conv5_4_dw, conv5_4_pw = depthwise_separable_conv2d('conv_ds_11', conv5_3_pw,
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=512, kernel_size=(3, 3), padding='SAME',
                                                                stride=(1, 1),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv5_4_dw, conv5_4_pw])

            conv5_5_dw, conv5_5_pw = depthwise_separable_conv2d('conv_ds_12', conv5_4_pw,
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=512, kernel_size=(3, 3), padding='SAME',
                                                                stride=(1, 1),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv5_5_dw, conv5_5_pw])

            conv5_6_dw, conv5_6_pw = depthwise_separable_conv2d('conv_ds_13', conv5_5_pw,
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=1024, kernel_size=(3, 3), padding='SAME',
                                                                stride=(2, 2),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv5_6_dw, conv5_6_pw])
            ############################################################################################
            conv6_1_dw, conv6_1_pw = depthwise_separable_conv2d('conv_ds_14', conv5_6_pw,
                                                                width_multiplier=self.args.width_multiplier,
                                                                num_filters=1024, kernel_size=(3, 3), padding='SAME',
                                                                stride=(1, 1),
                                                                batchnorm_enabled=self.args.batchnorm_enabled,
                                                                activation=tf.nn.relu6,
                                                                is_training=self.is_training,
                                                                l2_strength=self.args.l2_strength,
                                                                biases=(self.args.bias, self.args.bias))
            self.__add_to_nodes([conv6_1_dw, conv6_1_pw])
            ############################################################################################
            avg_pool = avg_pool_2d(conv6_1_pw, size=(7, 7), stride=(1, 1))
            dropped = dropout(avg_pool, self.args.dropout_keep_prob, self.is_training)
            self.logits = flatten(conv2d('fc', dropped, kernel_size=(1, 1), num_filters=self.args.num_classes,
                                         l2_strength=self.args.l2_strength,
                                         bias=self.args.bias))
            self.__add_to_nodes([avg_pool, dropped, self.logits])
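
depthwise_separable_conv2d itself is defined elsewhere in the repo; as a rough sketch of its assumed behavior, it factors a standard convolution into a per-channel 3x3 depthwise convolution and a 1x1 pointwise convolution, returning both intermediate tensors. Something like the following with plain tf.nn ops (the variable names and the exact width-multiplier handling are guesses, not the repo's code):

def depthwise_separable_sketch(x, num_filters, width_multiplier=1.0, stride=(1, 1)):
    """Approximate stand-in for depthwise_separable_conv2d; illustrative only."""
    in_channels = x.get_shape()[3].value
    out_channels = int(round(num_filters * width_multiplier))
    dw_filter = tf.get_variable('dw', [3, 3, in_channels, 1])  # one 3x3 filter per channel
    pw_filter = tf.get_variable('pw', [1, 1, in_channels, out_channels])
    dw = tf.nn.relu6(tf.nn.depthwise_conv2d(
        x, dw_filter, strides=[1, stride[0], stride[1], 1], padding='SAME'))
    pw = tf.nn.relu6(tf.nn.conv2d(dw, pw_filter, strides=[1, 1, 1, 1], padding='SAME'))
    return dw, pw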
Example #6
def shufflenet_unit(name,
                    x,
                    w=None,
                    num_groups=1,
                    group_conv_bottleneck=True,
                    num_filters=16,
                    stride=(1, 1),
                    l2_strength=0.0,
                    bias=0.0,
                    batchnorm_enabled=True,
                    is_training=True,
                    fusion='add'):
    # Refer to "ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices".
    # Main branch:     1x1 GConv (BN, ReLU) -> channel shuffle -> 3x3 DWConv (stride 2) (BN) -> 1x1 GConv (BN)
    # Shortcut branch: 3x3 avg pool (stride 2)
    # Concat the two branches, then ReLU.

    activation = tf.nn.relu

    with tf.variable_scope(name) as scope:
        residual = x
        bottleneck_filters = (num_filters // 4) if fusion == 'add' else (
            num_filters - residual.get_shape()[3].value) // 4

        if group_conv_bottleneck:
            bottleneck = grouped_conv2d('Gbottleneck',
                                        x=x,
                                        w=None,
                                        num_filters=bottleneck_filters,
                                        kernel_size=(1, 1),
                                        padding='VALID',
                                        num_groups=num_groups,
                                        l2_strength=l2_strength,
                                        bias=bias,
                                        activation=activation,
                                        batchnorm_enabled=batchnorm_enabled,
                                        is_training=is_training)
            shuffled = channel_shuffle('channel_shuffle', bottleneck,
                                       num_groups)
        else:
            bottleneck = conv2d('bottleneck',
                                x=x,
                                w=None,
                                num_filters=bottleneck_filters,
                                kernel_size=(1, 1),
                                padding='VALID',
                                l2_strength=l2_strength,
                                bias=bias,
                                activation=activation,
                                batchnorm_enabled=batchnorm_enabled,
                                is_training=is_training)
            shuffled = bottleneck
        padded = tf.pad(shuffled, [[0, 0], [1, 1], [1, 1], [0, 0]], "CONSTANT")
        depthwise = depthwise_conv2d('depthwise',
                                     x=padded,
                                     w=None,
                                     stride=stride,
                                     l2_strength=l2_strength,
                                     padding='VALID',
                                     bias=bias,
                                     activation=None,
                                     batchnorm_enabled=batchnorm_enabled,
                                     is_training=is_training)
        if stride == (2, 2):
            residual_pooled = avg_pool_2d(residual,
                                          size=(3, 3),
                                          stride=stride,
                                          padding='SAME')
        else:
            residual_pooled = residual

        if fusion == 'concat':
            group_conv1x1 = grouped_conv2d('Gconv1x1',
                                           x=depthwise,
                                           w=None,
                                           num_filters=num_filters -
                                           residual.get_shape()[3].value,
                                           kernel_size=(1, 1),
                                           padding='VALID',
                                           num_groups=num_groups,
                                           l2_strength=l2_strength,
                                           bias=bias,
                                           activation=None,
                                           batchnorm_enabled=batchnorm_enabled,
                                           is_training=is_training)
            return activation(
                tf.concat([residual_pooled, group_conv1x1], axis=-1))
        elif fusion == 'add':
            group_conv1x1 = grouped_conv2d('Gconv1x1',
                                           x=depthwise,
                                           w=None,
                                           num_filters=num_filters,
                                           kernel_size=(1, 1),
                                           padding='VALID',
                                           num_groups=num_groups,
                                           l2_strength=l2_strength,
                                           bias=bias,
                                           activation=None,
                                           batchnorm_enabled=batchnorm_enabled,
                                           is_training=is_training)
            residual_match = residual_pooled
            # This is used if the number of filters of the residual block is different from that
            # of the group convolution.
            if num_filters != residual_pooled.get_shape()[3].value:
                residual_match = conv2d('residual_match',
                                        x=residual_pooled,
                                        w=None,
                                        num_filters=num_filters,
                                        kernel_size=(1, 1),
                                        padding='VALID',
                                        l2_strength=l2_strength,
                                        bias=bias,
                                        activation=None,
                                        batchnorm_enabled=batchnorm_enabled,
                                        is_training=is_training)
            return activation(group_conv1x1 + residual_match)
        else:
            raise ValueError(
                "Specify whether the fusion is \'concat\' or \'add\'")
Example #7
def mobilenet_conv_layers(input, batch_size, num_unrolls):
    input = tf.to_float(input) - IMAGENET_MEAN
    conv1_1 = conv2d('conv_1',
                     input,
                     num_filters=int(round(32 * 1)),
                     kernel_size=(3, 3),
                     padding='SAME',
                     stride=(2, 2),
                     activation=tf.nn.relu6,
                     batchnorm_enabled=False,
                     is_training=True,
                     l2_strength=0.0,
                     bias=0.0)
    #self.__add_to_nodes([conv1_1])
    ############################################################################################
    conv2_1_dw, conv2_1_pw = depthwise_separable_conv2d(
        'conv_ds_2',
        conv1_1,
        width_multiplier=1,
        num_filters=64,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(1, 1),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv2_1_dw, conv2_1_pw])
    conv2_2_dw, conv2_2_pw = depthwise_separable_conv2d(
        'conv_ds_3',
        conv2_1_pw,
        width_multiplier=1,
        num_filters=128,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(2, 2),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv2_2_dw, conv2_2_pw])
    ############################################################################################
    #with tf.variable_scope('conv1_skip'):

    if 0:
        pool2 = tf.nn.max_pool(conv2_2_pw,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID',
                               name='pool2')
        lrn2 = tf.nn.local_response_normalization(pool2,
                                                  depth_radius=2,
                                                  alpha=2e-5,
                                                  beta=0.75,
                                                  bias=1.0,
                                                  name='norm2')

        prelu_skip = tf_util.get_variable('prelu',
                                          shape=[16],
                                          dtype=tf.float32,
                                          initializer=prelu_initializer)
        conv1_skip = tf_util.prelu(
            tf_util.conv_layer(lrn2, 16, 1, activation=None), prelu_skip)
        conv1_skip = tf.transpose(conv1_skip, perm=[0, 3, 1, 2])
        conv1_skip_flat = tf_util.remove_axis(conv1_skip, [2, 3])

    conv3_1_dw, conv3_1_pw = depthwise_separable_conv2d(
        'conv_ds_4',
        conv2_2_pw,
        width_multiplier=1,
        num_filters=128,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(1, 1),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv3_1_dw, conv3_1_pw])

    conv3_2_dw, conv3_2_pw = depthwise_separable_conv2d(
        'conv_ds_5',
        conv3_1_pw,
        width_multiplier=1,
        num_filters=256,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(2, 2),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv3_2_dw, conv3_2_pw])
    ############################################################################################

    conv4_1_dw, conv4_1_pw = depthwise_separable_conv2d(
        'conv_ds_6',
        conv3_2_pw,
        width_multiplier=1,
        num_filters=256,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(1, 1),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv4_1_dw, conv4_1_pw])

    conv4_2_dw, conv4_2_pw = depthwise_separable_conv2d(
        'conv_ds_7',
        conv4_1_pw,
        width_multiplier=1,
        num_filters=512,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(2, 2),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv4_2_dw, conv4_2_pw])
    ############################################################################################
    if 0:
        pool3 = tf.nn.max_pool(conv4_2_pw,
                               ksize=[1, 3, 3, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID',
                               name='pool3')
        lrn3 = tf.nn.local_response_normalization(pool3,
                                                  depth_radius=2,
                                                  alpha=2e-5,
                                                  beta=0.75,
                                                  bias=1.0,
                                                  name='norm3')

        with tf.variable_scope('conv2_skip'):
            prelu_skip = tf_util.get_variable('prelu',
                                              shape=[16],
                                              dtype=tf.float32,
                                              initializer=prelu_initializer)
            conv2_skip = tf_util.prelu(
                tf_util.conv_layer(lrn3, 16, 1, activation=None), prelu_skip)
            conv2_skip = tf.transpose(conv2_skip, perm=[0, 3, 1, 2])
            conv2_skip_flat = tf_util.remove_axis(conv2_skip, [2, 3])

    conv5_1_dw, conv5_1_pw = depthwise_separable_conv2d(
        'conv_ds_8',
        conv4_2_pw,
        width_multiplier=1,
        num_filters=512,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(1, 1),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv5_1_dw, conv5_1_pw])

    conv5_2_dw, conv5_2_pw = depthwise_separable_conv2d(
        'conv_ds_9',
        conv5_1_pw,
        width_multiplier=1,
        num_filters=512,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(1, 1),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv5_2_dw, conv5_2_pw])

    conv5_3_dw, conv5_3_pw = depthwise_separable_conv2d(
        'conv_ds_10',
        conv5_2_pw,
        width_multiplier=1,
        num_filters=512,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(1, 1),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv5_3_dw, conv5_3_pw])

    conv5_4_dw, conv5_4_pw = depthwise_separable_conv2d(
        'conv_ds_11',
        conv5_3_pw,
        width_multiplier=1,
        num_filters=512,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(1, 1),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv5_4_dw, conv5_4_pw])

    conv5_5_dw, conv5_5_pw = depthwise_separable_conv2d(
        'conv_ds_12',
        conv5_4_pw,
        width_multiplier=1,
        num_filters=512,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(1, 1),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv5_5_dw, conv5_5_pw])

    conv5_6_dw, conv5_6_pw = depthwise_separable_conv2d(
        'conv_ds_13',
        conv5_5_pw,
        width_multiplier=1,
        num_filters=1024,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(2, 2),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv5_6_dw, conv5_6_pw])
    ############################################################################################
    conv6_1_dw, conv6_1_pw = depthwise_separable_conv2d(
        'conv_ds_14',
        conv5_6_pw,
        width_multiplier=1,
        num_filters=1024,
        kernel_size=(3, 3),
        padding='SAME',
        stride=(1, 1),
        batchnorm_enabled=False,
        activation=tf.nn.relu6,
        is_training=True,
        l2_strength=0.0,
        biases=(0.0, 0.0))
    #self.__add_to_nodes([conv6_1_dw, conv6_1_pw])
    ############################################################################################
    avg_pool = avg_pool_2d(conv6_1_pw, size=(7, 7), stride=(1, 1))
    dropped = dropout(avg_pool, -1, True)  # NOTE: keep_prob of -1 as in the source; Example #5 passes a real dropout_keep_prob here
    #print("dropout:shape:")
    #print(dropped.get_shape())
    if 1:
        logits = flatten(
            conv2d('fc',
                   dropped,
                   kernel_size=(1, 1),
                   num_filters=32,
                   l2_strength=0.0,
                   bias=0.0,
                   padding='SAME'))
    else:
        logits = (conv2d('fc',
                         dropped,
                         kernel_size=(1, 1),
                         num_filters=32,
                         l2_strength=0.0,
                         bias=0.0,
                         padding='SAME'))
        logits = tf_util.remove_axis(logits, [2, 3])
    if 1:
        logits_shape = logits.get_shape().as_list()
        pool5_reshape = tf.reshape(
            logits, [batch_size, num_unrolls, 2, logits_shape[-1]])

    else:
        skip_concat = tf.concat([conv1_skip_flat, logits], 1)
        #skip_concat = tf.concat([conv1_skip_flat, conv2_skip_flat, logits], 1)
        #
        #print("logitss:shape:")
        #print(logits_shape)
        #
        skip_concat_shape = skip_concat.get_shape().as_list()
        print("Ship_concat shape")
        print(skip_concat_shape)
        # Split and merge image pairs
        # (BxTx2)xHxWxC
        pool5_reshape = tf.reshape(
            skip_concat, [batch_size, num_unrolls, 2, skip_concat_shape[-1]])
        # (BxT)x(2xHxWxC)
    reshaped = tf_util.remove_axis(pool5_reshape, [1, 3])
    return reshaped
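
The closing reshape is the part specific to this tracking setup: features arrive as one row per crop, i.e. (batch_size * num_unrolls * 2, C), get viewed as [batch_size, num_unrolls, 2, C], and tf_util.remove_axis(..., [1, 3]) then collapses them to (batch_size * num_unrolls, 2 * C), one row per image pair, matching the '(BxT)x(2xHxWxC)' comment above. A small sketch of the same collapse with plain tf.reshape (B=4, T=8, C=32 are made-up numbers):

feats = tf.zeros([4 * 8 * 2, 32])             # (B*T*2, C): one feature row per crop
paired = tf.reshape(feats, [4, 8, 2, 32])     # [batch_size, num_unrolls, 2, C]
merged = tf.reshape(paired, [4 * 8, 2 * 32])  # (B*T, 2*C): one row per image pair
print(merged.get_shape())                     # (32, 64)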