Example #1
    def resNet_v1(self):

        model_params = {
            "conv1": [5, 5, 64],
            "rb1_1": [3, 3, 64],
            "rb1_2": [3, 3, 64],
            "rb2_1": [3, 3, 128],
            "rb2_2": [3, 3, 128],
            "fc3": 10,
        }

        with tf.name_scope("resNet_v1"):
            net = nf.convolution_layer(self.inputs,
                                       model_params["conv1"], [1, 2, 2, 1],
                                       name="conv1")
            id_rb1 = tf.nn.max_pool(net,
                                    ksize=[1, 3, 3, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME')

            net = nf.convolution_layer(id_rb1,
                                       model_params["rb1_1"], [1, 1, 1, 1],
                                       name="rb1_1")
            id_rb2 = nf.convolution_layer(net,
                                          model_params["rb1_2"], [1, 1, 1, 1],
                                          name="rb1_2")

            id_rb2 = nf.shortcut(id_rb2, id_rb1, name="rb1")

            net = nf.convolution_layer(id_rb2,
                                       model_params["rb2_1"], [1, 2, 2, 1],
                                       padding="SAME",
                                       name="rb2_1")
            id_rb3 = nf.convolution_layer(net,
                                          model_params["rb2_2"], [1, 1, 1, 1],
                                          name="rb2_2")

            id_rb3 = nf.shortcut(id_rb3, id_rb2, name="rb2")

            net = nf.global_avg_pooling(id_rb3, flatten=True)

            net = tf.layers.dropout(net,
                                    rate=self.dropout,
                                    training=self.is_training,
                                    name='dropout2')
            logits = nf.fc_layer(net,
                                 model_params["fc3"],
                                 name="logits",
                                 activat_fn=None)

        return logits
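
The method returns raw logits (the final fc_layer has activat_fn=None), so a softmax cross-entropy loss would normally be attached outside the model. A minimal TF 1.x sketch of such a training head; the labels placeholder, learning rate, and the toy dense layer standing in for the tensor returned by resNet_v1() are assumptions, not part of the original class:

    import tensorflow as tf

    features = tf.placeholder(tf.float32, [None, 64], name="features")
    labels = tf.placeholder(tf.int64, [None], name="labels")

    # Toy stand-in head; in practice `logits` is the tensor returned by resNet_v1().
    logits = tf.layers.dense(features, 10, name="toy_logits")

    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
    train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)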
Example #2
    def alex_net_2D(self, kwargs={}):

        #init = tf.random_normal_initializer(stddev=0.01)
        init = tf.contrib.layers.xavier_initializer()
        l2_reg = tf.contrib.layers.l2_regularizer(1e-5)

        model_params = {
            'conv1': [11, 11, 96],
            'conv2': [5, 5, 256],
            'conv3': [3, 3, 384],
            'conv4': [3, 3, 384],
            'conv5': [3, 3, 256],
            'conv6': [3, 3, 128],
            'conv7': [3, 3, 64],
            'conv_code': [3, 3, 16],
            #'conv_code': [3,3,1],
        }
        with tf.name_scope("Detector"):

            conv_1 = nf.convolution_layer(self.inputs,
                                          model_params["conv1"], [1, 4, 4, 1],
                                          name="conv1",
                                          activat_fn=tf.nn.relu,
                                          initializer=init,
                                          reg=l2_reg,
                                          padding='SAME')
            #conv_1 = nf.convolution_layer(self.inputs, model_params["conv1"], [1,1,1,1], name="conv1", activat_fn=tf.nn.relu, initializer=init, reg=l2_reg, padding='SAME')
            conv_1 = tf.nn.max_pool(conv_1,
                                    ksize=[1, 3, 3, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME')

            conv_2 = nf.convolution_layer(conv_1,
                                          model_params["conv2"], [1, 1, 1, 1],
                                          name="conv2",
                                          activat_fn=tf.nn.relu,
                                          initializer=init,
                                          reg=l2_reg,
                                          padding='SAME')
            conv_2 = tf.nn.max_pool(conv_2,
                                    ksize=[1, 3, 3, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME')

            conv_3 = nf.convolution_layer(conv_2,
                                          model_params["conv3"], [1, 1, 1, 1],
                                          name="conv3",
                                          activat_fn=tf.nn.relu,
                                          initializer=init,
                                          reg=l2_reg,
                                          padding='SAME')

            conv_4 = nf.convolution_layer(conv_3,
                                          model_params["conv4"], [1, 1, 1, 1],
                                          name="conv4",
                                          activat_fn=tf.nn.relu,
                                          initializer=init,
                                          reg=l2_reg,
                                          padding='SAME')

            conv_5 = nf.convolution_layer(conv_4,
                                          model_params["conv5"], [1, 1, 1, 1],
                                          name="conv5",
                                          activat_fn=tf.nn.relu,
                                          initializer=init,
                                          reg=l2_reg,
                                          padding='SAME')

            conv_6 = nf.convolution_layer(conv_5,
                                          model_params["conv6"], [1, 1, 1, 1],
                                          name="conv6",
                                          activat_fn=tf.nn.relu,
                                          initializer=init,
                                          reg=l2_reg,
                                          padding='SAME')
            #conv_6 = tf.nn.max_pool(conv_6, ksize=[1,3,3,1], strides=[1,2,2,1], padding='SAME')

            conv_7 = nf.convolution_layer(conv_6,
                                          model_params["conv7"], [1, 1, 1, 1],
                                          name="conv7",
                                          activat_fn=tf.nn.relu,
                                          initializer=init,
                                          reg=l2_reg,
                                          padding='SAME')
            #conv_7 = tf.nn.max_pool(conv_7, ksize=[1,3,3,1], strides=[1,2,2,1], padding='SAME')

            dropout = tf.layers.dropout(conv_7,
                                        rate=self.dropout,
                                        training=self.is_training,
                                        name='dropout2')

            conv_code = nf.convolution_layer(dropout,
                                             model_params["conv_code"],
                                             [1, 1, 1, 1],
                                             name="conv_code",
                                             activat_fn=tf.nn.relu,
                                             initializer=init,
                                             reg=l2_reg,
                                             padding='SAME')

            return conv_code
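
Every convolution here is given reg=l2_reg. Assuming the nf helper forwards that argument to tf.get_variable(..., regularizer=...) (an assumption; the helper's source is not shown), the penalties accumulate in the REGULARIZATION_LOSSES collection and still have to be added to the task loss by hand in TF 1.x:

    import tensorflow as tf

    # Hypothetical variable registered with the same kind of L2 regularizer as above.
    l2_reg = tf.contrib.layers.l2_regularizer(1e-5)
    w = tf.get_variable("toy_w", shape=[3, 3, 1, 16], regularizer=l2_reg)

    task_loss = tf.reduce_sum(tf.square(w))  # stand-in for the real objective
    reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    total_loss = task_loss + tf.add_n(reg_losses)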
Example #3
    def baseline_2D(self, kwargs={}):
        init = tf.random_normal_initializer(stddev=0.01)

        feature_size = 64

        model_params = {
            'conv1': [11, 11, feature_size * 2],
            'conv2': [5, 5, feature_size * 4],
            'resblock': [3, 3, feature_size * 4],
            'conv3': [3, 3, feature_size * 4],
            'conv4': [3, 3, feature_size * 8],
            'conv5': [3, 3, feature_size * 8],
            'conv6': [3, 3, feature_size * 4],
            'conv_code': [3, 3, 16],
        }

        ### Generator
        num_resblock = 16

        g_input = self.inputs

        with tf.name_scope("Detector"):
            # 256x256x1
            x = nf.convolution_layer(g_input,
                                     model_params["conv1"], [1, 2, 2, 1],
                                     name="conv1",
                                     activat_fn=tf.nn.relu,
                                     initializer=init)
            conv_1 = tf.nn.max_pool(x,
                                    ksize=[1, 3, 3, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME')
            print("conv_1: %s" % conv_1.get_shape())

            x = nf.convolution_layer(conv_1,
                                     model_params["conv2"], [1, 2, 2, 1],
                                     name="conv2",
                                     activat_fn=tf.nn.relu,
                                     initializer=init)
            conv_2 = tf.nn.max_pool(x,
                                    ksize=[1, 3, 3, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='SAME')
            print("conv_2: %s" % conv_2.get_shape())

            x = conv_2
            # 128x128xfeature_size
            with tf.variable_scope("detector_resblock", reuse=False):
                #Add the residual blocks to the model
                for i in range(num_resblock):
                    x = nf.resBlock(x,
                                    feature_size * 4,
                                    scale=1,
                                    reuse=False,
                                    idx=i,
                                    initializer=init)
                x = nf.convolution_layer(x,
                                         model_params["conv3"], [1, 1, 1, 1],
                                         name="conv3",
                                         activat_fn=tf.nn.relu,
                                         initializer=init)
                x += conv_2
                print("conv_3: %s" % x.get_shape())

            x = nf.convolution_layer(x,
                                     model_params["conv4"], [1, 1, 1, 1],
                                     name="conv4",
                                     activat_fn=tf.nn.relu,
                                     initializer=init)
            print("conv_4: %s" % x.get_shape())

            x = nf.convolution_layer(x,
                                     model_params["conv5"], [1, 1, 1, 1],
                                     name="conv5",
                                     activat_fn=tf.nn.relu,
                                     initializer=init)
            print("conv_5: %s" % x.get_shape())

            x = nf.convolution_layer(x,
                                     model_params["conv6"], [1, 1, 1, 1],
                                     name="conv6",
                                     activat_fn=tf.nn.relu,
                                     initializer=init)
            print("conv_6: %s" % x.get_shape())

            x = nf.convolution_layer(x,
                                     model_params["conv_code"], [1, 1, 1, 1],
                                     name="conv_code",
                                     activat_fn=tf.nn.relu,
                                     initializer=init)
            print("conv_code: %s" % x.get_shape())

        return x
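
nf.resBlock is a project-specific helper whose source is not included here. Judging from its arguments (feature_size, scale, initializer), it most likely behaves like a standard EDSR-style residual block; a hypothetical stand-in, for reference only:

    import tensorflow as tf

    def res_block_sketch(x, feature_size, scale=1.0, idx=0,
                         initializer=tf.random_normal_initializer(stddev=0.01)):
        # Hypothetical stand-in for nf.resBlock: conv -> ReLU -> conv,
        # scaled and added back onto the block input.
        with tf.variable_scope("resblock_%d" % idx):
            y = tf.layers.conv2d(x, feature_size, 3, padding="same",
                                 activation=tf.nn.relu, kernel_initializer=initializer)
            y = tf.layers.conv2d(y, feature_size, 3, padding="same",
                                 activation=None, kernel_initializer=initializer)
            return x + scale * y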
Example #4
    def alex_net(self, kwargs={}):

        init = tf.random_normal_initializer(stddev=0.01)
        l2_reg = tf.contrib.layers.l2_regularizer(1e-5)

        model_params = {
            'conv1': [11, 11, 96],
            'conv2': [5, 5, 256],
            'conv3': [3, 3, 384],
            'conv4': [3, 3, 384],
            'conv5': [3, 3, 256],
            'fc6': 8192,
            'fc7': 8192,
            'fc_code': 4096,
            #                        'fc7': 4096,
            #                        'fc_code': 1024,
        }
        with tf.name_scope("Detector"):

            conv_1 = nf.convolution_layer(self.inputs,
                                          model_params["conv1"], [1, 4, 4, 1],
                                          name="conv1",
                                          activat_fn=tf.nn.relu,
                                          initializer=init,
                                          reg=l2_reg,
                                          padding='VALID')
            conv_1 = tf.nn.max_pool(conv_1,
                                    ksize=[1, 3, 3, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='VALID')

            conv_2 = nf.convolution_layer(conv_1,
                                          model_params["conv2"], [1, 1, 1, 1],
                                          name="conv2",
                                          activat_fn=tf.nn.relu,
                                          initializer=init,
                                          reg=l2_reg,
                                          padding='SAME')
            conv_2 = tf.nn.max_pool(conv_2,
                                    ksize=[1, 3, 3, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='VALID')

            conv_3 = nf.convolution_layer(conv_2,
                                          model_params["conv3"], [1, 1, 1, 1],
                                          name="conv3",
                                          activat_fn=tf.nn.relu,
                                          initializer=init,
                                          reg=l2_reg,
                                          padding='SAME')

            conv_4 = nf.convolution_layer(conv_3,
                                          model_params["conv4"], [1, 1, 1, 1],
                                          name="conv4",
                                          activat_fn=tf.nn.relu,
                                          initializer=init,
                                          reg=l2_reg,
                                          padding='SAME')

            conv_5 = nf.convolution_layer(conv_4,
                                          model_params["conv5"], [1, 1, 1, 1],
                                          name="conv5",
                                          activat_fn=tf.nn.relu,
                                          initializer=init,
                                          reg=l2_reg,
                                          padding='SAME')
            conv_5 = tf.nn.max_pool(conv_5,
                                    ksize=[1, 3, 3, 1],
                                    strides=[1, 2, 2, 1],
                                    padding='VALID')
            conv_5 = tf.reshape(conv_5,
                                [-1, int(np.prod(conv_5.get_shape()[1:]))],
                                name="conv5_flatout")

            fc6 = nf.fc_layer(conv_5,
                              model_params["fc6"],
                              name="fc6",
                              activat_fn=tf.nn.relu,
                              reg=l2_reg)

            fc7 = nf.fc_layer(fc6,
                              model_params["fc7"],
                              name="fc7",
                              activat_fn=tf.nn.relu,
                              reg=l2_reg)

            dropout = tf.layers.dropout(fc7,
                                        rate=self.dropout,
                                        training=self.is_training,
                                        name='dropout2')

            fc_code = nf.fc_layer(dropout,
                                  model_params["fc_code"],
                                  name="fc_code",
                                  activat_fn=None,
                                  reg=l2_reg)

            return fc_code
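
The manual tf.reshape with np.prod above flattens every non-batch dimension of conv_5; tf.layers.flatten does the same thing without hard-coding the shape arithmetic (the 6x6x256 shape below is purely illustrative, not this model's actual feature-map size):

    import tensorflow as tf

    x = tf.placeholder(tf.float32, [None, 6, 6, 256])  # illustrative conv feature map
    flat = tf.layers.flatten(x)                         # equivalent to the manual reshape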
Example #5
        def attention_network(image_input, layers, channels, is_training):

            with tf.variable_scope("attention"):

                att_net = nf.convolution_layer(image_input, [3, 3, 64],
                                               [1, 2, 2, 1],
                                               name="conv1-1")
                att_net = nf.convolution_layer(att_net, [3, 3, 64],
                                               [1, 1, 1, 1],
                                               name="conv1-2")
                att_net = tf.nn.max_pool(att_net,
                                         ksize=[1, 3, 3, 1],
                                         strides=[1, 2, 2, 1],
                                         padding='SAME')
                att_net = nf.convolution_layer(att_net, [3, 3, 128],
                                               [1, 1, 1, 1],
                                               name="conv2-1")
                att_net = nf.convolution_layer(att_net, [3, 3, 128],
                                               [1, 1, 1, 1],
                                               name="conv2-2")
                att_net = tf.nn.max_pool(att_net,
                                         ksize=[1, 3, 3, 1],
                                         strides=[1, 2, 2, 1],
                                         padding='SAME')
                att_net = nf.convolution_layer(att_net, [3, 3, 256],
                                               [1, 1, 1, 1],
                                               name="conv3-1")
                att_net = nf.convolution_layer(att_net, [3, 3, 256],
                                               [1, 1, 1, 1],
                                               name="conv3-2")
                att_net = tf.nn.max_pool(att_net,
                                         ksize=[1, 3, 3, 1],
                                         strides=[1, 2, 2, 1],
                                         padding='SAME')
                att_net = nf.convolution_layer(att_net, [3, 3, 512],
                                               [1, 1, 1, 1],
                                               name="conv4-1")
                #att_net = nf.convolution_layer(att_net, [3,3,512], [1,1,1,1],name="conv4-2")
                att_net = tf.nn.max_pool(att_net,
                                         ksize=[1, 3, 3, 1],
                                         strides=[1, 2, 2, 1],
                                         padding='SAME')
                bsize, a, b, c = att_net.get_shape().as_list()
                if bsize is None:
                    bsize = -1
                att_net = tf.reshape(
                    att_net,
                    [bsize, int(np.prod(att_net.get_shape()[1:]))])
                att_net = nf.fc_layer(att_net, 2048, name="fc1")
                att_net = nf.fc_layer(att_net, 2048, name="fc2")
                #att_net = tf.layers.dropout(att_net, rate=dropout, training=is_training, name='dropout1')
                logits = nf.fc_layer(att_net,
                                     channels * layers,
                                     name="logits",
                                     activat_fn=None)

                bsize = tf.shape(logits)[0]
                #logits = tf.reshape(logits, (bsize,1,1,channels*layers))
                logits = tf.reshape(logits, (bsize, 1, 1, channels, layers))
                weighting = tf.nn.softmax(logits)
                """
                    max_index = tf.argmax(tf.nn.softmax(logits),4) 
                    weighting = tf.one_hot(max_index, 
                                        depth=layers, 
                                        on_value=1.0,
                                        axis = -1)
                    """

            return weighting
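
The returned weighting has shape (batch, 1, 1, channels, layers), with the softmax taken over the last (layers) axis. Example #7 below applies it by stacking candidate outputs along that axis, multiplying, and summing; a standalone sketch of that blend with made-up sizes:

    import tensorflow as tf

    layers, channels = 4, 3
    candidates = tf.placeholder(tf.float32, [None, 32, 32, channels, layers])
    raw_weights = tf.placeholder(tf.float32, [None, 1, 1, channels, layers])

    weighting = tf.nn.softmax(raw_weights)              # softmax over the layers axis
    blended = tf.reduce_sum(candidates * weighting, 4)  # -> (batch, 32, 32, channels)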
Example #6
    def googleLeNet_v1(self):

        model_params = {
            "conv1": [5, 5, 64],
            "conv2": [3, 3, 128],
            "inception_1": {
                "1x1": 64,
                "3x3": {
                    "1x1": 96,
                    "3x3": 128
                },
                "5x5": {
                    "1x1": 16,
                    "5x5": 32
                },
                "s1x1": 32
            },
            "inception_2": {
                "1x1": 128,
                "3x3": {
                    "1x1": 128,
                    "3x3": 192
                },
                "5x5": {
                    "1x1": 32,
                    "5x5": 96
                },
                "s1x1": 64
            },
            "fc3": 10,
        }

        with tf.name_scope("googleLeNet_v1"):
            net = nf.convolution_layer(self.inputs,
                                       model_params["conv1"], [1, 2, 2, 1],
                                       name="conv1")
            net = tf.nn.max_pool(net,
                                 ksize=[1, 3, 3, 1],
                                 strides=[1, 2, 2, 1],
                                 padding='SAME')
            net = tf.nn.local_response_normalization(
                net,
                depth_radius=5,
                bias=1.0,
                alpha=0.0001,
                beta=0.75,
                name='LocalResponseNormalization')
            net = nf.convolution_layer(net,
                                       model_params["conv2"], [1, 1, 1, 1],
                                       name="conv2",
                                       flatten=False)
            net = tf.nn.local_response_normalization(
                net,
                depth_radius=5,
                bias=1.0,
                alpha=0.0001,
                beta=0.75,
                name='LocalResponseNormalization')
            net = nf.inception_v1(net,
                                  model_params,
                                  name="inception_1",
                                  flatten=False)
            net = nf.inception_v1(net,
                                  model_params,
                                  name="inception_2",
                                  flatten=False)
            net = tf.nn.avg_pool(net,
                                 ksize=[1, 3, 3, 1],
                                 strides=[1, 1, 1, 1],
                                 padding='VALID')
            net = tf.reshape(net, [-1, int(np.prod(net.get_shape()[1:]))])

            net = tf.layers.dropout(net,
                                    rate=self.dropout,
                                    training=self.is_training,
                                    name='dropout2')
            logits = nf.fc_layer(net,
                                 model_params["fc3"],
                                 name="logits",
                                 activat_fn=None)

        return logits
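
For readers unfamiliar with local response normalization, this is roughly what tf.nn.local_response_normalization computes with the parameters used above (a NumPy sketch over the channel axis of an NHWC tensor, not code from this repository):

    import numpy as np

    def lrn_reference(x, depth_radius=5, bias=1.0, alpha=1e-4, beta=0.75):
        # Each channel is divided by a power of the summed squares of its
        # neighbouring channels within +/- depth_radius.
        out = np.empty_like(x)
        channels = x.shape[-1]
        for d in range(channels):
            lo, hi = max(0, d - depth_radius), min(channels, d + depth_radius + 1)
            sqr_sum = np.sum(np.square(x[..., lo:hi]), axis=-1)
            out[..., d] = x[..., d] / np.power(bias + alpha * sqr_sum, beta)
        return out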
Example #7
    def EDSR_WGAN_att(self, kwargs):
        def attention_network(image_input, layers, channels, is_training):

            with tf.variable_scope("attention"):

                att_net = nf.convolution_layer(image_input, [3, 3, 64],
                                               [1, 2, 2, 1],
                                               name="conv1-1")
                att_net = nf.convolution_layer(att_net, [3, 3, 64],
                                               [1, 1, 1, 1],
                                               name="conv1-2")
                att_net = tf.nn.max_pool(att_net,
                                         ksize=[1, 3, 3, 1],
                                         strides=[1, 2, 2, 1],
                                         padding='SAME')
                att_net = nf.convolution_layer(att_net, [3, 3, 128],
                                               [1, 1, 1, 1],
                                               name="conv2-1")
                att_net = nf.convolution_layer(att_net, [3, 3, 128],
                                               [1, 1, 1, 1],
                                               name="conv2-2")
                att_net = tf.nn.max_pool(att_net,
                                         ksize=[1, 3, 3, 1],
                                         strides=[1, 2, 2, 1],
                                         padding='SAME')
                att_net = nf.convolution_layer(att_net, [3, 3, 256],
                                               [1, 1, 1, 1],
                                               name="conv3-1")
                att_net = nf.convolution_layer(att_net, [3, 3, 256],
                                               [1, 1, 1, 1],
                                               name="conv3-2")
                att_net = tf.nn.max_pool(att_net,
                                         ksize=[1, 3, 3, 1],
                                         strides=[1, 2, 2, 1],
                                         padding='SAME')
                att_net = nf.convolution_layer(att_net, [3, 3, 512],
                                               [1, 1, 1, 1],
                                               name="conv4-1")
                #att_net = nf.convolution_layer(att_net, [3,3,512], [1,1,1,1],name="conv4-2")
                att_net = tf.nn.max_pool(att_net,
                                         ksize=[1, 3, 3, 1],
                                         strides=[1, 2, 2, 1],
                                         padding='SAME')
                bsize, a, b, c = att_net.get_shape().as_list()
                if bsize is None:
                    bsize = -1
                att_net = tf.reshape(
                    att_net,
                    [bsize, int(np.prod(att_net.get_shape()[1:]))])
                att_net = nf.fc_layer(att_net, 2048, name="fc1")
                att_net = nf.fc_layer(att_net, 2048, name="fc2")
                #att_net = tf.layers.dropout(att_net, rate=dropout, training=is_training, name='dropout1')
                logits = nf.fc_layer(att_net,
                                     channels * layers,
                                     name="logits",
                                     activat_fn=None)

                bsize = tf.shape(logits)[0]
                #logits = tf.reshape(logits, (bsize,1,1,channels*layers))
                logits = tf.reshape(logits, (bsize, 1, 1, channels, layers))
                weighting = tf.nn.softmax(logits)
                """
                    max_index = tf.argmax(tf.nn.softmax(logits),4) 
                    weighting = tf.one_hot(max_index, 
                                        depth=layers, 
                                        on_value=1.0,
                                        axis = -1)
                    """

            return weighting

        reuse = kwargs["reuse"]
        d_inputs = kwargs["d_inputs"]
        is_training = kwargs["is_training"]
        net = kwargs["net"]

        init = tf.random_normal_initializer(stddev=0.01)

        feature_size = 64
        scaling_factor = 1

        DEPTH = 64

        model_params = {

            # Generator
            'conv1': [3, 3, feature_size],
            'resblock': [3, 3, feature_size],
            'conv2': [3, 3, feature_size],
            'conv3': [3, 3, 3],
            'd_output': [3, 3, 3 * feature_size],

            # Discriminator
            'conv1_wgan': [5, 5, DEPTH],
            'conv2_wgan': [5, 5, DEPTH * 2],
            'conv3_wgan': [5, 5, DEPTH * 4],
            'conv4_wgan': [5, 5, DEPTH * 8],
            'd_output_wgan': [5, 5, 1],
        }

        if net is "Gen":

            ### Generator
            num_resblock = 16

            g_input = self.inputs

            with tf.variable_scope("EDSR_gen", reuse=reuse):

                with tf.name_scope("attention_x2"):
                    att_layers = feature_size
                    arr = attention_network(self.inputs, att_layers, 3,
                                            is_training)

                x = nf.convolution_layer(g_input,
                                         model_params["conv1"], [1, 1, 1, 1],
                                         name="conv1",
                                         activat_fn=None,
                                         initializer=init)
                conv_1 = x
                with tf.variable_scope("resblock", reuse=reuse):

                    #Add the residual blocks to the model
                    for i in range(num_resblock):
                        x = nf.resBlock(x,
                                        feature_size,
                                        scale=scaling_factor,
                                        reuse=reuse,
                                        idx=i,
                                        initializer=init)
                    x = nf.convolution_layer(x,
                                             model_params["conv2"],
                                             [1, 1, 1, 1],
                                             name="conv2",
                                             activat_fn=None,
                                             initializer=init)
                    x += conv_1
                x = nf.convolution_layer(x,
                                         model_params["conv1"], [1, 1, 1, 1],
                                         name="conv3",
                                         activat_fn=None,
                                         initializer=init)
                g_network = nf.convolution_layer(x,
                                                 model_params["d_output"],
                                                 [1, 1, 1, 1],
                                                 name="conv4",
                                                 activat_fn=None,
                                                 initializer=init)

                #Attention
                bsize, a, b, c = g_network.get_shape().as_list()
                g_network = tf.reshape(g_network, (-1, a, b, 3, att_layers))
                g_network = tf.multiply(g_network, arr)
                g_network = tf.reduce_sum(g_network, 4)

                g_output = tf.nn.sigmoid(g_network)

            return g_output

        elif net is "Dis":
            d_model = kwargs["d_model"]

            ### Discriminator
            num_resblock = 2

            input_gan = d_inputs

            with tf.variable_scope("EDSR_dis", reuse=reuse):

                if d_model is "PatchWGAN_GP":

                    layer1_1 = nf.convolution_layer(input_gan,
                                                    model_params["conv1_wgan"],
                                                    [1, 1, 1, 1],
                                                    name="conv1_wgan_1",
                                                    activat_fn=nf.lrelu,
                                                    initializer=init)
                    layer1_2 = nf.convolution_layer(layer1_1,
                                                    model_params["conv1_wgan"],
                                                    [1, 1, 1, 1],
                                                    name="conv1_wgan_2",
                                                    activat_fn=nf.lrelu,
                                                    initializer=init)
                    layer1_3 = nf.convolution_layer(layer1_1 + layer1_2,
                                                    model_params["conv1_wgan"],
                                                    [1, 2, 2, 1],
                                                    name="conv1_wgan_3",
                                                    activat_fn=nf.lrelu,
                                                    initializer=init)
                    #                    layer1_3 = nf.convolution_layer(layer1_1 + layer1_2,       model_params["conv1_wgan"],    [1,1,1,1], name="conv1_wgan_3",     activat_fn=nf.lrelu, initializer=init)

                    layer2_1 = nf.convolution_layer(layer1_3,
                                                    model_params["conv2_wgan"],
                                                    [1, 1, 1, 1],
                                                    name="conv2_wgan_1",
                                                    activat_fn=nf.lrelu,
                                                    initializer=init)
                    layer2_2 = nf.convolution_layer(layer2_1,
                                                    model_params["conv2_wgan"],
                                                    [1, 1, 1, 1],
                                                    name="conv2_wgan_2",
                                                    activat_fn=nf.lrelu,
                                                    initializer=init)
                    layer2_3 = nf.convolution_layer(layer2_1 + layer2_2,
                                                    model_params["conv2_wgan"],
                                                    [1, 2, 2, 1],
                                                    name="conv2_wgan_3",
                                                    activat_fn=nf.lrelu,
                                                    initializer=init)
                    #                    layer2_3 = nf.convolution_layer(layer2_1 + layer2_2,       model_params["conv2_wgan"],    [1,1,1,1], name="conv2_wgan_3",     activat_fn=nf.lrelu, initializer=init)

                    layer3_1 = nf.convolution_layer(layer2_3,
                                                    model_params["conv3_wgan"],
                                                    [1, 1, 1, 1],
                                                    name="conv3_wgan_1",
                                                    activat_fn=nf.lrelu,
                                                    initializer=init)
                    layer3_2 = nf.convolution_layer(layer3_1,
                                                    model_params["conv3_wgan"],
                                                    [1, 1, 1, 1],
                                                    name="conv3_wgan_2",
                                                    activat_fn=nf.lrelu,
                                                    initializer=init)
                    layer3_3 = nf.convolution_layer(layer3_1 + layer3_2,
                                                    model_params["conv3_wgan"],
                                                    [1, 2, 2, 1],
                                                    name="conv3_wgan_3",
                                                    activat_fn=nf.lrelu,
                                                    initializer=init)
                    #                    layer3_3 = nf.convolution_layer(layer3_1 + layer3_2,       model_params["conv3_wgan"],    [1,1,1,1], name="conv3_wgan_3",     activat_fn=nf.lrelu, initializer=init)

                    layer4_1 = nf.convolution_layer(
                        layer3_3,
                        model_params["d_output_wgan"], [1, 1, 1, 1],
                        name="d_output_wgan_1",
                        activat_fn=nf.lrelu,
                        initializer=init)
                    output = nf.convolution_layer(
                        layer4_1,
                        model_params["d_output_wgan"], [1, 1, 1, 1],
                        name="d_output_wgan_2",
                        activat_fn=nf.lrelu,
                        initializer=init)

                    d_logits = output

                    return [d_logits, tf.reduce_mean(layer1_3)]

                else:
                    raise ValueError("d_model parameter error!")
Example #8
    def EDSR_WGAN(self, kwargs):

        reuse = kwargs["reuse"]
        d_inputs = kwargs["d_inputs"]
        d_target = kwargs["d_target"]
        is_training = kwargs["is_training"]
        net = kwargs["net"]

        init = tf.random_normal_initializer(stddev=0.01)

        feature_size = 64
        scaling_factor = 1

        DEPTH = 28
        #        DEPTH = 32

        model_params = {
            'conv1': [3, 3, feature_size],
            'resblock': [3, 3, feature_size],
            'conv2': [3, 3, feature_size],
            'conv3': [3, 3, 3],
            'd_output': [3, 3, 3],
            'conv1_wgan-gp': [5, 5, DEPTH],
            'conv2_wgan-gp': [5, 5, DEPTH * 2],
            'conv3_wgan-gp': [5, 5, DEPTH * 4],
            'd_output_wgan-gp': [5, 5, 3],

            #                        # v5-0
            'conv1_wgan': [5, 5, DEPTH],
            'conv2_wgan': [5, 5, DEPTH * 2],
            'conv3_wgan': [5, 5, DEPTH * 4],
            'd_output_wgan': [5, 5, 3],
            'maxpool_wgan': [1, 2, 2, 1],
            #
            #                        # v5-1
            #                        'conv1_wgan': [3,3,DEPTH],
            #                        'conv2_wgan': [3,3,DEPTH*2],
            #                        'conv3_wgan': [3,3,DEPTH*4],
            #                        'd_output_wgan': [3,3,3],
            #                        'maxpool_wgan': [1, 3, 3, 1],

            #                        # v5-3
            #                        'conv1_wgan': [9,9,DEPTH],
            #                        'conv2_wgan': [9,9,DEPTH*2],
            #                        'conv3_wgan': [9,9,DEPTH*4],
            #                        'd_output_wgan': [9,9,3],
            #                        'maxpool_wgan': [1, 2, 2, 1],

            #                        # v5-4
            #                        'conv1_wgan': [5,5,DEPTH],
            #                        'conv2_wgan': [7,7,DEPTH*2],
            #                        'conv3_wgan': [9,9,DEPTH*4],
            #                        'd_output_wgan': [5,5,3],
            #                        'maxpool_wgan': [1, 2, 2, 1],
        }

        if net is "Gen":

            ### Generator
            num_resblock = 16

            g_input = self.inputs

            with tf.variable_scope("EDSR_gen", reuse=reuse):
                x = nf.convolution_layer(g_input,
                                         model_params["conv1"], [1, 1, 1, 1],
                                         name="conv1",
                                         activat_fn=None,
                                         initializer=init)
                conv_1 = x
                with tf.variable_scope("resblock", reuse=reuse):

                    #Add the residual blocks to the model
                    for i in range(num_resblock):
                        x = nf.resBlock(x,
                                        feature_size,
                                        scale=scaling_factor,
                                        reuse=reuse,
                                        idx=i,
                                        initializer=init)
                    x = nf.convolution_layer(x,
                                             model_params["conv2"],
                                             [1, 1, 1, 1],
                                             name="conv2",
                                             activat_fn=None,
                                             initializer=init)
                    x += conv_1
                x = nf.convolution_layer(x,
                                         model_params["conv1"], [1, 1, 1, 1],
                                         name="conv3",
                                         activat_fn=None,
                                         initializer=init)
                g_network = nf.convolution_layer(x,
                                                 model_params["d_output"],
                                                 [1, 1, 1, 1],
                                                 name="conv4",
                                                 activat_fn=None,
                                                 initializer=init)

                g_output = tf.nn.sigmoid(g_network)

            return g_output

        elif net is "Dis":
            d_model = kwargs["d_model"]

            ### Discriminator
            num_resblock = 2

            input_gan = d_inputs

            with tf.variable_scope("EDSR_dis", reuse=reuse):
                if d_model is "EDSR":

                    x = nf.convolution_layer(input_gan,
                                             model_params["conv1"],
                                             [1, 1, 1, 1],
                                             name="conv1",
                                             activat_fn=nf.lrelu,
                                             initializer=init)
                    conv_1 = x
                    with tf.variable_scope("resblock", reuse=reuse):
                        #Add the residual blocks to the model
                        for i in range(num_resblock):
                            x = nf.resBlock(x,
                                            feature_size,
                                            scale=scaling_factor,
                                            reuse=reuse,
                                            idx=i,
                                            activation_fn=nf.lrelu,
                                            initializer=init)
                        x = nf.convolution_layer(x,
                                                 model_params["conv2"],
                                                 [1, 1, 1, 1],
                                                 name="conv2",
                                                 activat_fn=nf.lrelu,
                                                 initializer=init)
                        x += conv_1

                    x = nf.convolution_layer(x,
                                             model_params["conv1"],
                                             [1, 1, 1, 1],
                                             name="conv3",
                                             activat_fn=nf.lrelu,
                                             initializer=init)
                    d_logits = nf.convolution_layer(x,
                                                    model_params["d_output"],
                                                    [1, 1, 1, 1],
                                                    name="conv4",
                                                    activat_fn=nf.lrelu,
                                                    flatten=False,
                                                    initializer=init)

                elif d_model is "WGAN-GP":

                    x = nf.convolution_layer(input_gan,
                                             model_params["conv1_wgan-gp"],
                                             [1, 1, 1, 1],
                                             name="conv1_wgan-gp",
                                             activat_fn=nf.lrelu,
                                             initializer=init)
                    x = nf.convolution_layer(x,
                                             model_params["conv2_wgan-gp"],
                                             [1, 1, 1, 1],
                                             name="conv2_wgan-gp",
                                             activat_fn=nf.lrelu,
                                             initializer=init)
                    x = nf.convolution_layer(x,
                                             model_params["conv3_wgan-gp"],
                                             [1, 1, 1, 1],
                                             name="conv3_wgan-gp",
                                             activat_fn=nf.lrelu,
                                             initializer=init)
                    x = nf.convolution_layer(x,
                                             model_params["d_output_wgan-gp"],
                                             [1, 1, 1, 1],
                                             name="d_output_wgan-gp",
                                             activat_fn=nf.lrelu,
                                             initializer=init)
                    d_logits = x

                elif d_model is "PatchWGAN":

                    x = nf.convolution_layer(input_gan,
                                             model_params["conv1_wgan"],
                                             [1, 1, 1, 1],
                                             name="conv1_wgan",
                                             activat_fn=nf.lrelu,
                                             initializer=init)

                    pool1 = nf.max_pool_layer(x,
                                              model_params["maxpool_wgan"],
                                              [1, 2, 2, 1],
                                              name="conv1_wgan_mp")
                    pool1_ = nf.max_pool_layer(-x,
                                               model_params["maxpool_wgan"],
                                               [1, 2, 2, 1],
                                               name="conv1_wgan_mp")
                    minus_mask = tf.cast(tf.greater(tf.abs(pool1_), pool1),
                                         tf.float32)
                    plus_mask = tf.cast(tf.greater(pool1, tf.abs(pool1_)),
                                        tf.float32)
                    pool1 = plus_mask * pool1 + minus_mask * (-pool1_)

                    x = nf.convolution_layer(pool1,
                                             model_params["conv2_wgan"],
                                             [1, 1, 1, 1],
                                             name="conv2_wgan",
                                             activat_fn=nf.lrelu,
                                             initializer=init)

                    pool2 = nf.max_pool_layer(x,
                                              model_params["maxpool_wgan"],
                                              [1, 2, 2, 1],
                                              name="conv2_wgan_mp")
                    pool2_ = nf.max_pool_layer(-x,
                                               model_params["maxpool_wgan"],
                                               [1, 2, 2, 1],
                                               name="conv2_wgan_mp")
                    minus_mask = tf.cast(tf.greater(tf.abs(pool2_), pool2),
                                         tf.float32)
                    plus_mask = tf.cast(tf.greater(pool2, tf.abs(pool2_)),
                                        tf.float32)
                    pool2 = plus_mask * pool2 + minus_mask * (-pool2_)

                    x = nf.convolution_layer(pool2,
                                             model_params["conv3_wgan"],
                                             [1, 1, 1, 1],
                                             name="conv3_wgan",
                                             activat_fn=nf.lrelu,
                                             initializer=init)

                    pool3 = nf.max_pool_layer(x,
                                              model_params["maxpool_wgan"],
                                              [1, 2, 2, 1],
                                              name="conv3_wgan_mp")
                    pool3_ = nf.max_pool_layer(-x,
                                               model_params["maxpool_wgan"],
                                               [1, 2, 2, 1],
                                               name="conv3_wgan_mp")
                    minus_mask = tf.cast(tf.greater(tf.abs(pool3_), pool3),
                                         tf.float32)
                    plus_mask = tf.cast(tf.greater(pool3, tf.abs(pool3_)),
                                        tf.float32)
                    pool3 = plus_mask * pool3 + minus_mask * (-pool3_)

                    x = nf.convolution_layer(pool3,
                                             model_params["d_output_wgan"],
                                             [1, 1, 1, 1],
                                             name="d_output_wgan",
                                             activat_fn=nf.lrelu,
                                             initializer=init)

                    ### v4
                    #                    pool4 = nf.max_pool_layer(x, model_params["maxpool_wgan"], [1, 2, 2, 1], name="conv4_wgan_mp")
                    #                    pool4_ = nf.max_pool_layer(-x, model_params["maxpool_wgan"], [1, 2, 2, 1], name="conv4_wgan_mp")
                    #                    minus_mask = tf.cast(tf.greater(tf.abs(pool4_), pool4), tf.float32)
                    #                    plus_mask = tf.cast(tf.greater(pool4, tf.abs(pool4_)), tf.float32)
                    #                    x = plus_mask*pool4 + minus_mask*(-pool4_)

                    d_logits = x

                elif d_model is "PatchWGAN_GP":

                    patch_size = 16
                    _, image_h, image_w, image_c = input_gan.get_shape().as_list()

                    d_patch_list = []
                    for i in range(0, image_h // patch_size):
                        for j in range(0, image_w // patch_size):
                            input_patch = input_gan[:, i:i + patch_size,
                                                    j:j + patch_size, :]

                            x = nf.convolution_layer(
                                input_patch,
                                model_params["conv1_wgan-gp"], [1, 1, 1, 1],
                                name="conv1_wgan-gp",
                                activat_fn=nf.lrelu,
                                initializer=init)
                            x = nf.convolution_layer(
                                x,
                                model_params["conv2_wgan-gp"], [1, 1, 1, 1],
                                name="conv2_wgan-gp",
                                activat_fn=nf.lrelu,
                                initializer=init)
                            x = nf.convolution_layer(
                                x,
                                model_params["conv3_wgan-gp"], [1, 1, 1, 1],
                                name="conv3_wgan-gp",
                                activat_fn=nf.lrelu,
                                initializer=init)
                            x = nf.convolution_layer(
                                x,
                                model_params["d_output_wgan-gp"], [1, 1, 1, 1],
                                name="d_output_wgan-gp",
                                activat_fn=nf.lrelu,
                                initializer=init)

                            d_curr_patch = x
                            d_curr_patch = tf.reduce_mean(d_curr_patch,
                                                          axis=[1, 2, 3])
                            d_patch_list.append(d_curr_patch)

                    d_patch_stack = tf.stack(d_patch_list, axis=1)
                    d_patch_weight = d_patch_stack / tf.reduce_sum(
                        tf.abs(d_patch_stack), axis=1, keep_dims=True)
                    d_patch = d_patch_weight * d_patch_stack

                    d_logits = d_patch

            return d_logits
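
The "WGAN-GP" and "PatchWGAN_GP" branches only build the critic; the gradient-penalty term itself would live in the loss code, which is not shown here. A generic sketch of the standard WGAN-GP penalty, not the repository's actual loss:

    import tensorflow as tf

    def gradient_penalty_sketch(discriminator, real, fake, lam=10.0):
        # Interpolate between real and fake images and penalise critic
        # gradient norms that deviate from 1.
        eps = tf.random_uniform([tf.shape(real)[0], 1, 1, 1], 0.0, 1.0)
        interp = eps * real + (1.0 - eps) * fake
        d_interp = discriminator(interp)
        grads = tf.gradients(d_interp, [interp])[0]
        norms = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]) + 1e-12)
        return lam * tf.reduce_mean(tf.square(norms - 1.0))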
Example #9
    def EXAMPLE_CNN(self, kwargs):

        model_params = {
            "conv_1": [3, 3, 128],
            "conv_2": [3, 3, 256],
            "fc_1": 1024,
            "fc_2": 512,
            "fc_out": 10,
        }

        reuse = kwargs["reuse"]
        l2_reg = tf.contrib.layers.l2_regularizer(1e-5)

        print(
            "==================================================================="
        )

        with tf.variable_scope("CNN", reuse=reuse):

            input = kwargs["input"]

            print("[EXAMPLE_CNN] input: %s" % input.get_shape())

            conv_1_1 = nf.convolution_layer(input,
                                            model_params["conv_1"],
                                            [1, 1, 1, 1],
                                            name="conv_1_1",
                                            padding='SAME',
                                            activat_fn=nf.lrelu,
                                            is_bn=True,
                                            is_training=self.is_training,
                                            reg=l2_reg)
            conv_1_2 = nf.convolution_layer(conv_1_1,
                                            model_params["conv_1"],
                                            [1, 1, 1, 1],
                                            name="conv_1_2",
                                            padding='SAME',
                                            activat_fn=nf.lrelu,
                                            is_bn=True,
                                            is_training=self.is_training,
                                            reg=l2_reg)
            conv_1 = conv_1_1 + conv_1_2
            conv_1 = tf.nn.max_pool(conv_1, [1, 2, 2, 1], [1, 2, 2, 1],
                                    padding='VALID')
            conv_1 = tf.layers.dropout(conv_1,
                                       rate=self.dropout,
                                       training=self.is_training,
                                       name='conv_1_dropout')
            print("conv_1: %s" % conv_1.get_shape())

            conv_2_1 = nf.convolution_layer(conv_1,
                                            model_params["conv_2"],
                                            [1, 1, 1, 1],
                                            name="conv_2_1",
                                            padding='SAME',
                                            activat_fn=nf.lrelu,
                                            is_bn=True,
                                            is_training=self.is_training,
                                            reg=l2_reg)
            conv_2_2 = nf.convolution_layer(conv_2_1,
                                            model_params["conv_2"],
                                            [1, 1, 1, 1],
                                            name="conv_2_2",
                                            padding='SAME',
                                            activat_fn=nf.lrelu,
                                            is_bn=True,
                                            is_training=self.is_training,
                                            reg=l2_reg)
            conv_2 = conv_2_1 + conv_2_2
            conv_2 = tf.nn.max_pool(conv_2, [1, 2, 2, 1], [1, 2, 2, 1],
                                    padding='VALID')
            conv_2 = tf.layers.dropout(conv_2,
                                       rate=self.dropout,
                                       training=self.is_training,
                                       name='conv_2_dropout')
            print("conv_2: %s" % conv_2.get_shape())

            conv_code = tf.reshape(conv_2,
                                   [tf.shape(self.inputs)[0], 7 * 7 * 256])
            fc_1 = nf.fc_layer(conv_code,
                               model_params["fc_1"],
                               name="fc_1",
                               activat_fn=nf.lrelu,
                               is_bn=True,
                               is_training=self.is_training,
                               reg=l2_reg)
            fc_1 = tf.layers.dropout(fc_1,
                                     rate=self.dropout,
                                     training=self.is_training,
                                     name='fc_1_dropout')
            print("fc_1: %s" % fc_1.get_shape())

            fc_2 = nf.fc_layer(fc_1,
                               model_params["fc_2"],
                               name="fc_2",
                               activat_fn=nf.lrelu,
                               is_bn=True,
                               is_training=self.is_training,
                               reg=l2_reg)
            fc_2 = tf.layers.dropout(fc_2,
                                     rate=self.dropout,
                                     training=self.is_training,
                                     name='fc_2_dropout')
            print("fc_2: %s" % fc_2.get_shape())

            fc_out = nf.fc_layer(fc_2,
                                 model_params["fc_out"],
                                 name="fc_out",
                                 activat_fn=None)
            print("fc_out: %s" % fc_out.get_shape())

            return fc_out
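
These layers pass is_bn=True together with is_training. If that path wraps tf.layers.batch_normalization (an assumption about the nf helper), its moving-average updates land in the UPDATE_OPS collection and must be run alongside the training op in TF 1.x:

    import tensorflow as tf

    x = tf.placeholder(tf.float32, [None, 8])
    h = tf.layers.batch_normalization(tf.layers.dense(x, 8), training=True)
    loss = tf.reduce_mean(tf.square(h))

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):   # ensure BN statistics are updated
        train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)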
Example #10
    def ResNet10(self, kwargs):

        model_params = {
            "conv1": [7, 7, 64],
            "conv2_1": [3, 3, 64],
            "conv2_2": [3, 3, 64],
            "conv3_1": [3, 3, 128],
            "conv3_2": [3, 3, 128],
            "conv3_sc": [1, 1, 128],
            "conv4_1": [3, 3, 256],
            "conv4_2": [3, 3, 256],
            "conv4_sc": [1, 1, 256],
            "conv5_1": [3, 3, 512],
            "conv5_2": [3, 3, 512],
            "conv5_sc": [1, 1, 512],
            "convM_1": [3, 3, 512],
            "convM_2": [3, 3, 512],
            "fcM_3": 8,
            "fcM_4": 2,
        }

        reuse = kwargs["reuse"]
        l2_reg = tf.contrib.layers.l2_regularizer(1e-5)

        print(
            "==================================================================="
        )

        with tf.variable_scope("ResNet10", reuse=reuse):

            input = kwargs["input"]

            print("[ResNet-10] input: %s" % input.get_shape())

            # conv 1
            x = nf.convolution_layer(input,
                                     model_params["conv1"], [1, 2, 2, 1],
                                     name="conv1",
                                     padding='SAME',
                                     activat_fn=tf.nn.relu,
                                     is_bn=True,
                                     is_training=self.is_training)
            x = tf.nn.max_pool(x, [1, 3, 3, 1], [1, 2, 2, 1],
                               padding='SAME',
                               name='max_1')
            print("conv1: %s" % x.get_shape())
            conv1 = x

            # conv 2
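            # Identity shortcut: conv2_1/conv2_2 keep 64 channels at stride 1, so
            # the block input sc_2 can be added back without a projection.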
            sc_2 = x
            x = nf.convolution_layer(x,
                                     model_params["conv2_1"], [1, 1, 1, 1],
                                     name="conv2_1",
                                     padding='SAME',
                                     activat_fn=tf.nn.relu,
                                     is_bn=True,
                                     is_training=self.is_training)
            x = nf.convolution_layer(x,
                                     model_params["conv2_2"], [1, 1, 1, 1],
                                     name="conv2_2",
                                     padding='SAME',
                                     activat_fn=None,
                                     is_bn=True,
                                     is_training=self.is_training)
            x = tf.add(x, sc_2)
            x = tf.nn.relu(x, name="conv2_2" + "_out")
            print("[residual_simple_block] conv2: %s" % x.get_shape())

            # conv 3
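            # conv3-conv5 each halve the spatial size and raise the channel count,
            # so the shortcut branch is a strided 1x1 projection (convX_sc) instead
            # of an identity connection.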
            sc_3 = nf.convolution_layer(x,
                                        model_params["conv3_sc"], [1, 2, 2, 1],
                                        name="conv3_sc",
                                        padding='SAME',
                                        activat_fn=None,
                                        is_bn=True,
                                        is_training=self.is_training)
            x = nf.convolution_layer(x,
                                     model_params["conv3_1"], [1, 2, 2, 1],
                                     name="conv3_1",
                                     padding='SAME',
                                     activat_fn=tf.nn.relu,
                                     is_bn=True,
                                     is_training=self.is_training)
            x = nf.convolution_layer(x,
                                     model_params["conv3_2"], [1, 1, 1, 1],
                                     name="conv3_2",
                                     padding='SAME',
                                     activat_fn=None,
                                     is_bn=True,
                                     is_training=self.is_training)
            x = tf.add(x, sc_3)
            x = tf.nn.relu(x, name="conv3_2" + "_out")
            print("[residual_simple_block] conv3: %s" % x.get_shape())

            # conv 4
            sc_4 = nf.convolution_layer(x,
                                        model_params["conv4_sc"], [1, 2, 2, 1],
                                        name="conv4_sc",
                                        padding='SAME',
                                        activat_fn=None,
                                        is_bn=True,
                                        is_training=self.is_training)
            x = nf.convolution_layer(x,
                                     model_params["conv4_1"], [1, 2, 2, 1],
                                     name="conv4_1",
                                     padding='SAME',
                                     activat_fn=tf.nn.relu,
                                     is_bn=True,
                                     is_training=self.is_training)
            x = nf.convolution_layer(x,
                                     model_params["conv4_2"], [1, 1, 1, 1],
                                     name="conv4_2",
                                     padding='SAME',
                                     activat_fn=None,
                                     is_bn=True,
                                     is_training=self.is_training)
            x = tf.add(x, sc_4)
            x = tf.nn.relu(x, name="conv4_2" + "_out")
            print("[residual_simple_block] conv4: %s" % x.get_shape())

            # conv 5
            sc_5 = nf.convolution_layer(x,
                                        model_params["conv5_sc"], [1, 2, 2, 1],
                                        name="conv5_sc",
                                        padding='SAME',
                                        activat_fn=None,
                                        is_bn=True,
                                        is_training=self.is_training)
            x = nf.convolution_layer(x,
                                     model_params["conv5_1"], [1, 2, 2, 1],
                                     name="conv5_1",
                                     padding='SAME',
                                     activat_fn=tf.nn.relu,
                                     is_bn=True,
                                     is_training=self.is_training)
            x = nf.convolution_layer(x,
                                     model_params["conv5_2"], [1, 1, 1, 1],
                                     name="conv5_2",
                                     padding='SAME',
                                     activat_fn=None,
                                     is_bn=True,
                                     is_training=self.is_training)
            x = tf.add(x, sc_5)
            x = tf.nn.relu(x, name="conv5_2" + "_out")
            print("[residual_simple_block] conv5: %s" % x.get_shape())

            # module conv 1
            x = nf.convolution_layer(x,
                                     model_params["convM_1"], [1, 1, 1, 1],
                                     name="convM_1",
                                     padding='SAME',
                                     activat_fn=tf.nn.relu,
                                     is_bn=True,
                                     is_training=self.is_training,
                                     reg=l2_reg)
            x = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1],
                               padding='VALID',
                               name='maxM_1')
            print("[relation_module] convM_1: %s" % x.get_shape())

            # module conv 2
            x = nf.convolution_layer(x,
                                     model_params["convM_2"], [1, 1, 1, 1],
                                     name="convM_2",
                                     padding='SAME',
                                     activat_fn=tf.nn.relu,
                                     is_bn=True,
                                     is_training=self.is_training,
                                     reg=l2_reg)
            x = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1],
                               padding='VALID',
                               name='maxM_2')
            print("[relation_module] convM_2: %s" % x.get_shape())

            # module fc 3
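            # Assumption: the hard-coded 3*1*512 flatten below implies a non-square
            # input whose feature map has shrunk to 3x1 by this point.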
            x = tf.reshape(x, [tf.shape(input)[0], 3 * 1 * 512])
            x = nf.fc_layer(x,
                            model_params["fcM_3"],
                            name="fcM_3",
                            activat_fn=tf.nn.relu,
                            is_bn=False,
                            is_training=self.is_training,
                            reg=l2_reg)
            print("[relation_module] fcM_3: %s" % x.get_shape())

            # module fc 4
            x = nf.fc_layer(x,
                            model_params["fcM_4"],
                            name="fcM_4",
                            activat_fn=None,
                            is_bn=False,
                            is_training=self.is_training,
                            reg=l2_reg)
            print("[relation_module] fcM_4: %s" % x.get_shape())

            return x, conv1
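
The method above is driven through a single kwargs dict rather than named arguments. A minimal usage sketch follows; the class name `Net`, its constructor, and the 384x128 input resolution are assumptions rather than anything defined by the example (384x128 is simply one size that the five stride-2 stages and the two 2x2 VALID pools reduce to the 3x1x512 feature map hard-coded in the reshape).

import tensorflow as tf

# Hypothetical driver: `Net` stands in for the class that owns ResNet10 and
# exposes the `is_training` flag consumed by the batch-norm layers above.
net = Net(is_training=tf.placeholder(tf.bool, name="is_training"))

# One input size consistent with the 3*1*512 reshape (assumption, see above).
images = tf.placeholder(tf.float32, [None, 384, 128, 3], name="images")

logits, conv1_feat = net.ResNet10({"input": images, "reuse": False})
# logits: [batch, 2] relation-module output; conv1_feat: the early conv1
# feature map that the method also returns.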
Example No. 11
    def CNN_1st_v1(self, kwargs):

        model_params = {
            "conv_1": [11, 11, 128],
            "conv_2": [5, 5, 256],
            "conv_3": [3, 3, 512],
            "conv_4": [3, 3, 1024],
            "conv_5": [3, 3, 1024],
            "fc_1": 1024,
            "fc_2": 512,
            "fc_out": 2,
        }

        reuse = kwargs["reuse"]
        l2_reg = tf.contrib.layers.l2_regularizer(1e-5)

        print(
            "==================================================================="
        )

        with tf.variable_scope("CNN", reuse=reuse):

            input = kwargs["input"]

            print("[CNN_1st_v1] input: %s" % input.get_shape())

            conv_1_1 = nf.convolution_layer(input,
                                            model_params["conv_1"],
                                            [1, 4, 4, 1],
                                            name="conv_1_1",
                                            padding='SAME',
                                            activat_fn=nf.lrelu,
                                            is_bn=True,
                                            is_training=self.is_training,
                                            reg=l2_reg)
            #conv_1_2 = nf.convolution_layer(conv_1_1, model_params["conv_1"], [1,1,1,1], name="conv_1_2", padding='SAME', activat_fn=nf.lrelu, is_bn=True, is_training=self.is_training, reg=l2_reg)
            #conv_1 = conv_1_1 + conv_1_2
            conv_1 = tf.nn.max_pool(conv_1_1, [1, 3, 3, 1], [1, 2, 2, 1],
                                    padding='VALID')
            conv_1 = tf.layers.dropout(conv_1,
                                       rate=self.dropout,
                                       training=self.is_training,
                                       name='conv_1_dropout')
            print("conv_1: %s" % conv_1.get_shape())

            conv_2_1 = nf.convolution_layer(conv_1,
                                            model_params["conv_2"],
                                            [1, 2, 2, 1],
                                            name="conv_2_1",
                                            padding='SAME',
                                            activat_fn=nf.lrelu,
                                            is_bn=True,
                                            is_training=self.is_training,
                                            reg=l2_reg)
            #conv_2_2 = nf.convolution_layer(conv_2_1, model_params["conv_2"], [1,1,1,1], name="conv_2_2", padding='SAME', activat_fn=nf.lrelu, is_bn=True, is_training=self.is_training, reg=l2_reg)
            #conv_2 = conv_2_1 + conv_2_2
            conv_2 = tf.nn.max_pool(conv_2_1, [1, 3, 3, 1], [1, 2, 2, 1],
                                    padding='VALID')
            conv_2 = tf.layers.dropout(conv_2,
                                       rate=self.dropout,
                                       training=self.is_training,
                                       name='conv_2_dropout')
            print("conv_2: %s" % conv_2.get_shape())

            conv_3_1 = nf.convolution_layer(conv_2,
                                            model_params["conv_3"],
                                            [1, 1, 1, 1],
                                            name="conv_3_1",
                                            padding='SAME',
                                            activat_fn=nf.lrelu,
                                            is_bn=True,
                                            is_training=self.is_training,
                                            reg=l2_reg)
            #conv_3_2 = nf.convolution_layer(conv_3_1, model_params["conv_3"], [1,1,1,1], name="conv_3_2", padding='SAME', activat_fn=nf.lrelu, is_bn=True, is_training=self.is_training, reg=l2_reg)
            #conv_3 = conv_3_1 + conv_3_2
            conv_3 = tf.layers.dropout(conv_3_1,
                                       rate=self.dropout,
                                       training=self.is_training,
                                       name='conv_3_dropout')
            print("conv_3: %s" % conv_3.get_shape())

            #conv_4_1 = nf.convolution_layer(conv_3, model_params["conv_4"], [1,1,1,1], name="conv_4_1", padding='SAME', activat_fn=nf.lrelu, is_bn=True, is_training=self.is_training, reg=l2_reg)
            #conv_4_2 = nf.convolution_layer(conv_4_1, model_params["conv_4"], [1,1,1,1], name="conv_4_2", padding='SAME', activat_fn=nf.lrelu, is_bn=True, is_training=self.is_training, reg=l2_reg)
            #conv_4 = conv_4_1 + conv_4_2
            #conv_4 = tf.layers.dropout(conv_4, rate=self.dropout, training=self.is_training, name='conv_4_dropout')
            #print("conv_4: %s" % conv_4.get_shape())

            conv_5_1 = nf.convolution_layer(conv_3,
                                            model_params["conv_5"],
                                            [1, 1, 1, 1],
                                            name="conv_5_1",
                                            padding='SAME',
                                            activat_fn=nf.lrelu,
                                            is_bn=True,
                                            is_training=self.is_training,
                                            reg=l2_reg)
            #conv_5_2 = nf.convolution_layer(conv_5_1, model_params["conv_5"], [1,1,1,1], name="conv_5_2", padding='SAME', activat_fn=nf.lrelu, is_bn=True, is_training=self.is_training, reg=l2_reg)
            #conv_5 = conv_5_1 + conv_5_2
            conv_5 = tf.nn.max_pool(conv_5_1, [1, 3, 3, 1], [1, 2, 2, 1],
                                    padding='VALID')
            conv_5 = tf.layers.dropout(conv_5,
                                       rate=self.dropout,
                                       training=self.is_training,
                                       name='conv_5_dropout')
            print("conv_5: %s" % conv_5.get_shape())

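            # Assumption: the hard-coded 4*2*1024 flatten below implies conv_5 has
            # been reduced to a 4x2 feature map with 1024 channels by this point.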
            conv_code = tf.reshape(conv_5,
                                   [tf.shape(self.inputs)[0], 4 * 2 * 1024])
            fc_1 = nf.fc_layer(conv_code,
                               model_params["fc_1"],
                               name="fc_1",
                               activat_fn=nf.lrelu,
                               is_bn=True,
                               is_training=self.is_training,
                               reg=l2_reg)
            fc_1 = tf.layers.dropout(fc_1,
                                     rate=self.dropout,
                                     training=self.is_training,
                                     name='fc_1_dropout')
            print("fc_1: %s" % fc_1.get_shape())

            fc_2 = nf.fc_layer(fc_1,
                               model_params["fc_2"],
                               name="fc_2",
                               activat_fn=nf.lrelu,
                               is_bn=True,
                               is_training=self.is_training,
                               reg=l2_reg)
            fc_2 = tf.layers.dropout(fc_2,
                                     rate=self.dropout,
                                     training=self.is_training,
                                     name='fc_2_dropout')
            print("fc_2: %s" % fc_2.get_shape())

            fc_out = nf.fc_layer(fc_2,
                                 model_params["fc_out"],
                                 name="fc_out",
                                 activat_fn=None)
            print("fc_out: %s" % fc_out.get_shape())

            return fc_out, conv_1
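
None of these examples define the `nf` helper module they all build on. The sketch below is only an assumption about what the two most-used helpers might look like, reconstructed from the call sites above (TF 1.x, NHWC layout); the real `nf` implementation may differ in details such as weight initialization or batch-norm update handling.

import tensorflow as tf

def lrelu(x, alpha=0.2, name="lrelu"):
    # Leaky ReLU, as suggested by the activat_fn=nf.lrelu call sites (slope assumed).
    return tf.maximum(x, alpha * x, name=name)

def convolution_layer(inputs, kernel_shape, strides, name,
                      padding='SAME', activat_fn=tf.nn.relu,
                      initializer=None, reg=None,
                      is_bn=False, is_training=False):
    # kernel_shape is [height, width, out_channels]; strides is the NHWC
    # [1, s, s, 1] form used throughout the examples.
    kh, kw, out_ch = kernel_shape
    with tf.variable_scope(name):
        net = tf.layers.conv2d(inputs, out_ch, (kh, kw),
                               strides=(strides[1], strides[2]),
                               padding=padding,
                               kernel_initializer=initializer,
                               kernel_regularizer=reg,
                               name="conv")
        if is_bn:
            net = tf.layers.batch_normalization(net, training=is_training,
                                                name="bn")
        if activat_fn is not None:
            net = activat_fn(net, name="act")
        return net

def fc_layer(inputs, out_dim, name, activat_fn=tf.nn.relu,
             initializer=None, reg=None, is_bn=False, is_training=False):
    # out_dim is the number of output units (e.g. "fc_out": 2 above).
    with tf.variable_scope(name):
        net = tf.layers.dense(inputs, out_dim,
                              kernel_initializer=initializer,
                              kernel_regularizer=reg,
                              name="dense")
        if is_bn:
            net = tf.layers.batch_normalization(net, training=is_training,
                                                name="bn")
        if activat_fn is not None:
            net = activat_fn(net, name="act")
        return net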