Code example #1
    def forward_metric_net(self, x):

        with tf.variable_scope('metric', reuse=tf.AUTO_REUSE) as scope:

            w1 = tf.get_variable('w1', shape=[48, 24])
            b1 = tf.get_variable('b1', shape=[24])
            out = fc(x, w1, b1, activation='leaky_relu')
            w2 = tf.get_variable('w2', shape=[24, 16])
            b2 = tf.get_variable('b2', shape=[16])
            out = fc(out, w2, b2, activation='leaky_relu')

        return out
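
The `fc` helper here is project-local and not shown. As a minimal sketch (an assumption about its behavior, not the project's actual implementation), it plausibly wraps a matmul, bias add, and optional activation, assuming TensorFlow 1.x:

    import tensorflow as tf

    def fc(x, w, b, activation=None):
        # Hypothetical stand-in for the project's fc helper:
        # affine transform with an optional leaky ReLU.
        out = tf.nn.bias_add(tf.matmul(x, w), b)
        if activation == 'leaky_relu':
            out = tf.nn.leaky_relu(out)
        return out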
Code example #2
    def decoder(self, input_z, name='generate_img', is_training=True):
        hidden_num = 64
        output_dim = 64
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):

            x = ly.fc(input_z,
                      hidden_num * 8 * (output_dim // 16) * (output_dim // 16),
                      name='gen_fc_0')
            x = tf.reshape(x, shape=[self.imle_deep, output_dim // 16,
                                     output_dim // 16, hidden_num * 8])  ## 4, 4, 8*64

            x = ly.deconv2d(x, hidden_num * 4, name='g_deconv2d_0')  ## 8, 8, 256
            x = ly.batch_normal(x, name='g_deconv_bn_0', is_training=is_training)
            x = ly.relu(x)

            x = ly.deconv2d(x, hidden_num * 2, name='g_deconv2d_1')  ## 16, 16, 128
            x = ly.batch_normal(x, name='g_deconv_bn_1', is_training=is_training)
            x = ly.relu(x)

            x = ly.deconv2d(x, hidden_num, name='g_deconv2d_2')  ## 32, 32, 64
            x = ly.batch_normal(x, name='g_deconv_bn_2', is_training=is_training)
            x = ly.relu(x)

            x = ly.deconv2d(x, 3, name='g_deconv2d_3')  ## 64, 64, 3
            x = ly.batch_normal(x, name='g_deconv_bn_3', is_training=is_training)
            x = tf.nn.tanh(x)

            return x
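
As the inline shape comments indicate, each `ly.deconv2d` call doubles the spatial resolution (4 -> 8 -> 16 -> 32 -> 64), and the final `tanh` maps pixel values to [-1, 1], the usual convention for GAN generators. A plausible stand-in for `ly.deconv2d` (an assumption; `ly` is a project-local module) is a stride-2 transposed convolution:

    def deconv2d(x, out_channels, name, kernel_size=4, strides=2):
        # Hypothetical equivalent of ly.deconv2d: a stride-2 transposed
        # convolution that doubles height and width at each call.
        return tf.layers.conv2d_transpose(x, out_channels, kernel_size,
                                          strides=strides, padding='same',
                                          name=name)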
Code example #3
File: resnet_modify.py  Project: joyivan/cifar_resnet
    def buildResNet(self, inputs, n, is_training):
        # image_width, image_height, image_depth and num_classes are
        # presumably module-level constants defined elsewhere in
        # resnet_modify.py.
        filters = [16, 32, 64]
        inputs = tf.reshape(inputs,
                            shape=(-1, image_width, image_height, image_depth))
        # Conv1_x
        x = layer.conv(inputs, [3, 3, 3, 16], strides=1, name="Conv1")
        # Conv2_x
        #x = layer.maxpool(x, win_size=3, strides=2, name="Conv2_Pool1")
        for i in range(n):
            x = layer.res_block(x,
                                16,
                                is_training=is_training,
                                name="Conv2_Res" + str(i))
        # Conv3_x
        for i in range(n):
            x = layer.res_block(x,
                                32,
                                is_training=is_training,
                                name="Conv3_Res" + str(i))
        # Conv4_x
        for i in range(n):
            x = layer.res_block(x,
                                64,
                                is_training=is_training,
                                name="Conv4_Res" + str(i))
        #x = layer.avgpool(x, win_size=7, strides=7, name="Global_avgpool")
        x = layer.avgpool(x, win_size=8, strides=8, name="Global_avgpool")
        reshaped_x = tf.reshape(x, [-1, filters[2]])
        x = layer.fc(reshaped_x, output_dim=num_classes, name="FC")
        return x
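
With n residual blocks per stage and two convolutions per block, the depth follows the standard CIFAR ResNet formula 6n + 2 (one stem convolution, three stages of n two-conv blocks, one fully connected layer). A hypothetical usage (`model` and `images` are illustrative names):

    logits = model.buildResNet(images, n=3, is_training=True)  # 6*3 + 2 = ResNet-20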
Code example #4
File: GAN.py  Project: xukeyuxin/GLANN-TF
    def discriminator(self,
                      x,
                      name='discriminator_img',
                      is_training=True):  ## 64,64,3
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            x = ly.conv2d(x, 64, strides=2, use_bias=True,
                          name='d_conv_0')  ## 32,32,64
            x = ly.batch_normal(x, name='d_bn_0', is_training=is_training)
            x = ly.relu(x, 0.2)

            x = ly.conv2d(x, 128, strides=2, use_bias=True,
                          name='d_conv_1')  ## 16,16,128
            x = ly.batch_normal(x, name='d_bn_1', is_training=is_training)
            x = ly.relu(x, 0.2)

            x = ly.conv2d(x, 256, strides=2, use_bias=True,
                          name='d_conv_2')  ## 8,8,256
            x = ly.batch_normal(x, name='d_bn_2', is_training=is_training)
            x = ly.relu(x, 0.2)

            x = ly.conv2d(x, 512, strides=2, use_bias=True,
                          name='d_conv_3')  ## 4,4,512
            x = ly.batch_normal(x, name='d_bn_3', is_training=is_training)
            x = ly.relu(x, 0.2)

            x = ly.fc(x, 1, name='fc_0')
            x = tf.nn.sigmoid(x)
            return x
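
This discriminator squashes its final `fc` output through `tf.nn.sigmoid`. As a usage note (an assumption; the repository's actual loss code is not shown), the standard GAN discriminator loss is usually computed from the pre-sigmoid logits for numerical stability:

    def gan_d_loss(d_real_logits, d_fake_logits):
        # Standard GAN discriminator loss on logits (sketch): push real
        # samples toward 1 and generated samples toward 0.
        real = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.ones_like(d_real_logits), logits=d_real_logits)
        fake = tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.zeros_like(d_fake_logits), logits=d_fake_logits)
        return tf.reduce_mean(real) + tf.reduce_mean(fake)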
Code example #5
File: resnet_modify.py  Project: joyivan/cifar_resnet
    def infer(self, inputs, n, is_training, reuse=False):
        with tf.variable_scope('stage0', reuse=reuse):
            x = layer.conv_bn_relu(inputs, [3, 3, 3, 16],
                                   1,
                                   is_training,
                                   name='stage0')
        for i in range(n):
            with tf.variable_scope('stage1_res_%d' % i, reuse=reuse):
                if i == 0:
                    x = layer.res_block(x,
                                        16,
                                        is_training,
                                        name='stage1_res%d' % i,
                                        first_block=True)
                else:
                    x = layer.res_block(x,
                                        16,
                                        is_training,
                                        name='stage1_res%d' % i)

        for i in range(n):
            with tf.variable_scope('stage2_res_%d' % i, reuse=reuse):
                x = layer.res_block(x,
                                    32,
                                    is_training,
                                    name='stage2_res%d' % i)
        for i in range(n):
            with tf.variable_scope('stage3_res_%d' % i, reuse=reuse):
                x = layer.res_block(x,
                                    64,
                                    is_training,
                                    name='stage3_res%d' % i)

        with tf.variable_scope('fc', reuse=reuse):
            x = layer.batchNorm(x, is_training, 'fc_batchNorm')
            x = tf.nn.relu(x)
            feature = tf.reshape(layer.avgpool(x, 8, 8,
                                               name='global_avg_pool'),
                                 shape=(-1, 64))
            x = layer.fc(feature, 10, name='fc')
        return x
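
Unlike the `tf.AUTO_REUSE` examples above, this version threads an explicit `reuse` flag through every scope. A hypothetical usage (names are illustrative): build the training graph first, then share the same weights with an evaluation graph:

    train_logits = model.infer(train_images, n=3, is_training=True)
    eval_logits = model.infer(eval_images, n=3, is_training=False,
                              reuse=True)  # reuses the training variables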
Code example #6
    def __call__(self, input):
        with tf.variable_scope(self.name, reuse=self.reuse):
            input = ly.conv2d(input, 64, strides=2,
                              name='conv_0')  ## (-1,150,150,64)
            input = ly.batch_normal(input, name='bn_0')
            input = tf.nn.leaky_relu(input)

            input = ly.conv2d(input, 128, strides=2,
                              name='conv_1')  ## (-1,75,75,128)
            input = ly.batch_normal(input, name='bn_1')
            input = tf.nn.leaky_relu(input)

            input = ly.conv2d(input, 256, strides=2,
                              name='conv_2')  ## (-1,38,38,256)
            input = ly.batch_normal(input, name='bn_2')
            input = tf.nn.leaky_relu(input)

            input = ly.conv2d(input, 512, strides=2,
                              name='conv_3')  ## (-1,19,19,512)
            input = ly.batch_normal(input, name='bn_3')
            input = tf.nn.leaky_relu(input)

            print(input.shape)
            input = ly.conv2d(input, 512, strides=2,
                              name='conv_4')  ## (-1,10,10,512)
            input = ly.batch_normal(input, name='bn_4')
            input = tf.nn.leaky_relu(input)

            ## avg
            input = tf.reduce_mean(input, axis=[1, 2])
            input = tf.nn.dropout(input, keep_prob=0.5)

            input = ly.fc(input, 1, name='fc_0')
            # input = ly.batch_normal(input, name='bn_5')
            input = tf.nn.sigmoid(input)

        return input
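
Note that `keep_prob=0.5` is hard-coded here, so dropout stays active even at inference. A common fix (an assumption, not in the source) is to feed the keep probability through a placeholder that defaults to 1.0:

    import tensorflow as tf

    # Sketch: switchable dropout; feed keep_prob=0.5 during training and
    # leave the default 1.0 (no dropout) at inference time.
    keep_prob = tf.placeholder_with_default(1.0, shape=[], name='keep_prob')
    features = tf.placeholder(tf.float32, shape=[None, 512])
    dropped = tf.nn.dropout(features, keep_prob=keep_prob)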
Code example #7
    def classify(self,
                 d_opt=None,
                 name='classify',
                 is_training=True):  ### 64,64,1
        with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
            x = tf.pad(self.input_img, [[0, 0], [5, 5], [5, 5], [0, 0]],
                       "REFLECT")
            x = ly.conv2d(x,
                          64,
                          kernal_size=11,
                          name='conv_0',
                          padding='VALID',
                          use_bias=True)
            x = ly.batch_normal(x, name='bn_0', is_training=is_training)
            x = ly.relu(x)

            x = ly.maxpooling2d(x)  ## 32,32,64

            x = tf.pad(x, [[0, 0], [3, 3], [3, 3], [0, 0]], "REFLECT")
            x = ly.conv2d(x,
                          128,
                          kernal_size=7,
                          name='conv_1',
                          padding='VALID',
                          use_bias=True)
            x = ly.batch_normal(x, name='bn_1', is_training=is_training)
            x = ly.relu(x)

            x = ly.maxpooling2d(x)  ## 16,16,128

            x = tf.pad(x, [[0, 0], [2, 2], [2, 2], [0, 0]], "REFLECT")
            x = ly.conv2d(x,
                          256,
                          kernal_size=5,
                          name='conv_2',
                          padding='VALID',
                          use_bias=True)
            x = ly.batch_normal(x, name='bn_2', is_training=is_training)
            x = ly.relu(x)

            x = ly.maxpooling2d(x)  ## 8,8,256

            x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
            x = ly.conv2d(x,
                          512,
                          kernal_size=3,
                          name='conv_3',
                          padding='VALID',
                          use_bias=True)
            x = ly.batch_normal(x, name='bn_3', is_training=is_training)
            x = ly.relu(x)

            x = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
            x = ly.conv2d(x,
                          512,
                          kernal_size=3,
                          name='conv_4',
                          padding='VALID',
                          use_bias=True)
            x = ly.batch_normal(x, name='bn_4', is_training=is_training)
            x = ly.relu(x)

            x = ly.maxpooling2d(x)  ## 4,4,512

            x = ly.fc(x, 1024, name='fc_0', use_bias=True)
            x = ly.batch_normal(x, name='bn_5', is_training=is_training)
            x = ly.relu(x)
            x = tf.nn.dropout(x, keep_prob=0.5)

            x = ly.fc(x, self.class_num, name='fc_1', use_bias=True)
            self.pred_x_index = tf.argmax(tf.nn.softmax(x), axis=-1)
            self.pred_x_value = tf.reduce_max(tf.nn.softmax(x), axis=-1)

            if is_training:
                cross_loss = tf.reduce_mean(
                    tf.nn.softmax_cross_entropy_with_logits_v2(
                        labels=self.input_label, logits=x),
                    axis=0)
                l2_loss = 0.0005 * tf.reduce_sum([
                    tf.nn.l2_loss(var)
                    for var in self.get_single_var('classify/fc')
                ])
                loss = cross_loss + l2_loss
                self.summaries.append(tf.summary.scalar('loss', loss))

                _grad = d_opt.compute_gradients(
                    loss, var_list=self.get_vars('classify'))
                train_op = d_opt.apply_gradients(_grad)

                return train_op
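
`get_single_var` and `get_vars` are project-local helpers. A rough equivalent of the L2 penalty over the fully connected weights (an assumption about what `get_single_var('classify/fc')` returns):

    def l2_weight_decay(scope_prefix, coeff=0.0005):
        # Sketch: sum of L2 norms over trainable variables whose names
        # start with scope_prefix, scaled by a weight-decay coefficient.
        weights = [v for v in tf.trainable_variables()
                   if v.name.startswith(scope_prefix)]
        return coeff * tf.add_n([tf.nn.l2_loss(v) for v in weights])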
Code example #8
    def build_model(self, images, keep_prob):
        # Convolution layer
        x_image = tf.reshape(images, [-1, self.n_in[0], self.n_in[1], 3])

        with tf.variable_scope("Discriminator") as scope:
            with tf.variable_scope("conv_layer1") as scope:
                output = layer.conv2d(x=x_image,
                                      stride=2,
                                      filter_size=[
                                          self.filter_size[0],
                                          self.filter_size[1], 3,
                                          self.layers[0]
                                      ],
                                      i=1,
                                      BatchNorm=True)
                output = activation.leakyReLU(output)
                tf.summary.histogram("conv_layer1", output)

            with tf.variable_scope("conv_layer2") as scope:
                # ResidualBlock
                output = layer.ResidualBlock(x=output,
                                             stride=1,
                                             filter_size=[
                                                 self.filter_size[0],
                                                 self.filter_size[1],
                                                 self.layers[0], self.layers[1]
                                             ],
                                             i=str(2) + '_' + str(1),
                                             BatchNorm=True)
                output = layer.ResidualBlock(x=output,
                                             stride=1,
                                             filter_size=[
                                                 self.filter_size[0],
                                                 self.filter_size[1],
                                                 self.layers[0], self.layers[1]
                                             ],
                                             i=str(2) + '_' + str(2),
                                             BatchNorm=True)
                output = layer.conv2d(x=output,
                                      stride=2,
                                      filter_size=[
                                          self.filter_size[0],
                                          self.filter_size[1], self.layers[0],
                                          self.layers[1]
                                      ],
                                      i=2,
                                      BatchNorm=True)
                output = activation.leakyReLU(output)
                output = tf.nn.dropout(output, keep_prob)

            tf.summary.histogram("conv_layer2", output)

            with tf.variable_scope("conv_layer3") as scope:
                output = layer.conv2d(x=output,
                                      stride=2,
                                      filter_size=[
                                          self.filter_size[0],
                                          self.filter_size[1], self.layers[1],
                                          self.layers[2]
                                      ],
                                      i=3,
                                      BatchNorm=True)
                output = activation.leakyReLU(output)
                tf.summary.histogram("conv_layer3", output)

            h_fc_1 = tf.nn.dropout(output, keep_prob)
            # Fc1
            output = layer.fc(h_fc_1, self.labels, "", BatchNorm=False)

        return output
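
`layer.ResidualBlock` is project-specific; a minimal sketch of the general pattern it names (an assumption, using plain `tf.layers` and the leaky ReLU seen in the surrounding code; the identity skip requires matching channel counts):

    def residual_block(x, filters, name):
        # Two 3x3 convolutions plus an identity skip connection.
        with tf.variable_scope(name):
            y = tf.layers.conv2d(x, filters, 3, padding='same',
                                 activation=tf.nn.leaky_relu, name='conv1')
            y = tf.layers.conv2d(y, filters, 3, padding='same', name='conv2')
            return tf.nn.leaky_relu(x + y)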
Code example #9
    def model(self, image_batch=None, label_batch=None):
        """创建网络graph"""
        # 1st Layer: Convolution-BatchNorm-ReLU-pool layer
        self.conv1 = layer.conv_block(image_batch,
                                      11,
                                      11,
                                      64,
                                      2,
                                      2,
                                      is_training=self.is_training,
                                      norm=self.norm,
                                      initializer=self.initializer,
                                      name='conv_block1')
        self.pool1 = layer.max_pool(self.conv1,
                                    3,
                                    3,
                                    2,
                                    2,
                                    padding='SAME',
                                    name='pool1')

        # 2nd Layer: Convolution-BatchNorm-ReLU-pool layer
        self.conv2 = layer.conv_block(self.pool1,
                                      7,
                                      7,
                                      96,
                                      1,
                                      1,
                                      is_training=self.is_training,
                                      norm=self.norm,
                                      initializer=self.initializer,
                                      name='conv_block2')
        self.pool2 = layer.max_pool(self.conv2,
                                    3,
                                    3,
                                    2,
                                    2,
                                    padding='SAME',
                                    name='pool2')

        # 3rd Layer: Convolution-BatchNorm-ReLU-pool layer
        self.conv3 = layer.conv_block(self.pool2,
                                      5,
                                      5,
                                      96,
                                      1,
                                      1,
                                      is_training=self.is_training,
                                      norm=self.norm,
                                      initializer=self.initializer,
                                      name='conv_block3')
        self.pool3 = layer.max_pool(self.conv3,
                                    3,
                                    3,
                                    1,
                                    1,
                                    padding='SAME',
                                    name='pool3')

        # 4th Layer: Convolution-BatchNorm-ReLU-pool layer
        self.conv4 = layer.conv_block(self.pool3,
                                      3,
                                      3,
                                      96,
                                      1,
                                      1,
                                      is_training=self.is_training,
                                      norm=self.norm,
                                      initializer=self.initializer,
                                      name='conv_block4')
        self.pool4 = layer.max_pool(self.conv4,
                                    3,
                                    3,
                                    1,
                                    1,
                                    padding='SAME',
                                    name='pool4')

        # 5th Layer: Fully connected-BatchNorm-ReLU -> Dropout
        self.fc1 = layer.fc(self.pool4,
                            256,
                            initializer=self.initializer,
                            relu=True,
                            is_training=self.is_training,
                            norm=self.norm,
                            name='fc1')
        self.dropout1 = layer.dropout(self.fc1,
                                      self.keep_prob,
                                      name='dropout1')

        # 6th Layer: fully connected layer
        self.fc2 = layer.fc(self.dropout1,
                            10,
                            initializer=self.initializer,
                            relu=False,
                            is_training=self.is_training,
                            norm=None,
                            name='fc2')

        if label_batch is not None:
            loss = self.netloss(self.fc2, label_batch)
            correct_prediction = tf.equal(tf.argmax(self.fc2, 1), label_batch)
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            return loss, accuracy
            #return loss,accuracy,self.fc2,tf.argmax(self.fc2,1),label_batch
        else:
            # At prediction time label_batch is None; only the predicted
            # class indices are returned.
            #return self.fc2
            return tf.argmax(self.fc2, 1)
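
Hypothetical usage (names are illustrative): with labels the method returns training metrics, and without labels it returns predicted class indices:

    loss, accuracy = net.model(image_batch, label_batch)  # training path
    predictions = net.model(image_batch)  # label_batch=None -> class ids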