Example #1
def VGG16(x, n_classes, is_pretrain=True):
    x = utils.conv_layer('conv1_1', x, 64, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.conv_layer('conv1_2', x, 64, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.pool_layer('pool1', x, filter=[1, 2, 2, 1], strides=[1, 2, 2, 1], is_max_pool=True)

    x = utils.conv_layer('conv2_1', x, 128, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.conv_layer('conv2_2', x, 128, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.pool_layer('pool2', x, filter=[1, 2, 2, 1], strides=[1, 2, 2, 1], is_max_pool=True)

    x = utils.conv_layer('conv3_1', x, 256, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.conv_layer('conv3_2', x, 256, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.conv_layer('conv3_3', x, 256, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.pool_layer('pool3', x, filter=[1,2,2,1], strides=[1,2,2,1], is_max_pool=True)

    x = utils.conv_layer('conv4_1', x, 512, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.conv_layer('conv4_2', x, 512, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.conv_layer('conv4_3', x, 512, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.pool_layer('pool4', x, filter=[1,2,2,1], strides=[1,2,2,1], is_max_pool=True)

    x = utils.conv_layer('conv5_1', x, 512, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.conv_layer('conv5_2', x, 512, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.conv_layer('conv5_3', x, 512, filter=[3, 3], strides=[1, 1, 1, 1], is_pretrain=is_pretrain)
    x = utils.pool_layer('pool5', x, filter=[1, 2, 2, 1], strides=[1, 2, 2, 1], is_max_pool=True)

    x = utils.fc_layer('fc6', x, num_output=4096)
    x = utils.batch_normalization(x)
    x = utils.fc_layer('fc7', x, num_output=4096)
    x = utils.batch_normalization(x)
    x = utils.fc_layer('fc8', x, num_output=n_classes)

    return x
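
A minimal driver sketch for the function above (assuming TF1 graph mode, 224x224 RGB inputs, and that the utils helper module is importable; the placeholder shape and the 1000-class setting are illustrative assumptions):

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')
logits = VGG16(images, n_classes=1000, is_pretrain=True)
probs = tf.nn.softmax(logits)  # turn the fc8 logits into class probabilities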
Example #2
def build_disc0(x, testing=False, reuse=False):
    disc0_l1 = x + tf.random_normal(shape=tf.shape(x), stddev=0.05)
    disc0_l1 = utils.lrelu(
        utils.conv2d(disc0_l1, (3, 3, 3, 96), name='disc0_conv1'))

    # 32 x 32 --> 16 x 16
    disc0_l2 = utils.batch_normalization(utils.lrelu(
        utils.conv2d(disc0_l1, (3, 3, 96, 96),
                     stride=[1, 2, 2, 1],
                     name='disc0_conv2')),
                                         name='bn1',
                                         reuse=reuse)

    disc0_l2 = tf.nn.dropout(disc0_l2, 0.1 if testing else 1)
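    # note: the second argument to tf.nn.dropout here is keep_prob, so units are
    # kept with probability 0.1 when testing and dropout is disabled (keep_prob=1) otherwise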

    # 16 x 16 --> 8x8
    disc0_l3 = utils.batch_normalization(utils.lrelu(
        utils.conv2d(disc0_l2, (3, 3, 96, 192),
                     stride=[1, 2, 2, 1],
                     name='disc0_conv3')),
                                         name='bn2',
                                         reuse=reuse)

    # 8x8 --> 8x8
    disc0_l4 = utils.batch_normalization(utils.lrelu(
        utils.conv2d(disc0_l3, (3, 3, 192, 192), name='disc0_conv4')),
                                         name='bn3',
                                         reuse=reuse)

    disc0_l4 = tf.nn.dropout(disc0_l4, 0.1 if testing else 1)

    # 8x8 --> 6x6
    disc0_l5 = tf.layers.batch_normalization(utils.lrelu(
        utils.conv2d(disc0_l4, (3, 3, 192, 192),
                     padding='VALID',
                     name='disc0_conv5')),
                                             name='bn4',
                                             reuse=reuse)

    disc0_l5 = tf.reshape(disc0_l5, [100, 6, 6, 192])
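    # note: this reshape hard-codes a batch size of 100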
    disc0_shared = utils.lrelu(
        utils.network_in_network(disc0_l5,
                                 192,
                                 num_units=192,
                                 name='disc0_shared'))
    disc0_shared_flat = tf.reshape(disc0_shared, [-1, 192 * 6 * 6])
    disc0_z_recon = utils.dense(disc0_shared_flat,
                                num_inputs=192 * 6 * 6,
                                num_units=16,
                                name='disc0_z_recon')

    disc0_shared_pool = tf.reduce_mean(disc0_shared, [1, 2])
    disc0_adv = utils.dense(disc0_shared_pool,
                            num_inputs=192,
                            num_units=10,
                            name='disc0_z_adv')
    # disc0_adv is the pre-softmax classification output for the discriminator

    return disc0_adv, disc0_z_recon
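
A hedged sketch of wiring this discriminator into a graph (assuming 32x32 RGB inputs and the batch size of 100 hard-coded in the reshape above; the placeholder is illustrative):

x = tf.placeholder(tf.float32, [100, 32, 32, 3], name='x')
disc0_adv, disc0_z_recon = build_disc0(x)
# disc0_adv: (100, 10) pre-softmax class scores; disc0_z_recon: (100, 16) z reconstruction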
Example #3
def build_disc1(h1, testing=False, reuse=False):
    # 16 x 16 --> 8x8
    disc1_conv1 = utils.batch_normalization(utils.lrelu(
        utils.conv2d(h1, (3, 3, 3, 32),
                     stride=[1, 2, 2, 1],
                     name='disc1_conv1')),
                                            name='disc1_bn1',
                                            reuse=reuse)

    disc1_conv2 = utils.batch_normalization(utils.lrelu(
        utils.conv2d(disc1_conv1, (3, 3, 32, 64), name='disc1_conv2')),
                                            name='disc1_bn2',
                                            reuse=reuse)

    # 8x8 --> 8x8
    disc1_conv3 = utils.batch_normalization(utils.lrelu(
        utils.conv2d(disc1_conv2, (3, 3, 64, 64), name='disc1_conv3')),
                                            name='disc1_bn3',
                                            reuse=reuse)

    disc1_conv3 = tf.nn.dropout(disc1_conv3, 0.1 if testing else 1)

    # 8x8 --> 6x6
    disc1_conv4 = tf.layers.batch_normalization(utils.lrelu(
        utils.conv2d(disc1_conv3, (3, 3, 64, 64),
                     padding='VALID',
                     name='disc1_conv4')),
                                                name='disc1_bn4',
                                                reuse=reuse)

    disc1_l5 = tf.reshape(disc1_conv4, [100, 6, 6, 64])

    disc1_shared = utils.lrelu(
        utils.network_in_network(disc1_l5,
                                 64,
                                 num_units=64,
                                 name='disc1_shared'))
    disc1_shared_flat = tf.reshape(disc1_shared, [-1, 64 * 6 * 6])
    disc1_z_recon = utils.dense(disc1_shared_flat,
                                num_inputs=64 * 6 * 6,
                                num_units=50,
                                name='disc1_z_recon')

    disc1_shared_pool = tf.reduce_mean(disc1_shared, [1, 2])
    disc1_adv = utils.dense(disc1_shared_pool,
                            num_inputs=64,
                            num_units=1,
                            name='disc1_z_adv')
    # disc1_adv is the pre-sigmoid output of the discriminator

    return disc1_adv, disc1_z_recon
Example #4
def build_gen1(y, z1):
    # y is of dimension (batch_size, 8, 8, 3)
    gen1_z_embed = utils.batch_normalization(
        tf.nn.relu(
            utils.dense(z1,
                        num_inputs=50,
                        num_units=256,
                        bias=True,
                        name='gen1_z_embed')))

    y_flatten = tf.reshape(y, (-1, 8 * 8 * 3))
    gen1_y_embed = tf.nn.relu(
        utils.bias(utils.batch_normalization(
            utils.dense(y_flatten,
                        num_inputs=192,
                        num_units=512,
                        bias=False,
                        name='gen1_y_embed')), (512, ),
                   name='gen1_y_embed_bias'))

    gen1_in = tf.concat([gen1_z_embed, gen1_y_embed], axis=1)

    gen1_l1 = tf.transpose(
        tf.reshape(
            tf.nn.relu(
                utils.bias(utils.batch_normalization(
                    utils.dense(gen1_in,
                                num_inputs=768,
                                num_units=1024,
                                bias=False,
                                name='gen1_l1')), (1024, ),
                           name='gen1_l1_bias')), (-1, 64, 4, 4)),
        [0, 2, 3, 1])

    gen1_l2 = tf.nn.relu(
        utils.bias(utils.batch_normalization(
            utils.conv2d_transpose(gen1_l1, (4, 4, 64, 64), (100, 11, 11, 64),
                                   bias=False,
                                   padding='VALID',
                                   stride=(1, 2, 2, 1),
                                   name='gen1_l3')), (64, ),
                   name='gen1_l3_bias'))

    gen1_l3 = tf.sigmoid(
        utils.conv2d_transpose(gen1_l2, (6, 6, 3, 64), (100, 16, 16, 3),
                               padding='VALID',
                               name='gen1_l4'))

    return gen1_l3
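
A hedged call sketch (assuming the batch size of 100 hard-coded in the deconvolution output shapes, and a 50-dimensional z1 as implied by num_inputs=50):

z1 = tf.random_normal([100, 50])                # latent code, matches num_inputs=50
y = tf.placeholder(tf.float32, [100, 8, 8, 3])  # conditioning input described above
h1 = build_gen1(y, z1)                          # (100, 16, 16, 3) sigmoid outputs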
Example #5
    def build_net(self):
        with tf.variable_scope('resnet' + str(self.DEPTH_OF_NET)):
            conv1 = utils.convolution_layer(self.INPUTS,
                                            kernel_size=7,
                                            stride=2,
                                            kernel_nums=64,
                                            name='conv1')
            norm1 = utils.batch_normalization(conv1)
            relu1 = utils.nonlinear_ops(norm1)  # 112*112

            temp = relu1
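            # LAYER_INFO[name] supplies each stage's residual-block widths ('nums') and block count ('length')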
            for i in range(4):  # 2:56*56 3:28*28 4:14*14 5:7*7
                name = 'conv' + str(i + 2)
                with tf.variable_scope(name):
                    if i == 0:
                        kernel_size = 3
                        padding = 'SAME'
                    else:
                        kernel_size = 1
                        padding = 'VALID'
                    temp = utils.max_pool_layer(temp,
                                                kernel_size,
                                                2,
                                                padding=padding,
                                                name='down_sample')
                    temp = utils.res_block(temp, self.LAYER_INFO[name]['nums'],
                                           self.LAYER_INFO[name]['length'])

            avg_pool = utils.average_pool_layer(temp, 7, 1, name='avg_pool')  # pool 7x7 features to (batch, 1, 1, 2048)
            fcinput = tf.reshape(avg_pool, [-1, 2048])
            self.fc = utils.fully_connect_layer(fcinput,
                                                2048,
                                                self.NUM_OF_CLASS,
                                                activation=None,
                                                name='fc')
Example #6
    def forward(self, x, y, miss_list=None, norm_params=None):
        """
        Forward through the model
        :param x: features
        :param y: labels
        :param miss_list: miss list
        :param norm_params: normalization parameters
        :return: reconstruction
        """
        self.miss_list = miss_list
        # Batch normalization of the data
        x_norm, norm_params = batch_normalization(x, self.types_list,
                                                  miss_list)
        self.x_norm = x_norm  # to classify later

        # Auxiliary inference q(a|x)
        q_a, q_a_mu, q_a_log_var = self.aux_encoder(x_norm)
        self.samples_qa.append([q_a, y])

        # Latent inference q(z|a,y,x)
        z, z_mu, z_log_var = self.encoder(torch.cat([x_norm, y, q_a], dim=1))
        self.samples_z.append([z, y])

        # Generative p(x|g(z),y)
        # The data x (with miss_list and norm_params) are also passed to the
        # decoder so the reconstruction loss / log-likelihood can be computed
        # there, alongside the per-likelihood parameters and samples.
        # A slightly confusing design choice, but a convenient one.
        log_p_x, log_p_x_missing, samples_x, params_x = self.decoder(
            torch.cat([z, y], dim=1), x, miss_list, norm_params)

        # Generative p(a|z,y,x)
        p_a, p_a_mu, p_a_log_var = self.aux_decoder(
            torch.cat([x_norm, y, z], dim=1))
        self.samples_pa.append([p_a, y])

        # KL(q(a|x) || p(a|z,y,x))
        a_kl = self._kld(q_a, (q_a_mu, q_a_log_var), (p_a_mu, p_a_log_var))
        # KL(q(z|a,y,x) || p(z))
        z_kl = self._kld(z=z, q_param=(z_mu, z_log_var),
                         p_param=None)  # using z prior

        self.kl_divergence = (a_kl + z_kl).unsqueeze(1)

        return log_p_x, log_p_x_missing, samples_x, params_x
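
For reference, a minimal sketch of the diagonal-Gaussian KL divergence that _kld presumably computes (an assumption about its internals; a standard-normal prior corresponds to p_param=None):

import torch

def gaussian_kld(mu_q, log_var_q, mu_p, log_var_p):
    # KL(N(mu_q, var_q) || N(mu_p, var_p)) for diagonal Gaussians, summed over dims
    var_q, var_p = log_var_q.exp(), log_var_p.exp()
    kld = 0.5 * (log_var_p - log_var_q + (var_q + (mu_q - mu_p) ** 2) / var_p - 1)
    return kld.sum(dim=-1)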
Example #7
    X, Y = [], []

    for m, line in enumerate(file_handler):
        values = line.rstrip().split(',')
        X.append([float(v) for v in values[:-1]])
        Y.append(float(values[-1]))

    X = np.array(X).reshape(m + 1, -1).T
    Y = np.array(Y).reshape(m + 1, -1).T

    print(X.shape)

    plt.scatter(X, Y)
    plt.show()

    X_norm = utils.batch_normalization(X)

    plt.scatter(X_norm, Y)
    plt.show()

    ann = ArtificialNeuralNetwork(X_norm.shape[0], 'mse')

    ann.push_layer(10, 'sigmoid', 1)
    ann.push_layer(5, 'sigmoid', 1)
    ann.push_layer(1, 'linear', 1)
    model: ArtificialNeuralNetwork
    model, metrics = Gradient_Descent(X_norm,
                                      Y,
                                      ann,
                                      500,
                                      X.shape[1],
Example #8
def build_gen0(h1, z0, preload_weights=32 * [None]):
    gen0_z_embed1 = tf.nn.relu(
        utils.bias(
            utils.batch_normalization(utils.dense(
                z0,
                num_inputs=16,
                num_units=128,
                bias=False,
                weight_preset=preload_weights[0],
                name='gen0_z_embed1'),
                                      scale_preset=preload_weights[2],
                                      mean_preset=preload_weights[3],
                                      variance_preset=preload_weights[4]),
            (128, ),  # bias shape
            bias_preset=preload_weights[1],
            name='gen0_z_embed1_bias'))

    gen0_z_embed2 = tf.nn.relu(
        utils.bias(
            utils.batch_normalization(utils.dense(
                gen0_z_embed1,
                num_inputs=128,
                num_units=128,
                bias=False,
                weight_preset=preload_weights[5],
                name='gen0_z_embed2'),
                                      scale_preset=preload_weights[7],
                                      mean_preset=preload_weights[8],
                                      variance_preset=preload_weights[9]),
            (128, ),  # bias shape
            bias_preset=preload_weights[6],
            name='gen0_z_embed2_bias'))

    h1_flatten = tf.reshape(h1, (-1, 16 * 16 * 3))

    gen0_in = tf.concat([h1_flatten, gen0_z_embed2], axis=1)

    gen0_in_reshaped = tf.transpose(
        tf.reshape(
            tf.nn.relu(
                utils.bias(
                    utils.batch_normalization(
                        utils.dense(gen0_in,
                                    num_inputs=896,
                                    num_units=256 * 5 * 5,
                                    bias=False,
                                    weight_preset=preload_weights[10],
                                    name='gen0_embed'),
                        scale_preset=preload_weights[12],
                        mean_preset=preload_weights[13],
                        variance_preset=preload_weights[14]),
                    (256 * 5 * 5, ),  # bias shape
                    bias_preset=preload_weights[11],
                    name='gen0_embed_bias')),
            [-1, 256, 5, 5]),
        [0, 2, 3, 1])

    gen0_deconv1 = tf.nn.relu(
        utils.bias(
            utils.batch_normalization(utils.conv2d_transpose(
                gen0_in_reshaped, (5, 5, 256, 256), (100, 10, 10, 256),
                bias=False,
                weight_preset=preload_weights[15],
                stride=(1, 2, 2, 1),
                padding='SAME',
                name='gen0_deconv1'),
                                      scale_preset=preload_weights[17],
                                      mean_preset=preload_weights[18],
                                      variance_preset=preload_weights[19]),
            (256, ),  # bias shape
            bias_preset=preload_weights[16],
            name='gen0_deconv1_bias'))

    gen0_deconv2 = tf.nn.relu(
        utils.bias(
            utils.batch_normalization(utils.conv2d_transpose(
                gen0_deconv1, (5, 5, 128, 256), (100, 14, 14, 128),
                bias=False,
                weight_preset=preload_weights[20],
                padding='VALID',
                name='gen0_deconv2'),
                                      scale_preset=preload_weights[22],
                                      mean_preset=preload_weights[23],
                                      variance_preset=preload_weights[24]),
            (128, ),  # bias shape
            bias_preset=preload_weights[21],
            name='gen0_deconv2_bias'))

    gen0_deconv3 = tf.nn.relu(
        utils.bias(
            utils.batch_normalization(utils.conv2d_transpose(
                gen0_deconv2, (5, 5, 128, 128), (100, 28, 28, 128),
                bias=False,
                weight_preset=preload_weights[25],
                stride=(1, 2, 2, 1),
                padding='SAME',
                name='gen0_deconv3'),
                                      scale_preset=preload_weights[27],
                                      mean_preset=preload_weights[28],
                                      variance_preset=preload_weights[29]),
            (128, ),  # bias shape
            bias_preset=preload_weights[26],
            name='gen0_deconv3_bias'))

    gen0_deconv4 = tf.sigmoid(
        utils.conv2d_transpose(gen0_deconv3, (5, 5, 3, 128), (100, 32, 32, 3),
                               weight_preset=preload_weights[30],
                               bias_preset=preload_weights[31],
                               padding='VALID',
                               name='gen0_deconv4'))
    return gen0_deconv4
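
A hedged call sketch (assuming the batch size of 100 hard-coded above, a 16-dimensional z0 as implied by num_inputs=16, and that h1 comes from build_gen1):

z0 = tf.random_normal([100, 16])                   # latent code, matches num_inputs=16
h1 = tf.placeholder(tf.float32, [100, 16, 16, 3])  # stand-in for the gen1 output
x0 = build_gen0(h1, z0)                            # (100, 32, 32, 3) generated images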
Example #9
    def u_net_model(self,
                    image,
                    w_inputs,
                    h_inputs,
                    batch_size,
                    input_channels=3,
                    n_classes=1,
                    keep_prob=0.5,
                    base_channel=64):

        with tf.variable_scope('u-net'):

            image = batch_normalization(image, input_channels, axis=[0, 1, 2])
            tf.summary.image(self.name + '/image', image, batch_size)

            #layer1
            with tf.variable_scope('conv1_1'):
                conv1_1_weight = tf.get_variable(
                    'conv1_1_weight', [3, 3, input_channels, base_channel],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv1_1_bias = tf.get_variable(
                    'conv1_1_bias', [base_channel],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv1_1_result = tf.nn.conv2d(image,
                                              conv1_1_weight, [1, 1, 1, 1],
                                              padding='SAME',
                                              name='conv1_1')
                conv1_1_relu = tf.nn.relu(tf.add(conv1_1_result, conv1_1_bias),
                                          name='conv1_1_relu')
    #            conv1_1_relu=tf.nn.dropout(conv1_1_relu,keep_prob)

            with tf.variable_scope('conv1_2'):
                conv1_2_weight = tf.get_variable(
                    'conv1_2_weight', [3, 3, base_channel, base_channel],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv1_2_bias = tf.get_variable(
                    'conv1_2_bias', [base_channel],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv1_2_result = tf.nn.conv2d(conv1_1_relu,
                                              conv1_2_weight, [1, 1, 1, 1],
                                              padding='SAME',
                                              name='conv1_2')
                conv1_2_relu = tf.nn.relu(tf.add(conv1_2_result, conv1_2_bias),
                                          name='conv1_2_relu')  #h_w_inputs
    #            conv1_2_relu=tf.nn.dropout(conv1_2_relu,keep_prob)
    #            conv1_2_relu=batch_normalization(conv1_2_relu,base_channel,axis=[0,1,2])

            with tf.variable_scope('pool1'):
                pool1 = tf.nn.max_pool(conv1_2_relu, [1, 2, 2, 1],
                                       [1, 2, 2, 1],
                                       padding='SAME')  #h_w_inputs/2
                pool1 = tf.nn.dropout(pool1, keep_prob)

            #layer2
            with tf.variable_scope('conv2_1'):
                conv2_1_weight = tf.get_variable(
                    'conv2_1_weight', [3, 3, base_channel, base_channel * 2],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv2_1_bias = tf.get_variable(
                    'conv2_1_bias', [base_channel * 2],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv2_1_result = tf.nn.conv2d(pool1,
                                              conv2_1_weight, [1, 1, 1, 1],
                                              padding='SAME',
                                              name='conv2_1')
                conv2_1_relu = tf.nn.relu(tf.add(conv2_1_result, conv2_1_bias),
                                          name='conv2_1_relu')
    #            conv2_1_relu=tf.nn.dropout(conv2_1_relu,keep_prob)

            with tf.variable_scope('conv2_2'):
                conv2_2_weight = tf.get_variable(
                    'conv2_2_weight',
                    [3, 3, base_channel * 2, base_channel * 2],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv2_2_bias = tf.get_variable(
                    'conv2_2_bias', [base_channel * 2],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv2_2_result = tf.nn.conv2d(conv2_1_relu,
                                              conv2_2_weight, [1, 1, 1, 1],
                                              padding='SAME',
                                              name='conv2_2')
                conv2_2_relu = tf.nn.relu(tf.add(conv2_2_result, conv2_2_bias),
                                          name='conv2_2_relu')  #h_w_inputs/2
                #            conv2_2_relu=tf.nn.dropout(conv2_2_relu,keep_prob)
                conv2_2_relu = batch_normalization(conv2_2_relu,
                                                   base_channel * 2,
                                                   axis=[0, 1, 2])

            with tf.variable_scope('pool2'):
                pool2 = tf.nn.max_pool(conv2_2_relu, [1, 2, 2, 1],
                                       [1, 2, 2, 1],
                                       padding='SAME')  #h_w_inputs/4
                pool2 = tf.nn.dropout(pool2, keep_prob)

            #layer3
            with tf.variable_scope('conv3_1'):
                conv3_1_weight = tf.get_variable(
                    'conv3_1_weight',
                    [3, 3, base_channel * 2, base_channel * 4],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv3_1_bias = tf.get_variable(
                    'conv3_1_bias', [base_channel * 4],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv3_1_result = tf.nn.conv2d(pool2,
                                              conv3_1_weight, [1, 1, 1, 1],
                                              padding='SAME',
                                              name='conv3_1')
                conv3_1_relu = tf.nn.relu(tf.add(conv3_1_result, conv3_1_bias),
                                          name='conv3_1_relu')
    #            conv3_1_relu=tf.nn.dropout(conv3_1_relu,keep_prob)

            with tf.variable_scope('conv3_2'):
                conv3_2_weight = tf.get_variable(
                    'conv3_2_weight',
                    [3, 3, base_channel * 4, base_channel * 4],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv3_2_bias = tf.get_variable(
                    'conv3_2_bias', [base_channel * 4],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv3_2_result = tf.nn.conv2d(conv3_1_relu,
                                              conv3_2_weight, [1, 1, 1, 1],
                                              padding='SAME',
                                              name='conv3_2')
                conv3_2_relu = tf.nn.relu(tf.add(conv3_2_result, conv3_2_bias),
                                          name='conv3_2_relu')  #h_w_inputs/4
    #            conv3_2_relu=batch_normalization(conv3_2_relu,base_channel*4,axis=[0,1,2])

            with tf.variable_scope('pool3'):
                pool3 = tf.nn.max_pool(conv3_2_relu, [1, 2, 2, 1],
                                       [1, 2, 2, 1],
                                       padding='SAME')  #h_w_inputs/8
                pool3 = tf.nn.dropout(pool3, keep_prob)

            #layer4
            with tf.variable_scope('conv4_1'):
                conv4_1_weight = tf.get_variable(
                    'conv4_1_weight',
                    [3, 3, base_channel * 4, base_channel * 8],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv4_1_bias = tf.get_variable(
                    'conv4_1_bias', [base_channel * 8],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv4_1_result = tf.nn.conv2d(pool3,
                                              conv4_1_weight, [1, 1, 1, 1],
                                              padding='SAME',
                                              name='conv4_1_result')
                conv4_1_relu = tf.nn.relu(tf.add(conv4_1_result, conv4_1_bias),
                                          name='conv4_1_relu')
    #            conv4_1_relu=tf.nn.dropout(conv4_1_relu,keep_prob)

            with tf.variable_scope('conv4_2'):
                conv4_2_weight = tf.get_variable(
                    'conv4_2_weight',
                    [3, 3, base_channel * 8, base_channel * 8],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv4_2_bias = tf.get_variable(
                    'conv4_2_bias', [base_channel * 8],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv4_2_result = tf.nn.conv2d(conv4_1_relu,
                                              conv4_2_weight, [1, 1, 1, 1],
                                              padding='SAME',
                                              name='conv4_2_result')
                conv4_2_relu = tf.nn.relu(tf.add(conv4_2_result, conv4_2_bias),
                                          name='conv4_2_relu')
                #            conv4_2_relu=tf.nn.dropout(conv4_2_relu,keep_prob)
                conv4_2_relu = batch_normalization(conv4_2_relu,
                                                   base_channel * 8,
                                                   axis=[0, 1, 2])

            with tf.variable_scope('pool4'):
                pool4 = tf.nn.max_pool(conv4_2_relu, [1, 2, 2, 1],
                                       [1, 2, 2, 1],
                                       padding='SAME')  #h_w_inputs/16
                pool4 = tf.nn.dropout(pool4, keep_prob)

            #layer5
            with tf.variable_scope('conv5_1'):
                conv5_1_weight = tf.get_variable(
                    'conv5_1_weight',
                    [3, 3, base_channel * 8, base_channel * 16],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv5_1_bias = tf.get_variable(
                    'conv5_1_bias', [base_channel * 16],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv5_1_result = tf.nn.conv2d(pool4,
                                              conv5_1_weight, [1, 1, 1, 1],
                                              padding='SAME',
                                              name='conv5_1')
                conv5_1_relu = tf.nn.relu(tf.add(conv5_1_result, conv5_1_bias),
                                          name='conv5_1_relu')
    #            conv5_1_relu=tf.nn.dropout(conv5_1_relu,keep_prob)

            with tf.variable_scope('conv5_2'):
                conv5_2_weight = tf.get_variable(
                    'conv5_2_weight',
                    [3, 3, base_channel * 16, base_channel * 16],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv5_2_bias = tf.get_variable(
                    'conv5_2_bias', [base_channel * 16],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv5_2_result = tf.nn.conv2d(conv5_1_relu,
                                              conv5_2_weight, [1, 1, 1, 1],
                                              padding='SAME',
                                              name='conv5_2')
                conv5_2_relu = tf.nn.relu(tf.add(conv5_2_result, conv5_2_bias),
                                          name='conv5_2_relu')
    #            conv5_2_relu=tf.nn.dropout(conv5_2_relu,keep_prob)
    #            conv5_2_relu=batch_normalization(conv5_2_relu,base_channel*16,axis=[0,1,2])

            with tf.variable_scope('unsample1'):
                unsam1_weight = tf.get_variable(
                    'unsam1_weight',
                    [2, 2, base_channel * 8, base_channel * 16],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                unsam1_bias = tf.get_variable(
                    'unsam1_bias', [base_channel * 8],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                unsam1_result = tf.nn.conv2d_transpose(
                    conv5_2_relu, unsam1_weight, [
                        batch_size, w_inputs // 8, h_inputs // 8,
                        base_channel * 8
                    ], [1, 2, 2, 1])
                unsam1_relu = tf.nn.relu(tf.add(unsam1_result,
                                                unsam1_bias))  #h_w_inputs/4
                # Earlier bug: the output batch size was updated in only one upsampling layer (another was missed), which made it impossible to feed batches of images
                unsam1_relu = tf.nn.dropout(unsam1_relu, keep_prob)

                merged_layer1 = tf.concat([unsam1_relu, conv4_2_relu], axis=-1)

            #layer6
            with tf.variable_scope('conv6_1'):
                conv6_1_weight = tf.get_variable(
                    'conv6_1_weight',
                    [3, 3, base_channel * 16, base_channel * 8],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv6_1_bias = tf.get_variable(
                    'conv6_1_bias', [base_channel * 8],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv6_1_result = tf.nn.conv2d(merged_layer1,
                                              conv6_1_weight, [1, 1, 1, 1],
                                              padding='SAME',
                                              name='conv6_1_result')
                conv6_1_relu = tf.nn.relu(tf.add(conv6_1_result, conv6_1_bias),
                                          name='conv6_1_relu')
    #            conv6_1_relu=tf.nn.dropout(conv6_1_relu,keep_prob)

            with tf.variable_scope('conv6_2'):
                conv6_2_weight = tf.get_variable(
                    'conv6_2_weight',
                    [3, 3, base_channel * 8, base_channel * 8],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv6_2_bias = tf.get_variable(
                    'conv6_2_bias', [base_channel * 8],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv6_2_result = tf.nn.conv2d(conv6_1_relu,
                                              conv6_2_weight, [1, 1, 1, 1],
                                              padding='SAME',
                                              name='conv6_2_result')
                conv6_2_relu = tf.nn.relu(tf.add(conv6_2_result, conv6_2_bias),
                                          name='conv6_2_relu')
                #            conv6_2_relu=tf.nn.dropout(conv6_2_relu,keep_prob)
                conv6_2_relu = batch_normalization(conv6_2_relu,
                                                   base_channel * 8,
                                                   axis=[0, 1, 2])

            with tf.variable_scope('unsample2'):
                # Upsampling reverses the pooling layer's downscaling, so its settings should match the pooling layer rather than the preceding conv layer
                unsam2_weight = tf.get_variable(
                    'unsam2_weight',
                    [2, 2, base_channel * 4, base_channel * 8],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                # Kernel size 2, the same as the pooling layer
                unsam2_bias = tf.get_variable(
                    'unsam2_bias', [base_channel * 4],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                unsam2_result = tf.nn.conv2d_transpose(
                    conv6_2_relu, unsam2_weight, [
                        batch_size, w_inputs // 4, h_inputs // 4,
                        base_channel * 4
                    ], [1, 2, 2, 1])
                unsam2_relu = tf.nn.relu(tf.add(unsam2_result,
                                                unsam2_bias))  #h_w_inputs/2
                unsam2_relu = tf.nn.dropout(unsam2_relu, keep_prob)

                merged_layer2 = tf.concat([unsam2_relu, conv3_2_relu], axis=-1)

                #layer7
            with tf.variable_scope('conv7_1'):
                conv7_1_weight = tf.get_variable(
                    'conv7_1_weight',
                    [3, 3, base_channel * 8, base_channel * 4],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv7_1_bias = tf.get_variable(
                    'conv7_1_bias', [base_channel * 4],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv7_1_result = tf.nn.conv2d(merged_layer2,
                                              conv7_1_weight, [1, 1, 1, 1],
                                              padding='SAME',
                                              name='conv7_1_result')
                conv7_1_relu = tf.nn.relu(tf.add(conv7_1_result, conv7_1_bias),
                                          name='conv7_1_relu')
    #            conv7_1_relu=tf.nn.dropout(conv7_1_relu,keep_prob)

            with tf.variable_scope('conv7_2'):
                conv7_2_weight = tf.get_variable(
                    'conv7_2_weight',
                    [3, 3, base_channel * 4, base_channel * 4],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv7_2_bias = tf.get_variable(
                    'conv7_2_bias', [base_channel * 4],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv7_2_result = tf.nn.conv2d(conv7_1_relu,
                                              conv7_2_weight, [1, 1, 1, 1],
                                              padding='SAME',
                                              name='conv7_2_result')
                conv7_2_relu = tf.nn.relu(tf.add(conv7_2_result, conv7_2_bias),
                                          name='conv7_2_relu')
    #            conv7_2_relu=tf.nn.dropout(conv7_2_relu,keep_prob)
    #            conv7_2_relu=batch_normalization(conv7_2_relu,base_channel*4,axis=[0,1,2])

            with tf.variable_scope('unsample3'):
                unsam3_weight = tf.get_variable(
                    'unsam3_weight',
                    [2, 2, base_channel * 2, base_channel * 4],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                unsam3_bias = tf.get_variable(
                    'unsam3_bias', [base_channel * 2],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                unsam3_result = tf.nn.conv2d_transpose(
                    conv7_2_relu, unsam3_weight, [
                        batch_size, w_inputs // 2, h_inputs // 2,
                        base_channel * 2
                    ], [1, 2, 2, 1])
                unsam3_relu = tf.nn.relu(tf.add(unsam3_result, unsam3_bias),
                                         name='unsam3_relu')
                unsam3_relu = tf.nn.dropout(unsam3_relu, keep_prob)

                merged_layer3 = tf.concat([unsam3_relu, conv2_2_relu], axis=-1)

                #layer8
            with tf.variable_scope('conv8_1'):
                conv8_1_weight = tf.get_variable(
                    'conv8_1_weight',
                    [3, 3, base_channel * 4, base_channel * 2],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv8_1_bias = tf.get_variable(
                    'conv8_1_bias', [base_channel * 2],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv8_1_result = tf.nn.conv2d(merged_layer3,
                                              conv8_1_weight, [1, 1, 1, 1],
                                              padding='SAME',
                                              name='conv8_1_result')
                conv8_1_relu = tf.nn.relu(tf.add(conv8_1_result, conv8_1_bias),
                                          name='conv8_1_relu')
    #            conv8_1_relu=tf.nn.dropout(conv8_1_relu,keep_prob)

            with tf.variable_scope('conv8_2'):
                conv8_2_weight = tf.get_variable(
                    'conv8_2_weight',
                    [3, 3, base_channel * 2, base_channel * 2],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv8_2_bias = tf.get_variable(
                    'conv8_2_bias', [base_channel * 2],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv8_2_result = tf.nn.conv2d(conv8_1_relu,
                                              conv8_2_weight, [1, 1, 1, 1],
                                              padding='SAME',
                                              name='conv8_2_result')
                conv8_2_relu = tf.nn.relu(tf.add(conv8_2_result, conv8_2_bias),
                                          name='conv8_2_relu')
                #            conv8_2_relu=tf.nn.dropout(conv8_2_relu,keep_prob)
                conv8_2_relu = batch_normalization(conv8_2_relu,
                                                   base_channel * 2,
                                                   axis=[0, 1, 2])

            with tf.variable_scope('unsample4'):
                unsam4_weight = tf.get_variable(
                    'unsam4_weight', [2, 2, base_channel, base_channel * 2],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                unsam4_bias = tf.get_variable(
                    'unsam4_bias', [base_channel],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                unsam4_result = tf.nn.conv2d_transpose(
                    conv8_2_relu, unsam4_weight,
                    [batch_size, w_inputs, h_inputs, base_channel],
                    [1, 2, 2, 1])
                unsam4_relu = tf.nn.relu(tf.add(unsam4_result, unsam4_bias),
                                         name='unsam4_relu')
                unsam4_relu = tf.nn.dropout(unsam4_relu, keep_prob)

                merged_layer4 = tf.concat([unsam4_relu, conv1_2_relu], axis=-1)

                #layer9
            with tf.variable_scope('conv9_1'):
                conv9_1_weight = tf.get_variable(
                    'conv9_1_weight', [3, 3, base_channel * 2, base_channel],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv9_1_bias = tf.get_variable(
                    'conv9_1_bias', [base_channel],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv9_1_result = tf.nn.conv2d(merged_layer4,
                                              conv9_1_weight, [1, 1, 1, 1],
                                              padding='SAME',
                                              name='conv9_1_result')
                conv9_1_relu = tf.nn.relu(tf.add(conv9_1_result, conv9_1_bias),
                                          name='conv9_1_relu')
    #            conv9_1_relu=tf.nn.dropout(conv9_1_relu,keep_prob)

            with tf.variable_scope('conv9_2'):
                conv9_2_weight = tf.get_variable(
                    'conv9_2_weight', [3, 3, base_channel, base_channel],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv9_2_bias = tf.get_variable(
                    'conv9_2_bias', [base_channel],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv9_2_result = tf.nn.conv2d(conv9_1_relu,
                                              conv9_2_weight, [1, 1, 1, 1],
                                              padding='SAME',
                                              name='conv9_2_result')
                conv9_2_relu = tf.nn.relu(tf.add(conv9_2_result, conv9_2_bias),
                                          name='conv9_2_relu')
                #            conv9_2_relu=tf.nn.dropout(conv9_2_relu,keep_prob)
                conv9_2_relu = batch_normalization(conv9_2_relu,
                                                   base_channel,
                                                   axis=[0, 1, 2])

    #            print(conv9_2_relu.shape)

            with tf.variable_scope('conv9_3'):
                conv9_3_weight = tf.get_variable(
                    'conv9_3_weight', [3, 3, base_channel, n_classes],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv9_3_bias = tf.get_variable(
                    'conv9_3_bias', [n_classes],
                    initializer=tf.truncated_normal_initializer(stddev=1.0))
                conv9_3_result = tf.nn.conv2d(conv9_2_relu,
                                              conv9_3_weight, [1, 1, 1, 1],
                                              padding='SAME',
                                              name='conv9_3_result')
                conv9_3_add = tf.add(conv9_3_result,
                                     conv9_3_bias,
                                     name='conv9_3_add')
                conv9_3_sigmoid = tf.nn.sigmoid(
                    conv9_3_add, name='conv9_3_sigmoid')  # must use sigmoid here, not relu
                # With this many layers, the loss would not decrease without BN
                # Visualize the prediction
                image2 = tf.reshape(conv9_3_sigmoid,
                                    [-1, w_inputs, h_inputs, n_classes])
                tf.summary.image(self.name + '/image1', image2, batch_size)
                self.y_pred = conv9_3_sigmoid
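
A hypothetical driver for the method above (assuming net is an instance of the surrounding class and 512x512 RGB inputs; both are illustrative assumptions):

image = tf.placeholder(tf.float32, [4, 512, 512, 3])
net.u_net_model(image, w_inputs=512, h_inputs=512, batch_size=4)
mask = net.y_pred  # per-pixel sigmoid probabilities, shape (4, 512, 512, n_classes)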