Example 1
    def create_discriminator(discrim_inputs, discrim_targets):
        n_layers = 3
        layers = []

        # 2x [batch, in_channels, height, width] => [batch, in_channels * 2, height, width]
        input = fluid.layers.concat(input=[discrim_inputs, discrim_targets], axis=1)

        # layer_1: [batch, in_channels * 2, 256, 256] => [batch, ndf, 128, 128]
        convolved = discrim_conv(input, a.ndf, stride=2)
        rectified = utils.lrelu(convolved, 0.2)
        layers.append(rectified)

        # layer_2: [batch, ndf, 128, 128] => [batch, ndf * 2, 64, 64]
        # layer_3: [batch, ndf * 2, 64, 64] => [batch, ndf * 4, 32, 32]
        # layer_4: [batch, ndf * 4, 32, 32] => [batch, ndf * 8, 31, 31]
        for i in range(n_layers):
            out_channels = a.ndf * min(2 ** (i + 1), 8)
            stride = 1 if i == n_layers - 1 else 2  # last layer here has stride 1
            convolved = discrim_conv(layers[-1], out_channels, stride=stride)
            normalized = utils.batchnorm(convolved)
            rectified = utils.lrelu(normalized, 0.2)
            layers.append(rectified)

        # layer_5: [batch, ndf * 8, 31, 31] => [batch, 1, 30, 30]
        convolved = discrim_conv(rectified, out_channels=1, stride=1)
        output = fluid.layers.sigmoid(convolved)
        layers.append(output)

        return layers[-1]
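
The discrim_conv helper is not shown in this example. Below is a minimal sketch of what the call sites imply, assuming the pix2pix-style pad-then-4x4-convolution pattern (the helper body is an assumption, not the author's code). With PaddlePaddle fluid and NCHW layout, the stride-1 layers then shrink each spatial dimension by one, which is where the 32 -> 31 -> 30 sizes in the comments come from:

import paddle.fluid as fluid

def discrim_conv(batch_input, out_channels, stride):
    # pad H and W by 1 pixel, then apply a 4x4 convolution with no implicit
    # padding; at stride 1 this shrinks each spatial dimension by exactly 1
    padded = fluid.layers.pad(batch_input,
                              paddings=[0, 0, 0, 0, 1, 1, 1, 1])
    return fluid.layers.conv2d(padded, num_filters=out_channels,
                               filter_size=4, stride=stride)
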
Example 2
    def create_discriminator(discrim_inputs, discrim_targets):
        n_layers = 3
        layers = []

        # 2x [batch, height, width, in_channels] => [batch, height, width, in_channels * 2]
        input = tf.concat([discrim_inputs, discrim_targets], axis=3)

        # layer_1: [batch, 256, 256, in_channels * 2] => [batch, 128, 128, ndf]
        with tf.variable_scope("layer_1"):
            convolved = conv(input, a.ndf, stride=2)
            rectified = lrelu(convolved, 0.2)
            layers.append(rectified)

        # layer_2: [batch, 128, 128, ndf] => [batch, 64, 64, ndf * 2]
        # layer_3: [batch, 64, 64, ndf * 2] => [batch, 32, 32, ndf * 4]
        # layer_4: [batch, 32, 32, ndf * 4] => [batch, 31, 31, ndf * 8]
        for i in range(n_layers):
            with tf.variable_scope("layer_%d" % (len(layers) + 1)):
                out_channels = a.ndf * min(2**(i + 1), 8)
                stride = 1 if i == n_layers - 1 else 2  # last layer here has stride 1
                convolved = conv(layers[-1], out_channels, stride=stride)
                normalized = batchnorm(convolved)
                rectified = lrelu(normalized, 0.2)
                # rectified = lrelu(convolved, 0.2)  # variant without batchnorm
                layers.append(rectified)

        # layer_5: [batch, 31, 31, ndf * 8] => [batch, 30, 30, 1]
        with tf.variable_scope("layer_%d" % (len(layers) + 1)):
            convolved = conv(rectified, out_channels=1, stride=1)
            # output = tf.sigmoid(convolved)
            # output = convolved  # OSS-GAN variant; fails for most f-divergences except Total Variation and Pearson
            output = tf.tanh(convolved)  # works in practice
            # output = tf.nn.relu(convolved)
            layers.append(output)

        return layers[-1]
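
The conv and lrelu helpers used above match the pix2pix reference implementation; a minimal TF1.x sketch under that assumption:

import tensorflow as tf

def conv(batch_input, out_channels, stride):
    # pad by 1 pixel, then a 4x4 kernel with VALID padding, so a stride-1
    # layer shrinks each spatial dimension by 1 (32 -> 31 -> 30)
    padded = tf.pad(batch_input, [[0, 0], [1, 1], [1, 1], [0, 0]])
    return tf.layers.conv2d(
        padded, out_channels, kernel_size=4, strides=stride, padding="valid",
        kernel_initializer=tf.random_normal_initializer(0, 0.02))

def lrelu(x, a):
    # leaky ReLU: identity for positive values, slope a for negative values
    return tf.maximum(a * x, x)
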
Example 3
def create_generator_pix2pix(generator_inputs, generator_outputs_channels, a):
    layers = []
    input_images, input_captions, sequence_lengths = generator_inputs
    with tf.variable_scope('pix2pix'):

        # encoder_1: [batch, 256, 256, in_channels] => [batch, 128, 128, ngf]
        with tf.variable_scope("encoder_1"):
            output = conv(input_images, a.ngf, stride=2)
            layers.append(output)

        layer_specs = [
            a.ngf * 2,  # encoder_2: [batch, 128, 128, ngf] => [batch, 64, 64, ngf * 2]
            a.ngf * 4,  # encoder_3: [batch, 64, 64, ngf * 2] => [batch, 32, 32, ngf * 4]
            a.ngf * 8,  # encoder_4: [batch, 32, 32, ngf * 4] => [batch, 16, 16, ngf * 8]
            a.ngf * 8,  # encoder_5: [batch, 16, 16, ngf * 8] => [batch, 8, 8, ngf * 8]
            a.ngf * 8,  # encoder_6: [batch, 8, 8, ngf * 8] => [batch, 4, 4, ngf * 8]
            a.ngf * 8,  # encoder_7: [batch, 4, 4, ngf * 8] => [batch, 2, 2, ngf * 8]
            a.ngf * 8,  # encoder_8: [batch, 2, 2, ngf * 8] => [batch, 1, 1, ngf * 8]
        ]

        for i, out_channels in enumerate(layer_specs):
            with tf.variable_scope("encoder_%d" % (len(layers) + 1)):
                rectified = lrelu(layers[-1], 0.2)
                # [batch, in_height, in_width, in_channels] => [batch, in_height/2, in_width/2, out_channels]
                convolved = conv(rectified, out_channels, stride=2)
                if i != len(layer_specs) - 1:
                    output = batchnorm(convolved)
                else:
                    # skip batchnorm on the innermost encoder layer
                    output = convolved
                layers.append(output)

        combined_layer, embedding_placeholder, emb_init = create_text_embedding(
            input_captions, sequence_lengths, layers[-1], a)
        layers[-1] = combined_layer

        layer_specs = [
            (a.ngf * 8, 0.5),  # decoder_8: [batch, 1, 1, ngf * 8] => [batch, 2, 2, ngf * 8 * 2]
            (a.ngf * 8, 0.5),  # decoder_7: [batch, 2, 2, ngf * 8 * 2] => [batch, 4, 4, ngf * 8 * 2]
            (a.ngf * 8, 0.5),  # decoder_6: [batch, 4, 4, ngf * 8 * 2] => [batch, 8, 8, ngf * 8 * 2]
            (a.ngf * 8, 0.0),  # decoder_5: [batch, 8, 8, ngf * 8 * 2] => [batch, 16, 16, ngf * 8 * 2]
            (a.ngf * 4, 0.0),  # decoder_4: [batch, 16, 16, ngf * 8 * 2] => [batch, 32, 32, ngf * 4 * 2]
            (a.ngf * 2, 0.0),  # decoder_3: [batch, 32, 32, ngf * 4 * 2] => [batch, 64, 64, ngf * 2 * 2]
            (a.ngf, 0.0),  # decoder_2: [batch, 64, 64, ngf * 2 * 2] => [batch, 128, 128, ngf * 2]
        ]

        num_encoder_layers = len(layers)
        for decoder_layer, (out_channels, dropout) in enumerate(layer_specs):
            skip_layer = num_encoder_layers - decoder_layer - 1
            with tf.variable_scope("decoder_%d" % (skip_layer + 1)):
                if decoder_layer == 0:
                    # first decoder layer doesn't have skip connections
                    # since it is directly connected to the skip_layer
                    input = layers[-1]
                else:
                    input = tf.concat([layers[-1], layers[skip_layer]], axis=3)

                rectified = tf.nn.relu(input)
                # [batch, in_height, in_width, in_channels] => [batch, in_height*2, in_width*2, out_channels]
                output = deconv(rectified, out_channels)
                output = batchnorm(output)

                if dropout > 0.0:
                    output = tf.nn.dropout(output, keep_prob=1 - dropout)

                layers.append(output)

        # decoder_1: [batch, 128, 128, ngf * 2] => [batch, 256, 256, generator_outputs_channels]
        with tf.variable_scope("decoder_1"):
            input = tf.concat([layers[-1], layers[0]], axis=3)
            rectified = tf.nn.relu(input)
            output = deconv(rectified, generator_outputs_channels)
            output = tf.tanh(output)
            layers.append(output)

    return layers[-1], emb_init, embedding_placeholder
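
The deconv helper is assumed to be the usual pix2pix upsampling layer; a TF1.x sketch (the initializer is an assumption):

import tensorflow as tf

def deconv(batch_input, out_channels):
    # 4x4 transposed convolution with stride 2: doubles height and width
    return tf.layers.conv2d_transpose(
        batch_input, out_channels, kernel_size=4, strides=2, padding="same",
        kernel_initializer=tf.random_normal_initializer(0, 0.02))
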
Example 4
def LearningRegularization(cv_left,
                           cv_right,
                           tst,
                           batch_size=1,
                           F=32,
                           D=192,
                           H=256,
                           W=512,
                           SHARE=None):

    with tf.name_scope('Conv3d19'):
        with tf.variable_scope('params', reuse=SHARE):
            W19 = weight_variable((3, 3, 3, 2 * F, F))
            B19 = bias_variable((F, ))
        Y19_left = conv3d(cv_left, W19, stride=1)  ###batch D/2 H/2 W/2 F
        Y19_right = conv3d(cv_right, W19, stride=1)
        Y19bn_left, update_ema19_left = batchnorm(Y19_left, tst, B19)
        Y19bn_right, update_ema19_right = batchnorm(Y19_right, tst, B19)
        # Y19bn_left, _= batchnorm(Y19_left, tst, B19)
        # Y19bn_right, _= batchnorm(Y19_right, tst, B19)
        Y19relu_left = tf.nn.relu(Y19bn_left)
        Y19relu_right = tf.nn.relu(Y19bn_right)

    with tf.name_scope('Conv3d20'):
        with tf.variable_scope('params', reuse=SHARE):
            W20 = weight_variable((3, 3, 3, F, F))
            B20 = bias_variable((F, ))
        Y20_left = conv3d(Y19relu_left, W20, stride=1)  ###batch D/2 H/2 W/2 F
        Y20_right = conv3d(Y19relu_right, W20, stride=1)
        Y20bn_left, update_ema20_left = batchnorm(Y20_left, tst, B20)
        Y20bn_right, update_ema20_right = batchnorm(Y20_right, tst, B20)
        # Y20bn_left, _= batchnorm(Y20_left, tst, B20)
        # Y20bn_right, _= batchnorm(Y20_right, tst, B20)
        Y20relu_left = tf.nn.relu(Y20bn_left)
        Y20relu_right = tf.nn.relu(Y20bn_right)

    with tf.name_scope('Conv3d21'):
        with tf.variable_scope('params', reuse=SHARE):
            W21 = weight_variable((3, 3, 3, 2 * F, 2 * F))
            B21 = bias_variable((2 * F, ))
        Y21_left = conv3d(cv_left, W21, stride=2)  ###batch D/4 H/4 W/4 2F
        Y21_right = conv3d(cv_right, W21, stride=2)
        Y21bn_left, update_ema21_left = batchnorm(Y21_left, tst, B21)
        Y21bn_right, update_ema21_right = batchnorm(Y21_right, tst, B21)
        # Y21bn_left, _= batchnorm(Y21_left, tst, B21)
        # Y21bn_right, _= batchnorm(Y21_right, tst, B21)
        Y21relu_left = tf.nn.relu(Y21bn_left)
        Y21relu_right = tf.nn.relu(Y21bn_right)

    with tf.name_scope('Conv3d22'):
        with tf.variable_scope('params', reuse=SHARE):
            W22 = weight_variable((3, 3, 3, 2 * F, 2 * F))
            B22 = bias_variable((2 * F, ))
        Y22_left = conv3d(Y21relu_left, W22, stride=1)  ###batch D/4 H/4 W/4 2F
        Y22_right = conv3d(Y21relu_right, W22, stride=1)
        Y22bn_left, update_ema22_left = batchnorm(Y22_left, tst, B22)
        Y22bn_right, update_ema22_right = batchnorm(Y22_right, tst, B22)
        # Y22bn_left, _= batchnorm(Y22_left, tst, B22)
        # Y22bn_right, _= batchnorm(Y22_right, tst, B22)
        Y22relu_left = tf.nn.relu(Y22bn_left)
        Y22relu_right = tf.nn.relu(Y22bn_right)

    with tf.name_scope('Conv3d23'):
        with tf.variable_scope('params', reuse=SHARE):
            W23 = weight_variable((3, 3, 3, 2 * F, 2 * F))
            B23 = bias_variable((2 * F, ))
        Y23_left = conv3d(Y22relu_left, W23, stride=1)  ###batch D/4 H/4 W/4 2F
        Y23_right = conv3d(Y22relu_right, W23, stride=1)
        Y23bn_left, update_ema23_left = batchnorm(Y23_left, tst, B23)
        Y23bn_right, update_ema23_right = batchnorm(Y23_right, tst, B23)
        # Y23bn_left, _= batchnorm(Y23_left, tst, B23)
        # Y23bn_right, _= batchnorm(Y23_right, tst, B23)
        Y23relu_left = tf.nn.relu(Y23bn_left)
        Y23relu_right = tf.nn.relu(Y23bn_right)

    with tf.name_scope('Conv3d24'):
        with tf.variable_scope('params', reuse=SHARE):
            W24 = weight_variable((3, 3, 3, 2 * F, 2 * F))
            B24 = bias_variable((2 * F, ))
        Y24_left = conv3d(Y21relu_left, W24, stride=2)  ###batch D/8 H/8 W/8 2F
        Y24_right = conv3d(Y21relu_right, W24, stride=2)
        Y24bn_left, update_ema24_left = batchnorm(Y24_left, tst, B24)
        Y24bn_right, update_ema24_right = batchnorm(Y24_right, tst, B24)
        # Y24bn_left, _= batchnorm(Y24_left, tst, B24)
        # Y24bn_right, _= batchnorm(Y24_right, tst, B24)
        Y24relu_left = tf.nn.relu(Y24bn_left)
        Y24relu_right = tf.nn.relu(Y24bn_right)

    with tf.name_scope('Conv3d25'):
        with tf.variable_scope('params', reuse=SHARE):
            W25 = weight_variable((3, 3, 3, 2 * F, 2 * F))
            B25 = bias_variable((2 * F, ))
        Y25_left = conv3d(Y24relu_left, W25, stride=1)  ###batch D/8 H/8 W/8 2F
        Y25_right = conv3d(Y24relu_right, W25, stride=1)
        Y25bn_left, update_ema25_left = batchnorm(Y25_left, tst, B25)
        Y25bn_right, update_ema25_right = batchnorm(Y25_right, tst, B25)
        # Y25bn_left, _= batchnorm(Y25_left, tst, B25)
        # Y25bn_right, _= batchnorm(Y25_right, tst, B25)
        Y25relu_left = tf.nn.relu(Y25bn_left)
        Y25relu_right = tf.nn.relu(Y25bn_right)

    with tf.name_scope('Conv3d26'):
        with tf.variable_scope('params', reuse=SHARE):
            W26 = weight_variable((3, 3, 3, 2 * F, 2 * F))
            B26 = bias_variable((2 * F, ))
        Y26_left = conv3d(Y25relu_left, W26, stride=1)  ###batch D/8 H/8 W/8 2F
        Y26_right = conv3d(Y25relu_right, W26, stride=1)
        Y26bn_left, update_ema26_left = batchnorm(Y26_left, tst, B26)
        Y26bn_right, update_ema26_right = batchnorm(Y26_right, tst, B26)
        # Y26bn_left, _= batchnorm(Y26_left, tst, B26)
        # Y26bn_right, _= batchnorm(Y26_right, tst, B26)
        Y26relu_left = tf.nn.relu(Y26bn_left)
        Y26relu_right = tf.nn.relu(Y26bn_right)

    with tf.name_scope('Conv3d27'):
        with tf.variable_scope('params', reuse=SHARE):
            W27 = weight_variable((3, 3, 3, 2 * F, 2 * F))
            B27 = bias_variable((2 * F, ))

        Y27_left = conv3d(Y24relu_left, W27,
                          stride=2)  ###batch D/16 H/16 W/16 2F
        Y27bn_left, update_ema27_left = batchnorm(Y27_left, tst, B27)
        # Y27bn_left, _= batchnorm(Y27_left, tst, B27)
        Y27relu_left = tf.nn.relu(Y27bn_left)

        Y27_right = conv3d(Y24relu_right, W27, stride=2)
        Y27bn_right, update_ema27_right = batchnorm(Y27_right, tst, B27)
        # Y27bn_right, _= batchnorm(Y27_right, tst, B27)
        Y27relu_right = tf.nn.relu(Y27bn_right)

    with tf.name_scope('Conv3d28'):
        with tf.variable_scope('params', reuse=SHARE):
            W28 = weight_variable((3, 3, 3, 2 * F, 2 * F))
            B28 = bias_variable((2 * F, ))
        Y28_left = conv3d(Y27relu_left, W28,
                          stride=1)  ###batch D/16 H/16 W/16 2F
        Y28bn_left, update_ema28_left = batchnorm(Y28_left, tst, B28)
        # Y28bn_left, _= batchnorm(Y28_left, tst, B28)
        Y28relu_left = tf.nn.relu(Y28bn_left)

        Y28_right = conv3d(Y27relu_right, W28, stride=1)
        Y28bn_right, update_ema28_right = batchnorm(Y28_right, tst, B28)
        # Y28bn_right, _= batchnorm(Y28_right, tst, B28)
        Y28relu_right = tf.nn.relu(Y28bn_right)

    with tf.name_scope('Conv3d29'):
        with tf.variable_scope('params', reuse=SHARE):
            W29 = weight_variable((3, 3, 3, 2 * F, 2 * F))
            B29 = bias_variable((2 * F, ))
        Y29_left = conv3d(Y28relu_left, W29,
                          stride=1)  ###batch D/16 H/16 W/16 2F
        Y29bn_left, update_ema29_left = batchnorm(Y29_left, tst, B29)
        # Y29bn_left, _= batchnorm(Y29_left, tst, B29)
        Y29relu_left = tf.nn.relu(Y29bn_left)

        Y29_right = conv3d(Y28relu_right, W29, stride=1)
        Y29bn_right, update_ema29_right = batchnorm(Y29_right, tst, B29)
        # Y29bn_right, _= batchnorm(Y29_right, tst, B29)
        Y29relu_right = tf.nn.relu(Y29bn_right)

    with tf.name_scope('Conv3d30'):
        with tf.variable_scope('params', reuse=SHARE):
            W30 = weight_variable((3, 3, 3, 2 * F, 4 * F))
            B30 = bias_variable((4 * F, ))
        Y30_left = conv3d(Y27relu_left, W30,
                          stride=2)  ###batch D/32 H/32 W/32 4F
        Y30bn_left, update_ema30_left = batchnorm(Y30_left, tst, B30)
        # Y30bn_left, _= batchnorm(Y30_left, tst, B30)
        Y30relu_left = tf.nn.relu(Y30bn_left)

        Y30_right = conv3d(Y27relu_right, W30, stride=2)
        Y30bn_right, update_ema30_right = batchnorm(Y30_right, tst, B30)
        # Y30bn_right, _= batchnorm(Y30_right, tst, B30)
        Y30relu_right = tf.nn.relu(Y30bn_right)

    with tf.name_scope('Conv3d31'):
        with tf.variable_scope('params', reuse=SHARE):
            W31 = weight_variable((3, 3, 3, 4 * F, 4 * F))
            B31 = bias_variable((4 * F, ))
        Y31_left = conv3d(Y30relu_left, W31,
                          stride=1)  ###batch D/32 H/32 W/32 4F
        Y31bn_left, update_ema31_left = batchnorm(Y31_left, tst, B31)
        # Y31bn_left, _= batchnorm(Y31_left, tst, B31)
        Y31relu_left = tf.nn.relu(Y31bn_left)

        Y31_right = conv3d(Y30relu_right, W31, stride=1)
        Y31bn_right, update_ema31_right = batchnorm(Y31_right, tst, B31)
        # Y31bn_right, _= batchnorm(Y31_right, tst, B31)
        Y31relu_right = tf.nn.relu(Y31bn_right)

    with tf.name_scope('Conv3d32'):
        with tf.variable_scope('params', reuse=SHARE):
            W32 = weight_variable((3, 3, 3, 4 * F, 4 * F))
            B32 = bias_variable((4 * F, ))
        Y32_left = conv3d(Y31relu_left, W32,
                          stride=1)  ###batch D/32 H/32 W/32 4F
        Y32bn_left, update_ema32_left = batchnorm(Y32_left, tst, B32)
        # Y32bn_left, _= batchnorm(Y32_left, tst, B32)
        Y32relu_left = tf.nn.relu(Y32bn_left)

        Y32_right = conv3d(Y31relu_right, W32, stride=1)
        Y32bn_right, update_ema32_right = batchnorm(Y32_right, tst, B32)
        # Y32bn_right, _= batchnorm(Y32_right, tst, B32)
        Y32relu_right = tf.nn.relu(Y32bn_right)

    with tf.name_scope('Conv3d33'):
        with tf.variable_scope('params', reuse=SHARE):
            W33 = weight_variable((3, 3, 3, 2 * F, 4 * F))
            B33 = bias_variable((2 * F, ))
        output33shape = [batch_size, D // 16, H // 16, W // 16, 2 * F]
        _Y33_left = conv3dt(Y32relu_left,
                            W33,
                            outputshape=output33shape,
                            stride=2)  ###batch D/16 H/16 W/16 2F
        _Y33bn_left, update_ema33_left = batchnorm(_Y33_left, tst, B33)
        # _Y33bn_left, _= batchnorm(_Y33_left, tst, B33)
        _Y33relu_left = tf.nn.relu(_Y33bn_left)

        _Y33_right = conv3dt(Y32relu_right,
                             W33,
                             outputshape=output33shape,
                             stride=2)
        _Y33bn_right, update_ema33_right = batchnorm(_Y33_right, tst, B33)
        # _Y33bn_right, _= batchnorm(_Y33_right, tst, B33)
        _Y33relu_right = tf.nn.relu(_Y33bn_right)

        Y33relu_left = _Y33relu_left + Y29relu_left
        Y33relu_right = _Y33relu_right + Y29relu_right

    with tf.name_scope('Conv3d34'):
        with tf.variable_scope('params', reuse=SHARE):
            W34 = weight_variable((3, 3, 3, 2 * F, 2 * F))
            B34 = bias_variable((2 * F, ))
        output34shape = [batch_size, D // 8, H // 8, W // 8, 2 * F]
        _Y34_left = conv3dt(Y33relu_left,
                            W34,
                            outputshape=output34shape,
                            stride=2)  ###batch D/8 H/8 W/8 2F
        _Y34bn_left, update_ema34_left = batchnorm(_Y34_left, tst, B34)
        # _Y34bn_left, _= batchnorm(_Y34_left, tst, B34)
        _Y34relu_left = tf.nn.relu(_Y34bn_left)

        _Y34_right = conv3dt(Y33relu_right,
                             W34,
                             outputshape=output34shape,
                             stride=2)
        _Y34bn_right, update_ema34_right = batchnorm(_Y34_right, tst, B34)
        # _Y34bn_right, _= batchnorm(_Y34_right, tst, B34)
        _Y34relu_right = tf.nn.relu(_Y34bn_right)

        Y34relu_left = _Y34relu_left + Y26relu_left
        Y34relu_right = _Y34relu_right + Y26relu_right

    with tf.name_scope('Conv3d35'):
        with tf.variable_scope('params', reuse=SHARE):
            W35 = weight_variable((3, 3, 3, 2 * F, 2 * F))
            B35 = bias_variable((2 * F, ))
        output35shape = [batch_size, D // 4, H // 4, W // 4, 2 * F]
        _Y35_left = conv3dt(Y34relu_left,
                            W35,
                            outputshape=output35shape,
                            stride=2)  ###batch D/4 H/4 W/4 2F
        _Y35bn_left, update_ema35_left = batchnorm(_Y35_left, tst, B35)
        # _Y35bn_left, _= batchnorm(_Y35_left, tst, B35)
        _Y35relu_left = tf.nn.relu(_Y35bn_left)

        _Y35_right = conv3dt(Y34relu_right,
                             W35,
                             outputshape=output35shape,
                             stride=2)
        _Y35bn_right, update_ema35_right = batchnorm(_Y35_right, tst, B35)
        # _Y35bn_right, _= batchnorm(_Y35_right, tst, B35)
        _Y35relu_right = tf.nn.relu(_Y35bn_right)

        Y35relu_left = _Y35relu_left + Y23relu_left
        Y35relu_right = _Y35relu_right + Y23relu_right

    with tf.name_scope('Conv3d36'):
        with tf.variable_scope('params', reuse=SHARE):
            W36 = weight_variable((3, 3, 3, F, 2 * F))
            B36 = bias_variable((F, ))
        output36shape = [batch_size, D // 2, H // 2, W // 2, F]
        _Y36_left = conv3dt(Y35relu_left,
                            W36,
                            outputshape=output36shape,
                            stride=2)  ###batch D/2 H/2 W/2 F
        _Y36bn_left, update_ema36_left = batchnorm(_Y36_left, tst, B36)
        # _Y36bn_left, _= batchnorm(_Y36_left, tst, B36)
        _Y36relu_left = tf.nn.relu(_Y36bn_left)

        _Y36_right = conv3dt(Y35relu_right,
                             W36,
                             outputshape=output36shape,
                             stride=2)
        _Y36bn_right, update_ema36_right = batchnorm(_Y36_right, tst, B36)
        # _Y36bn_right, _= batchnorm(_Y36_right, tst, B36)
        _Y36relu_right = tf.nn.relu(_Y36bn_right)

        Y36relu_left = _Y36relu_left + Y20relu_left
        Y36relu_right = _Y36relu_right + Y20relu_right

    update_lr_left = tf.group(
        update_ema19_left, update_ema20_left, update_ema21_left,
        update_ema22_left, update_ema23_left, update_ema24_left,
        update_ema25_left, update_ema26_left, update_ema27_left,
        update_ema28_left, update_ema29_left, update_ema30_left,
        update_ema31_left, update_ema32_left, update_ema33_left,
        update_ema34_left, update_ema35_left, update_ema36_left)

    update_lr_right = tf.group(
        update_ema19_right, update_ema20_right, update_ema21_right,
        update_ema22_right, update_ema23_right, update_ema24_right,
        update_ema25_right, update_ema26_right, update_ema27_right,
        update_ema28_right, update_ema29_right, update_ema30_right,
        update_ema31_right, update_ema32_right, update_ema33_right,
        update_ema34_right, update_ema35_right, update_ema36_right)

    with tf.name_scope('Conv3d37'):
        with tf.variable_scope('params', reuse=SHARE):
            W37 = weight_variable((3, 3, 3, 1, F))
        output37shape = [batch_size, D, H, W, 1]
        Y37_left = conv3dt(Y36relu_left,
                           W37,
                           outputshape=output37shape,
                           stride=2)
        Y37_right = conv3dt(Y36relu_right,
                            W37,
                            outputshape=output37shape,
                            stride=2)

    return Y37_left, Y37_right, update_lr_left, update_lr_right
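
The conv3d and conv3dt wrappers are not shown here; minimal sketches consistent with the call sites follow (an assumption, not the authors' code). Note that conv3dt takes an explicit output shape, and that tf.nn.conv3d_transpose expects its filter laid out as [d, h, w, out_channels, in_channels], which matches the weight shapes above:

import tensorflow as tf

def conv3d(x, W, stride):
    return tf.nn.conv3d(x, W, strides=[1, stride, stride, stride, 1],
                        padding='SAME')

def conv3dt(x, W, outputshape, stride):
    # transposed 3-D convolution used for upsampling in the decoder half
    return tf.nn.conv3d_transpose(x, W, output_shape=outputshape,
                                  strides=[1, stride, stride, stride, 1],
                                  padding='SAME')
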
Example 5
    def _create_network(self,
                        input_batch,
                        keep_prob,
                        num_classes,
                        use_fuse=False):
        """Construct DeepLab-LargeFOV network.
        
        Args:
          input_batch: batch of pre-processed images.
          keep_prob: probability of keeping neurons intact.
          
        Returns:
          A downsampled segmentation mask. 
        """
        current = input_batch

        v_idx = 0  # Index variable.

        # Last block is the classification layer.
        for b_idx in range(len(dilations) - 1):
            for l_idx, dilation in enumerate(dilations[b_idx]):
                w = self.variables[v_idx * 2]
                b = self.variables[v_idx * 2 + 1]
                if dilation == 1:
                    conv = tf.nn.conv2d(current,
                                        w,
                                        strides=[1, 1, 1, 1],
                                        padding='SAME')
                else:
                    conv = tf.nn.atrous_conv2d(current,
                                               w,
                                               dilation,
                                               padding='SAME')
                conv = tf.nn.bias_add(conv, b)
                if b_idx < 5:
                    conv = batchnorm(conv,
                                     scope='conv{:d}_batchnorm{:d}'.format(
                                         b_idx + 1, l_idx + 1))

                current = tf.nn.relu(conv)
                v_idx += 1
            # Optional pooling and dropout after each block.
            if b_idx < 3:
                if b_idx == 1:
                    block_1 = current
                if b_idx == 2:
                    block_2 = current
                current = tf.nn.max_pool(current,
                                         ksize=[1, ks, ks, 1],
                                         strides=[1, 2, 2, 1],
                                         padding='SAME')
            elif b_idx == 3:
                block_3 = current
                current = tf.nn.max_pool(current,
                                         ksize=[1, ks, ks, 1],
                                         strides=[1, 1, 1, 1],
                                         padding='SAME')
            elif b_idx == 4:
                current = tf.nn.max_pool(current,
                                         ksize=[1, ks, ks, 1],
                                         strides=[1, 1, 1, 1],
                                         padding='SAME')
                current = tf.nn.avg_pool(current,
                                         ksize=[1, ks, ks, 1],
                                         strides=[1, 1, 1, 1],
                                         padding='SAME')
            elif b_idx <= 6:
                current = tf.nn.dropout(current, keep_prob=keep_prob)

        block_1 = tf.image.resize_bilinear(block_1,
                                           input_batch.get_shape()[1:3])
        block_2 = tf.image.resize_bilinear(block_2,
                                           input_batch.get_shape()[1:3])
        block_3 = tf.image.resize_bilinear(block_3,
                                           input_batch.get_shape()[1:3])

        current_final = tf.image.resize_bilinear(current,
                                                 input_batch.get_shape()[1:3])
        current_final = tf.concat([block_1, block_2, current_final], axis=-1)
        if use_fuse:
            edgenet = utils_conv(tf.concat([block_1, block_2, block_3],
                                           axis=-1),
                                 num_classes,
                                 stride=1,
                                 padding='SAME',
                                 name='fc8_voc12_edge')
            #edgenet=utils_conv(current_final, num_classes, stride=1, padding='SAME', name='fc8_voc12_edge')
            segnet = utils_conv(current,
                                num_classes,
                                stride=1,
                                padding='SAME',
                                name='fc8_voc12')
            current = tf.image.resize_bilinear(segnet,
                                               edgenet.get_shape()[1:3])
            current = tf.concat([current, edgenet], axis=-1)
            current = utils_conv(current,
                                 num_classes,
                                 stride=1,
                                 padding='SAME',
                                 name='fc9_voc12')
            return current, edgenet, segnet
        else:
            current = utils_conv(current,
                                 num_classes,
                                 stride=1,
                                 padding='SAME',
                                 name='fc8_voc12')

            return current
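
utils_conv is presumably a named 1x1 classifier convolution, as in DeepLab's fc8 layer; a hypothetical sketch (variable names and initializers are assumptions):

import tensorflow as tf

def utils_conv(x, num_outputs, stride=1, padding='SAME', name=None):
    # hypothetical 1x1 convolution with bias, scoped under `name`
    in_channels = x.get_shape().as_list()[-1]
    with tf.variable_scope(name):
        w = tf.get_variable('weights', [1, 1, in_channels, num_outputs])
        b = tf.get_variable('biases', [num_outputs],
                            initializer=tf.zeros_initializer())
        conv = tf.nn.conv2d(x, w, strides=[1, stride, stride, 1],
                            padding=padding)
        return tf.nn.bias_add(conv, b)
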
Example 6
def UnaryFeatures(left_image, right_image, tst, F=32, SHARE=None):

    with tf.name_scope('Conv1'):
        with tf.variable_scope('params', reuse=SHARE):
            W1 = weight_variable((5, 5, 1, F))
            B1 = bias_variable((F, ))
        Y1_left = conv2d(left_image, W1, stride=2)
        Y1_right = conv2d(right_image, W1, stride=2)
        Y1bn_left, update_ema1_left = batchnorm(Y1_left,
                                                tst,
                                                B1,
                                                convolutional=True)
        Y1bn_right, update_ema1_right = batchnorm(Y1_right,
                                                  tst,
                                                  B1,
                                                  convolutional=True)
        # Y1bn_left, _= batchnorm(Y1_left, tst, B1, convolutional=True)
        # Y1bn_right, _= batchnorm(Y1_right, tst, B1, convolutional=True)
        Y1relu_left = tf.nn.relu(Y1bn_left)
        Y1relu_right = tf.nn.relu(Y1bn_right)
        # print('Output/Conv1->',Y1r_left.shape, Y1r_right.shape)

    with tf.name_scope('Conv2'):
        with tf.variable_scope('params', reuse=SHARE):
            W2 = weight_variable((3, 3, F, F))
            B2 = bias_variable((F, ))
        Y2_left = conv2d(Y1relu_left, W2, stride=1)
        Y2_right = conv2d(Y1relu_right, W2, stride=1)
        Y2bn_left, update_ema2_left = batchnorm(Y2_left,
                                                tst,
                                                B2,
                                                convolutional=True)
        Y2bn_right, update_ema2_right = batchnorm(Y2_right,
                                                  tst,
                                                  B2,
                                                  convolutional=True)
        # Y2bn_left, _= batchnorm(Y2_left, tst, B2, convolutional=True)
        # Y2bn_right, _= batchnorm(Y2_right, tst, B2, convolutional=True)
        Y2relu_left = tf.nn.relu(Y2bn_left)
        Y2relu_right = tf.nn.relu(Y2bn_right)
        # print('Output/Conv2->',Y2r_left.shape, Y2r_right.shape)

    with tf.name_scope('Conv3'):
        with tf.variable_scope('params', reuse=SHARE):
            W3 = weight_variable((3, 3, F, F))
            B3 = bias_variable((F, ))
        Y3_left = conv2d(Y2relu_left, W3, stride=1)
        Y3_right = conv2d(Y2relu_right, W3, stride=1)
        Y3bn_left, update_ema3_left = batchnorm(Y3_left + Y1_left,
                                                tst,
                                                B3,
                                                convolutional=True)
        Y3bn_right, update_ema3_right = batchnorm(Y3_right + Y1_right,
                                                  tst,
                                                  B3,
                                                  convolutional=True)
        # Y3bn_left, _= batchnorm(Y3_left + Y1_left, tst, B3, convolutional=True)
        # Y3bn_right, _= batchnorm(Y3_right + Y1_right, tst, B3, convolutional=True)
        Y3relu_left = tf.nn.relu(Y3bn_left)
        Y3relu_right = tf.nn.relu(Y3bn_right)
        # print('Output/Conv3->', Y3r_left.shape, Y3r_right.shape)

    with tf.name_scope('Conv4'):
        with tf.variable_scope('params', reuse=SHARE):
            W4 = weight_variable((3, 3, F, F))
            B4 = bias_variable((F, ))
        Y4_left = conv2d(Y3relu_left, W4, stride=1)
        Y4_right = conv2d(Y3relu_right, W4, stride=1)
        Y4bn_left, update_ema4_left = batchnorm(Y4_left,
                                                tst,
                                                B4,
                                                convolutional=True)
        Y4bn_right, update_ema4_right = batchnorm(Y4_right,
                                                  tst,
                                                  B4,
                                                  convolutional=True)
        # Y4bn_left, _= batchnorm(Y4_left, tst, B4, convolutional=True)
        # Y4bn_right, _= batchnorm(Y4_right, tst, B4, convolutional=True)
        Y4relu_left = tf.nn.relu(Y4bn_left)
        Y4relu_right = tf.nn.relu(Y4bn_right)

    with tf.name_scope('Conv5'):
        with tf.variable_scope('params', reuse=SHARE):
            W5 = weight_variable((3, 3, F, F))
            B5 = bias_variable((F, ))
        Y5_left = conv2d(Y4relu_left, W5, stride=1)
        Y5_right = conv2d(Y4relu_right, W5, stride=1)
        Y5bn_left, update_ema5_left = batchnorm(Y5_left + Y3_left + Y1_left,
                                                tst,
                                                B5,
                                                convolutional=True)
        Y5bn_right, update_ema5_right = batchnorm(Y5_right + Y3_right +
                                                  Y1_right,
                                                  tst,
                                                  B5,
                                                  convolutional=True)
        # Y5bn_left, _= batchnorm(Y5_left + Y3_left + Y1_left, tst, B5, convolutional=True)
        # Y5bn_right, _= batchnorm(Y5_right + Y3_right + Y1_right, tst, B5, convolutional=True)
        Y5relu_left = tf.nn.relu(Y5bn_left)
        Y5relu_right = tf.nn.relu(Y5bn_right)

    with tf.name_scope('Conv6'):
        with tf.variable_scope('params', reuse=SHARE):
            W6 = weight_variable((3, 3, F, F))
            B6 = bias_variable((F, ))
        Y6_left = conv2d(Y5relu_left, W6, stride=1)
        Y6_right = conv2d(Y5relu_right, W6, stride=1)
        Y6bn_left, update_ema6_left = batchnorm(Y6_left,
                                                tst,
                                                B6,
                                                convolutional=True)
        Y6bn_right, update_ema6_right = batchnorm(Y6_right,
                                                  tst,
                                                  B6,
                                                  convolutional=True)
        # Y6bn_left, _= batchnorm(Y6_left, tst, B6, convolutional=True)
        # Y6bn_right, _= batchnorm(Y6_right, tst, B6, convolutional=True)
        Y6relu_left = tf.nn.relu(Y6bn_left)
        Y6relu_right = tf.nn.relu(Y6bn_right)

    with tf.name_scope('Conv7'):
        with tf.variable_scope('params', reuse=SHARE):
            W7 = weight_variable((3, 3, F, F))
            B7 = bias_variable((F, ))
        Y7_left = conv2d(Y6relu_left, W7, stride=1)
        Y7_right = conv2d(Y6relu_right, W7, stride=1)
        Y7bn_left, update_ema7_left = batchnorm(Y7_left + Y5_left + Y3_left +
                                                Y1_left,
                                                tst,
                                                B7,
                                                convolutional=True)
        Y7bn_right, update_ema7_right = batchnorm(Y7_right + Y5_right +
                                                  Y3_right + Y1_right,
                                                  tst,
                                                  B7,
                                                  convolutional=True)
        # Y7bn_left, _= batchnorm(Y7_left + Y5_left + Y3_left + Y1_left, tst, B7, convolutional=True)
        # Y7bn_right, _= batchnorm(Y7_right + Y5_right + Y3_right + Y1_right, tst, B7,
        #                                           convolutional=True)

        Y7relu_left = tf.nn.relu(Y7bn_left)
        Y7relu_right = tf.nn.relu(Y7bn_right)

    with tf.name_scope('Conv8'):
        with tf.variable_scope('params', reuse=SHARE):
            W8 = weight_variable((3, 3, F, F))
            B8 = bias_variable((F, ))
        Y8_left = conv2d(Y7relu_left, W8, stride=1)
        Y8_right = conv2d(Y7relu_right, W8, stride=1)
        Y8bn_left, update_ema8_left = batchnorm(Y8_left,
                                                tst,
                                                B8,
                                                convolutional=True)
        Y8bn_right, update_ema8_right = batchnorm(Y8_right,
                                                  tst,
                                                  B8,
                                                  convolutional=True)
        # Y8bn_left, _= batchnorm(Y8_left, tst, B8, convolutional=True)
        # Y8bn_right, _= batchnorm(Y8_right, tst, B8, convolutional=True)
        Y8relu_left = tf.nn.relu(Y8bn_left)
        Y8relu_right = tf.nn.relu(Y8bn_right)

    with tf.name_scope('Conv9'):
        with tf.variable_scope('params', reuse=SHARE):
            W9 = weight_variable((3, 3, F, F))
            B9 = bias_variable((F, ))
        Y9_left = conv2d(Y8relu_left, W9, stride=1)
        Y9_right = conv2d(Y8relu_right, W9, stride=1)
        Y9bn_left, update_ema9_left = batchnorm(Y9_left + Y7_left + Y5_left +
                                                Y3_left + Y1_left,
                                                tst,
                                                B9,
                                                convolutional=True)
        Y9bn_right, update_ema9_right = batchnorm(
            Y9_right + Y7_right + Y5_right + Y3_right + Y1_right,
            tst,
            B9,
            convolutional=True)
        # Y9bn_left, _= batchnorm(Y9_left + Y7_left + Y5_left + Y3_left + Y1_left,
        #                                         tst, B9, convolutional=True)
        # Y9bn_right, _= batchnorm(Y9_right + Y7_right + Y5_right + Y3_right + Y1_right,
        #                                           tst, B9, convolutional=True)
        Y9relu_left = tf.nn.relu(Y9bn_left)
        Y9relu_right = tf.nn.relu(Y9bn_right)

    with tf.name_scope('Conv10'):
        with tf.variable_scope('params', reuse=SHARE):
            W10 = weight_variable((3, 3, F, F))
            B10 = bias_variable((F, ))
        Y10_left = conv2d(Y9relu_left, W10, stride=1)
        Y10_right = conv2d(Y9relu_right, W10, stride=1)
        Y10bn_left, update_ema10_left = batchnorm(Y10_left,
                                                  tst,
                                                  B10,
                                                  convolutional=True)
        Y10bn_right, update_ema10_right = batchnorm(Y10_right,
                                                    tst,
                                                    B10,
                                                    convolutional=True)
        # Y10bn_left, _= batchnorm(Y10_left, tst, B10, convolutional=True)
        # Y10bn_right, _= batchnorm(Y10_right, tst, B10, convolutional=True)
        Y10relu_left = tf.nn.relu(Y10bn_left)
        Y10relu_right = tf.nn.relu(Y10bn_right)

    with tf.name_scope('Conv11'):
        with tf.variable_scope('params', reuse=SHARE):
            W11 = weight_variable((3, 3, F, F))
            B11 = bias_variable((F, ))
        Y11_left = conv2d(Y10relu_left, W11, stride=1)
        Y11_right = conv2d(Y10relu_right, W11, stride=1)
        Y11bn_left, update_ema11_left = batchnorm(
            Y11_left + Y9_left + Y7_left + Y5_left + Y3_left + Y1_left,
            tst,
            B11,
            convolutional=True)
        Y11bn_right, update_ema11_right = batchnorm(
            Y11_right + Y9_right + Y7_right + Y5_right + Y3_right + Y1_right,
            tst,
            B11,
            convolutional=True)
        # Y11bn_left, _= batchnorm(Y11_left + Y9_left + Y7_left + Y5_left + Y3_left + Y1_left,
        #                                           tst, B11, convolutional=True)
        # Y11bn_right, _= batchnorm(Y11_right + Y9_right + Y7_right + Y5_right + Y3_right + Y1_right, tst, B11, convolutional=True)
        Y11relu_left = tf.nn.relu(Y11bn_left)
        Y11relu_right = tf.nn.relu(Y11bn_right)

    with tf.name_scope('Conv12'):
        with tf.variable_scope('params', reuse=SHARE):
            W12 = weight_variable((3, 3, F, F))
            B12 = bias_variable((F, ))
        Y12_left = conv2d(Y11relu_left, W12, stride=1)
        Y12_right = conv2d(Y11relu_right, W12, stride=1)
        Y12bn_left, update_ema12_left = batchnorm(Y12_left,
                                                  tst,
                                                  B12,
                                                  convolutional=True)
        Y12bn_right, update_ema12_right = batchnorm(Y12_right,
                                                    tst,
                                                    B12,
                                                    convolutional=True)
        # Y12bn_left, _= batchnorm(Y12_left, tst, B12, convolutional=True)
        # Y12bn_right, _= batchnorm(Y12_right, tst, B12, convolutional=True)
        Y12relu_left = tf.nn.relu(Y12bn_left)
        Y12relu_right = tf.nn.relu(Y12bn_right)

    with tf.name_scope('Conv13'):
        with tf.variable_scope('params', reuse=SHARE):
            W13 = weight_variable((3, 3, F, F))
            B13 = bias_variable((F, ))
        Y13_left = conv2d(Y12relu_left, W13, stride=1)
        Y13_right = conv2d(Y12relu_right, W13, stride=1)
        Y13bn_left, update_ema13_left = batchnorm(
            Y13_left + Y11_left + Y9_left + Y7_left + Y5_left + Y3_left +
            Y1_left,
            tst,
            B13,
            convolutional=True)
        Y13bn_right, update_ema13_right = batchnorm(
            Y13_right + Y11_right + Y9_right + Y7_right + Y5_right + Y3_right +
            Y1_right,
            tst,
            B13,
            convolutional=True)
        # Y13bn_left, _= batchnorm(
        #     Y13_left + Y11_left + Y9_left + Y7_left + Y5_left + Y3_left + Y1_left,
        #     tst, B13, convolutional=True)
        # Y13bn_right, _= batchnorm(
        #     Y13_right + Y11_right + Y9_right + Y7_right + Y5_right + Y3_right + Y1_right,
        #     tst, B13, convolutional=True)
        Y13relu_left = tf.nn.relu(Y13bn_left)
        Y13relu_right = tf.nn.relu(Y13bn_right)

    with tf.name_scope('Conv14'):
        with tf.variable_scope('params', reuse=SHARE):
            W14 = weight_variable((3, 3, F, F))
            B14 = bias_variable((F, ))
        Y14_left = conv2d(Y13relu_left, W14, stride=1)
        Y14_right = conv2d(Y13relu_right, W14, stride=1)
        Y14bn_left, update_ema14_left = batchnorm(Y14_left,
                                                  tst,
                                                  B14,
                                                  convolutional=True)
        Y14bn_right, update_ema14_right = batchnorm(Y14_right,
                                                    tst,
                                                    B14,
                                                    convolutional=True)
        # Y14bn_left, _= batchnorm(Y14_left, tst, B14, convolutional=True)
        # Y14bn_right, _= batchnorm(Y14_right, tst, B14, convolutional=True)
        Y14relu_left = tf.nn.relu(Y14bn_left)
        Y14relu_right = tf.nn.relu(Y14bn_right)

    with tf.name_scope('Conv15'):
        with tf.variable_scope('params', reuse=SHARE):
            W15 = weight_variable((3, 3, F, F))
            B15 = bias_variable((F, ))
        Y15_left = conv2d(Y14relu_left, W15, stride=1)
        Y15_right = conv2d(Y14relu_right, W15, stride=1)
        Y15bn_left, update_ema15_left = batchnorm(
            Y15_left + Y13_left + Y11_left + Y9_left + Y7_left + Y5_left +
            Y3_left + Y1_left,
            tst,
            B15,
            convolutional=True)
        Y15bn_right, update_ema15_right = batchnorm(
            Y15_right + Y13_right + Y11_right + Y9_right + Y7_right +
            Y5_right + Y3_right + Y1_right,
            tst,
            B15,
            convolutional=True)
        # Y15bn_left, _= batchnorm(
        #     Y15_left + Y13_left + Y11_left + Y9_left + Y7_left + Y5_left + Y3_left + Y1_left,
        #     tst, B15, convolutional=True)
        # Y15bn_right, _= batchnorm(
        #     Y15_right + Y13_right + Y11_right + Y9_right + Y7_right + Y5_right + Y3_right + Y1_right,
        #     tst, B15, convolutional=True)
        Y15relu_left = tf.nn.relu(Y15bn_left)
        Y15relu_right = tf.nn.relu(Y15bn_right)

    with tf.name_scope('Conv16'):
        with tf.variable_scope('params', reuse=SHARE):
            W16 = weight_variable((3, 3, F, F))
            B16 = bias_variable((F, ))
        Y16_left = conv2d(Y15relu_left, W16, stride=1)
        Y16_right = conv2d(Y15relu_right, W16, stride=1)
        Y16bn_left, update_ema16_left = batchnorm(Y16_left,
                                                  tst,
                                                  B16,
                                                  convolutional=True)
        Y16bn_right, update_ema16_right = batchnorm(Y16_right,
                                                    tst,
                                                    B16,
                                                    convolutional=True)
        # Y16bn_left, _= batchnorm(Y16_left, tst, B16, convolutional=True)
        # Y16bn_right, _= batchnorm(Y16_right, tst, B16, convolutional=True)
        Y16relu_left = tf.nn.relu(Y16bn_left)
        Y16relu_right = tf.nn.relu(Y16bn_right)

    with tf.name_scope('Conv17'):
        with tf.variable_scope('params', reuse=SHARE):
            W17 = weight_variable((3, 3, F, F))
            B17 = bias_variable((F, ))
        Y17_left = conv2d(Y16relu_left, W17, stride=1)
        Y17_right = conv2d(Y16relu_right, W17, stride=1)
        Y17bn_left, update_ema17_left = batchnorm(
            Y17_left + Y15_left + Y13_left + Y11_left + Y9_left + Y7_left +
            Y5_left + Y3_left + Y1_left,
            tst,
            B17,
            convolutional=True)
        Y17bn_right, update_ema17_right = batchnorm(
            Y17_right + Y15_right + Y13_right + Y11_right + Y9_right +
            Y7_right + Y5_right + Y3_right + Y1_right,
            tst,
            B17,
            convolutional=True)
        # Y17bn_left, _= batchnorm(
        #     Y17_left + Y15_left + Y13_left + Y11_left + Y9_left + Y7_left + Y5_left + Y3_left + Y1_left,
        #     tst, B17, convolutional=True
        # )
        # Y17bn_right, _= batchnorm(
        #     Y17_right + Y15_right + Y13_right + Y11_right + Y9_right + Y7_right + Y5_right + Y3_right + Y1_right,
        #     tst, B17, convolutional=True
        # )
        Y17relu_left = tf.nn.relu(Y17bn_left)
        Y17relu_right = tf.nn.relu(Y17bn_right)

    with tf.name_scope('Conv18'):
        with tf.variable_scope('params', reuse=SHARE):
            W18 = weight_variable((3, 3, F, F))
            B18 = bias_variable((F, ))
        Y18_left = conv2d(Y17relu_left, W18, stride=1)
        Y18_right = conv2d(Y17relu_right, W18, stride=1)

    update_uf_left = tf.group(
        update_ema1_left, update_ema2_left, update_ema3_left, update_ema4_left,
        update_ema5_left, update_ema6_left, update_ema7_left, update_ema8_left,
        update_ema9_left, update_ema10_left, update_ema11_left,
        update_ema12_left, update_ema13_left, update_ema14_left,
        update_ema15_left, update_ema16_left, update_ema17_left)

    update_uf_right = tf.group(
        update_ema1_right, update_ema2_right, update_ema3_right,
        update_ema4_right, update_ema5_right, update_ema6_right,
        update_ema7_right, update_ema8_right, update_ema9_right,
        update_ema10_right, update_ema11_right, update_ema12_right,
        update_ema13_right, update_ema14_right, update_ema15_right,
        update_ema16_right, update_ema17_right)

    return Y18_left, Y18_right, update_uf_left, update_uf_right
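
A usage sketch, assuming tst is the boolean test-mode flag consumed by the batchnorm helper (shapes are illustrative). The returned update ops must be run on every training step so the batchnorm moving averages track the batch statistics:

import tensorflow as tf

left = tf.placeholder(tf.float32, [1, 256, 512, 1])
right = tf.placeholder(tf.float32, [1, 256, 512, 1])
tst = tf.placeholder(tf.bool)  # False during training, True at test time

feat_l, feat_r, upd_l, upd_r = UnaryFeatures(left, right, tst, F=32)
# training step (sketch): sess.run([feat_l, feat_r, upd_l, upd_r],
#                                  feed_dict={tst: False, ...})
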
Example 7
def disc_net(tensor):
    # five convolutional layers with their channel counts
    K = 64  # first convolutional layer output depth
    L = 128  # second convolutional layer output depth
    M = 256  # third convolutional layer
    N = 512  # fourth convolutional layer
    O = 2  # fifth layer (binary classifier)

    W1 = tf.Variable(tf.truncated_normal(
        [2, 2, 2, 3, K],
        stddev=0.1))  # 2x2x2 patch, 3 input channels, K output channels

    W2 = tf.Variable(tf.truncated_normal([4, 4, 4, K, L], stddev=0.1))
    B2 = tf.Variable(tf.constant(0.1, tf.float32, [L]))
    W3 = tf.Variable(tf.truncated_normal([4, 4, 4, L, M], stddev=0.1))
    B3 = tf.Variable(tf.constant(0.1, tf.float32, [M]))
    W4 = tf.Variable(tf.truncated_normal([4, 4, 4, M, N], stddev=0.1))
    B4 = tf.Variable(tf.constant(0.1, tf.float32, [N]))
    W5 = tf.Variable(tf.truncated_normal([2, 4, 4, N, O], stddev=0.1))
    B5 = tf.Variable(tf.constant(0.1, tf.float32, [O]))

    # constant True passed to batchnorm below as its test-mode flag
    dropout = tf.constant(True)
    #Layer 1
    Y1l = tf.nn.conv3d(tensor, W1, strides=[1, 2, 2, 2, 1], padding='SAME')
    Y1r = utils.lrelu(Y1l, leak=0.2)

    #layer 2
    Y2l = tf.nn.conv3d(Y1r, W2, strides=[1, 2, 2, 2, 1], padding='SAME')
    Y2bn, update_ema2 = utils.batchnorm(Y2l,
                                        dropout,
                                        iter,
                                        B2,
                                        convolutional=True)
    Y2r = utils.lrelu(Y2bn, leak=0.2)

    #layer 3
    Y3l = tf.nn.conv3d(Y2r, W3, strides=[1, 2, 2, 2, 1], padding='SAME')
    Y3bn, update_ema3 = utils.batchnorm(Y3l,
                                        dropout,
                                        iter,
                                        B3,
                                        convolutional=True)
    Y3r = utils.lrelu(Y3bn, 0.2)

    #layer 4
    Y4l = tf.nn.conv3d(Y3r, W4, strides=[1, 2, 2, 2, 1], padding='SAME')
    Y4bn, update_ema4 = utils.batchnorm(Y4l,
                                        dropout,
                                        iter,
                                        B4,
                                        convolutional=True)
    Y4r = utils.lrelu(Y4bn, 0.2)

    #layer 5
    Y5l = tf.nn.conv3d(Y4r, W5, strides=[1, 1, 1, 1, 1], padding='VALID')
    Y5bn, update_ema5 = utils.batchnorm(Y5l,
                                        dropout,
                                        iter,
                                        B5,
                                        convolutional=True)

    #h0 = lrelu(tf.layers.conv3d(inputs = tensor, filters = 64, kernel_size = [4,4,4], strides = [2,2,2], padding = "SAME"), leak = 0.2)
    #h1 = lrelu(tf.contrib.layers.batch_norm(tf.layers.conv3d(inputs = h0, filters = 128, kernel_size = [4,4,4], strides = [2,2,2], padding = "SAME") , decay = .0001), leak = 0.2)
    #h2 = lrelu(tf.contrib.layers.batch_norm(tf.layers.conv3d(inputs = h1, filters = 256, kernel_size = [4,4,4], strides = [2,2,2], padding = "SAME") , decay = .0001), leak = 0.2)
    #h3 = lrelu(tf.contrib.layers.batch_norm(tf.layers.conv3d(inputs = h2, filters = 512, kernel_size = [4,4,4], strides = [2,2,2], padding = "SAME") , decay = .0001), leak = 0.2)
    #h4 = tf.layers.conv3d(inputs = h3, filters = 2, kernel_size = [2,4,4], strides = [1,1,1], padding = "VALID")
    return tf.Print(Y5bn, [Y5bn])
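
The utils.batchnorm called here (and the batchnorm in Examples 4 and 6) has the signature of the EMA-based helper from Martin Görner's TensorFlow tutorials; below is a sketch of that helper, as an assumption rather than the authors' exact code. Note that this example passes the Python builtin iter as the iteration argument, which only works if a tensor named iter is actually defined elsewhere in the module:

import tensorflow as tf

def batchnorm(Ylogits, is_test, iteration, offset, convolutional=False):
    # moving averages of the batch statistics, used instead at test time
    exp_moving_avg = tf.train.ExponentialMovingAverage(0.999, iteration)
    if convolutional:
        mean, variance = tf.nn.moments(Ylogits, [0, 1, 2])
    else:
        mean, variance = tf.nn.moments(Ylogits, [0])
    update_moving_averages = exp_moving_avg.apply([mean, variance])
    m = tf.cond(is_test, lambda: exp_moving_avg.average(mean), lambda: mean)
    v = tf.cond(is_test, lambda: exp_moving_avg.average(variance),
                lambda: variance)
    Ybn = tf.nn.batch_normalization(Ylogits, m, v, offset, None, 1e-5)
    return Ybn, update_moving_averages
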
Example 8
def Model(data, dom_l, cls_l, keep=0.5, reuse=None):
    with tf.variable_scope("feature_extractor") as scope_f:

        # List to store the output of each CNN path
        filter_output = []

        # CNNs with small filter size at the first layer
        ws1, bs1 = ut.get_params(name="sconv1", shape=[1, st.fs // 2, 1, 64])
        sconv1 = ut.conv(input=data, w=ws1, b=bs1, stride=st.fs // 16)
        sconv1 = ut.relu(ut.batchnorm(sconv1))
        sconv1 = tf.nn.max_pool(sconv1,
                                ksize=[1, 1, 8, 1],
                                strides=[1, 1, 8, 1],
                                padding="SAME")
        sconv1 = ut.dropout(sconv1, keep_prob=keep)

        # CNNs with small filter size at the second layer
        ws2, bs2 = ut.get_params(name="sconv2", shape=[1, 8, 64, 128])
        sconv2 = ut.conv(input=sconv1, w=ws2, b=bs2, stride=1)
        sconv2 = ut.relu(ut.batchnorm(sconv2))

        # CNNs with small filter size at the third layer
        ws3, bs3 = ut.get_params(name="sconv3", shape=[1, 8, 128, 128])
        sconv3 = ut.conv(input=sconv2, w=ws3, b=bs3, stride=1)
        sconv3 = ut.relu(ut.batchnorm(sconv3))

        # CNNs with small filter size at the fourth layer
        ws4, bs4 = ut.get_params(name="sconv4", shape=[1, 8, 128, 128])
        sconv4 = ut.conv(input=sconv3, w=ws4, b=bs4, stride=1)
        sconv4 = ut.relu(ut.batchnorm(sconv4))
        sconv4 = tf.nn.max_pool(sconv4,
                                ksize=[1, 1, 4, 1],
                                strides=[1, 1, 4, 1],
                                padding="SAME")

        # Spatial Convolution
        wssp, bssp = ut.get_params(name="sspconv", shape=[22, 1, 128, 128])
        sspconv = tf.nn.bias_add(
            tf.nn.conv2d(input=sconv4,
                         filter=wssp,
                         strides=[1, 1, 1, 1],
                         padding="VALID"), bssp)
        sspconv = ut.relu(ut.batchnorm(sspconv))

        sf = tf.reshape(sspconv, shape=(-1, 1024))  # small filter's feature

        ###################################################
        # CNNs with large filter size at the first layer
        wl1, bl1 = ut.get_params(name="lconv1", shape=[1, st.fs * 2, 1, 64])
        lconv1 = ut.conv(input=data, w=wl1, b=bl1, stride=st.fs // 2)
        lconv1 = ut.relu(ut.batchnorm(lconv1))
        lconv1 = tf.nn.max_pool(lconv1,
                                ksize=[1, 1, 4, 1],
                                strides=[1, 1, 4, 1],
                                padding="SAME")
        lconv1 = ut.dropout(lconv1, keep_prob=keep)

        # CNNs with large filter size at the second layer
        wl2, bl2 = ut.get_params(name="lconv2", shape=[1, 6, 64, 128])
        lconv2 = ut.conv(input=lconv1, w=wl2, b=bl2, stride=1)
        lconv2 = ut.relu(ut.batchnorm(lconv2))

        # CNNs with large filter size at the third layer
        wl3, bl3 = ut.get_params(name="lconv3", shape=[1, 6, 128, 128])
        lconv3 = ut.conv(input=lconv2, w=wl3, b=bl3, stride=1)
        lconv3 = ut.relu(ut.batchnorm(lconv3))

        # CNNs with large filter size at the fourth layer
        wl4, bl4 = ut.get_params(name="lconv4", shape=[1, 6, 128, 128])
        lconv4 = ut.conv(input=lconv3, w=wl4, b=bl4, stride=1)
        lconv4 = ut.relu(ut.batchnorm(lconv4))
        lconv4 = tf.nn.max_pool(lconv4,
                                ksize=[1, 1, 2, 1],
                                strides=[1, 1, 2, 1],
                                padding="SAME")

        # Spatial Convolution
        wlsp, blsp = ut.get_params(name="lspconv", shape=[22, 1, 128, 128])
        lspconv = tf.nn.bias_add(
            tf.nn.conv2d(input=lconv4,
                         filter=wlsp,
                         strides=[1, 1, 1, 1],
                         padding="VALID"), blsp)
        lspconv = ut.relu(ut.batchnorm(lspconv))

        lf = tf.reshape(lspconv, shape=(-1, 512))  # large filter's feature

        # Concatenate sf and lf
        filter_output.append(sf)
        filter_output.append(lf)

        feature = tf.concat(values=filter_output, axis=1)

    with tf.variable_scope("domain_classifier") as scope_c:

        fc1 = tf.contrib.layers.fully_connected(inputs=feature,
                                                num_outputs=512,
                                                activation_fn=None)
        fc1 = ut.leaky_relu(ut.batchnorm(fc1))

        fc2 = tf.contrib.layers.fully_connected(inputs=fc1,
                                                num_outputs=128,
                                                activation_fn=None)
        fc2 = ut.leaky_relu(ut.batchnorm(fc2))

        fc3 = tf.contrib.layers.fully_connected(inputs=fc2,
                                                num_outputs=32,
                                                activation_fn=None)
        fc3 = ut.leaky_relu(ut.batchnorm(fc3))

        output_d = tf.contrib.layers.fully_connected(inputs=fc3,
                                                     num_outputs=2,
                                                     activation_fn=None)
        domain_cast_label = tf.cast(dom_l, tf.int64)
        domain_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=tf.one_hot(
                domain_cast_label, depth=2),
                                                    logits=output_d))

    with tf.variable_scope("label_predictor_target") as scope_pt:

        fc4 = tf.contrib.layers.fully_connected(inputs=feature,
                                                num_outputs=512,
                                                activation_fn=None)
        fc4 = ut.leaky_relu(ut.batchnorm(fc4))

        fc5 = tf.contrib.layers.fully_connected(inputs=fc4,
                                                num_outputs=128,
                                                activation_fn=None)
        fc5 = ut.leaky_relu(ut.batchnorm(fc5))

        fc6 = tf.contrib.layers.fully_connected(inputs=fc5,
                                                num_outputs=32,
                                                activation_fn=None)
        fc6 = ut.leaky_relu(ut.batchnorm(fc6))

        output_l1 = tf.contrib.layers.fully_connected(inputs=fc6,
                                                      num_outputs=4,
                                                      activation_fn=None)
        class_cast_label = tf.cast(cls_l, tf.int64)
        label_loss1 = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=tf.one_hot(
                class_cast_label, depth=4),
                                                    logits=output_l1))
        label_loss1 = label_loss1 * tf.cast(tf.equal(dom_l, 0), tf.float32)
        label_pred1 = tf.argmax(tf.sigmoid(output_l1), -1)

    with tf.variable_scope("label_predictor_source") as scope_ps:

        fc7 = tf.contrib.layers.fully_connected(inputs=feature,
                                                num_outputs=512,
                                                activation_fn=None)
        fc7 = ut.leaky_relu(ut.batchnorm(fc7))

        fc8 = tf.contrib.layers.fully_connected(inputs=fc7,
                                                num_outputs=128,
                                                activation_fn=None)
        fc8 = ut.leaky_relu(ut.batchnorm(fc8))

        fc9 = tf.contrib.layers.fully_connected(inputs=fc8,
                                                num_outputs=32,
                                                activation_fn=None)
        fc9 = ut.leaky_relu(ut.batchnorm(fc9))

        output_l2 = tf.contrib.layers.fully_connected(inputs=fc9,
                                                      num_outputs=4,
                                                      activation_fn=None)
        label_loss2 = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=tf.one_hot(
                class_cast_label, depth=4),
                                                    logits=output_l2))
        label_loss2 = label_loss2 * tf.cast(tf.equal(dom_l, 1), tf.float32)
        label_pred2 = tf.argmax(tf.sigmoid(output_l2), -1)

    return domain_loss, label_loss1, label_pred1, label_loss2, label_pred2
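
ut.get_params is assumed to return a weight/bias pair for the given filter shape; a hypothetical sketch:

import tensorflow as tf

def get_params(name, shape):
    # one weight tensor of the given shape plus a bias per output channel
    w = tf.get_variable(name + "_w", shape,
                        initializer=tf.truncated_normal_initializer(stddev=0.1))
    b = tf.get_variable(name + "_b", shape[-1:],
                        initializer=tf.constant_initializer(0.1))
    return w, b
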
Example 9
def create_generator(generator_inputs, generator_outputs_channels):
    layers = []

    # encoder_1: [batch, in_channels, 256, 256] => [batch, ngf, 128, 128]
    output = gen_conv(generator_inputs, a.ngf)
    layers.append(output)

    layer_specs = [
        a.ngf * 2,  # encoder_2: [batch, ngf, 128, 128] => [batch, ngf * 2, 64, 64]
        a.ngf * 4,  # encoder_3: [batch, ngf * 2, 64, 64] => [batch, ngf * 4, 32, 32]
        a.ngf * 8,  # encoder_4: [batch, ngf * 4, 32, 32] => [batch, ngf * 8, 16, 16]
        a.ngf * 8,  # encoder_5: [batch, ngf * 8, 16, 16] => [batch, ngf * 8, 8, 8]
        a.ngf * 8,  # encoder_6: [batch, ngf * 8, 8, 8] => [batch, ngf * 8, 4, 4]
        a.ngf * 8,  # encoder_7: [batch, ngf * 8, 4, 4] => [batch, ngf * 8, 2, 2]
        a.ngf * 8,  # encoder_8: [batch, ngf * 8, 2, 2] => [batch, ngf * 8, 1, 1]
    ]

    for out_channels in layer_specs:
        rectified = utils.lrelu(layers[-1], 0.2)
        # [batch, in_channels, in_height, in_width] => [batch, out_channels, in_height/2, in_width/2]
        convolved = gen_conv(rectified, out_channels)
        output = utils.batchnorm(convolved)
        layers.append(output)

    layer_specs = [
        (a.ngf * 8, 0.5),  # decoder_8: [batch, ngf * 8, 1, 1] => [batch, ngf * 8 * 2, 2, 2]
        (a.ngf * 8, 0.5),  # decoder_7: [batch, ngf * 8 * 2, 2, 2] => [batch, ngf * 8 * 2, 4, 4]
        (a.ngf * 8, 0.5),  # decoder_6: [batch, ngf * 8 * 2, 4, 4] => [batch, ngf * 8 * 2, 8, 8]
        (a.ngf * 8, 0.0),  # decoder_5: [batch, ngf * 8 * 2, 8, 8] => [batch, ngf * 8 * 2, 16, 16]
        (a.ngf * 4, 0.0),  # decoder_4: [batch, ngf * 8 * 2, 16, 16] => [batch, ngf * 4 * 2, 32, 32]
        (a.ngf * 2, 0.0),  # decoder_3: [batch, ngf * 4 * 2, 32, 32] => [batch, ngf * 2 * 2, 64, 64]
        (a.ngf, 0.0),  # decoder_2: [batch, ngf * 2 * 2, 64, 64] => [batch, ngf * 2, 128, 128]
    ]

    num_encoder_layers = len(layers)
    for decoder_layer, (out_channels, dropout) in enumerate(layer_specs):
        skip_layer = num_encoder_layers - decoder_layer - 1
        if decoder_layer == 0:
            # first decoder layer doesn't have skip connections
            # since it is directly connected to the skip_layer
            input = layers[-1]
        else:
            input = fluid.layers.concat(input=[layers[-1], layers[skip_layer]], axis=1)

        rectified = fluid.layers.relu(input)
        # [batch, in_channels, in_height, in_width] => [batch, out_channels, in_height*2, in_width*2]
        output = gen_deconv(rectified, out_channels)
        output = utils.batchnorm(output)

        if dropout > 0.0:
            output = fluid.layers.dropout(output, dropout_prob=dropout)

        layers.append(output)

    # decoder_1: [batch, ngf * 2, 128, 128] => [batch, generator_outputs_channels, 256, 256]
    input = fluid.layers.concat(input=[layers[-1], layers[0]], axis=1)
    rectified = fluid.layers.relu(input)
    output = gen_deconv(rectified, generator_outputs_channels)
    output = fluid.layers.tanh(output)
    layers.append(output)

    return layers[-1]
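
gen_conv and gen_deconv are assumed to be thin fluid wrappers consistent with the shape comments above (4x4 kernels, stride 2, NCHW); a sketch:

import paddle.fluid as fluid

def gen_conv(batch_input, out_channels):
    # downsampling convolution: halves height and width
    return fluid.layers.conv2d(batch_input, num_filters=out_channels,
                               filter_size=4, stride=2, padding=1)

def gen_deconv(batch_input, out_channels):
    # transposed convolution: doubles height and width
    return fluid.layers.conv2d_transpose(batch_input,
                                         num_filters=out_channels,
                                         filter_size=4, stride=2, padding=1)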