Example #1
def g_net(img, scope, gf_dim=64, is_training=True, reuse=False):
    global bn
    bn = functools.partial(bn, is_training=is_training)
    def res_block(x, dim, scope='res'):
        y = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
        y = relu(bn(conv(y, dim, kernel_size=3, stride=1, padding='VALID', 
                        scope=scope + '_conv1'), scope=scope + '_bn1'))
        y = tf.pad(y, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
        y = bn(conv(y, dim, kernel_size=3, stride=1, padding='VALID', 
                    scope=scope + '_conv2'), scope=scope + '_bn2')
        return y + x

    with tf.variable_scope(scope + '_g', reuse=reuse):
        c0 = tf.pad(img, [[0, 0], [3, 3], [3, 3], [0, 0]], "REFLECT")
        c1 = relu(bn(conv(c0, gf_dim, 7, 1, padding='VALID', scope='c1_conv'), scope='c1_bn'))
        c2 = relu(bn(conv(c1, gf_dim * 2, 3, 2, scope='c2_conv'), scope='c2_bn'))
        c3 = relu(bn(conv(c2, gf_dim * 4, 3, 2, scope='c3_conv'), scope='c3_bn'))

        r1 = res_block(c3, gf_dim * 4, scope='r1')
        r2 = res_block(r1, gf_dim * 4, scope='r2')
        r3 = res_block(r2, gf_dim * 4, scope='r3')
        r4 = res_block(r3, gf_dim * 4, scope='r4')
        r5 = res_block(r4, gf_dim * 4, scope='r5')
        r6 = res_block(r5, gf_dim * 4, scope='r6')
        r7 = res_block(r6, gf_dim * 4, scope='r7')
        r8 = res_block(r7, gf_dim * 4, scope='r8')
        r9 = res_block(r8, gf_dim * 4, scope='r9')

        d1 = relu(bn(deconv(r9, gf_dim * 2, 3, 2, scope='d1_dconv'), scope='d1_bn'))
        d2 = relu(bn(deconv(d1, gf_dim, 3, 2, scope='d2_dconv'), scope='d2_bn'))
        d2 = tf.pad(d2, [[0, 0], [3, 3], [3, 3], [0, 0]], "REFLECT")
        pred = conv(d2, 3, 7, 1, padding='VALID', scope='pred_conv')
        pred = tf.nn.tanh(pred)

    return pred
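A minimal usage sketch (TF 1.x), assuming the conv/deconv/relu/bn helpers
called above are importable from the surrounding codebase:

import tensorflow as tf

img = tf.placeholder(tf.float32, [None, 256, 256, 3], name='input_image')
fake = g_net(img, scope='A2B', gf_dim=64, is_training=True, reuse=False)
# A second pass through the same generator reuses its variables:
rec = g_net(fake, scope='A2B', gf_dim=64, is_training=True, reuse=True)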
Example #2
def decoder_block(img, fe, is_t):

    with tf.variable_scope('upsample'):
        with slim.arg_scope([slim.separable_conv2d], depth_multiplier=1):

            h, w = int(img.shape[1]), int(img.shape[2])

            #block1 output_size=16
            im = pixel_dcl(img, 256, name='pixel_dcl_256')
            im = slim.separable_conv2d(im, 256, [3, 3], scope='conv_sp_256')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_bn_sp_256'))
            im = tf.concat([im, fe[4]], 3)
            im = tf.image.resize_bilinear(im, [h * 2, w * 2],
                                          name='upsample_256')
            im = slim.separable_conv2d(im, 512, [3, 3], scope='conv_cat_512')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_bn_cat_512'))

            #block2 output_size=32
            im = pixel_dcl(im, 128, name='pixel_dcl_128')
            im = slim.separable_conv2d(im, 128, [3, 3], scope='conv_sp_128')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_bn_sp_128'))
            im = tf.concat([im, fe[3]], 3)
            im = tf.image.resize_bilinear(im, [h * 4, w * 4],
                                          name='upsample_128')
            im = slim.separable_conv2d(im, 256, [3, 3], scope='conv_cat_128')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_bn_cat_128'))
            im = se_layer(im, 256, 16, name='conv_se_128')
            #block3 output_size=64
            im = pixel_dcl(im, 64, name='pixel_dcl_64')
            im = slim.separable_conv2d(im, 64, [3, 3], scope='conv_sp_64')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_bn_sp_64'))
            im = tf.concat([im, fe[2]], 3)
            im = tf.image.resize_bilinear(im, [h * 8, w * 8],
                                          name='upsample_64')
            im = slim.separable_conv2d(im, 128, [3, 3], scope='conv_cat_64')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_bn_cat_64'))

            #block4 output_size=128
            im = pixel_dcl(im, 32, name='pixel_dcl_32')
            im = slim.separable_conv2d(im, 32, [3, 3], scope='conv_sp_32')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_bn_sp_32'))
            im = tf.concat([im, fe[1]], 3)
            im = tf.image.resize_bilinear(im, [h * 16, w * 16],
                                          name='upsample_32')
            im = slim.separable_conv2d(im, 64, [3, 3], scope='conv_cat_32')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_bn_cat_32'))
            im = se_layer(im, 64, 16, name='conv_se_32')

            #block5 output_size=256
            im = pixel_dcl(im, 16, name='pixel_dcl_16')
            im = slim.separable_conv2d(im, 16, [3, 3], scope='conv_sp_16')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_bn_sp_16'))
            im = tf.concat([im, fe[0]], 3)
            im = tf.image.resize_bilinear(im, [h * 32, w * 32],
                                          name='upsample_16')
            im = slim.separable_conv2d(im, 32, [3, 3], scope='conv_cat_16')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_bn_cat_16'))
            im = conv2d(im, 3, k_w=1, k_h=1, name='conv_output')
            return im
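A hypothetical wiring of this decoder (TF 1.x): fe is a list of five encoder
feature maps consumed deepest-first (fe[4] down to fe[0]), e.g. the feature
list returned by the encoder in Example #15; pixel_dcl, se_layer, bn and
conv2d are assumed helpers from the surrounding codebase:

bottleneck, fe = encoder_block(img, is_t=True, senet=True)  # Example #15
rgb = decoder_block(bottleneck, fe, is_t=True)  # 32x the bottleneck resolution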
Example #3
def res_block(x, dim, scope='res'):
    y = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
    y = relu(bn(conv(y, dim, kernel_size=3, stride=1, padding='VALID',
                     scope=scope + '_conv1'), scope=scope + '_bn1'))
    y = tf.pad(y, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
    y = bn(conv(y, dim, kernel_size=3, stride=1, padding='VALID',
                scope=scope + '_conv2'), scope=scope + '_bn2')
    return y + x
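A self-contained shape check (TF 1.x) for why the residual addition is valid:
REFLECT-padding by one pixel followed by a 3x3 VALID convolution leaves the
spatial size unchanged, so y and x always match:

import tensorflow as tf

x = tf.zeros([1, 64, 64, 256])
y = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")     # 66 x 66
w = tf.zeros([3, 3, 256, 256])
y = tf.nn.conv2d(y, w, strides=[1, 1, 1, 1], padding='VALID')  # back to 64 x 64
print(y.shape)  # (1, 64, 64, 256), same as x, so y + x is well-defined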
Example #4
def disc_block(img, dim, is_t, batch_size, sn, uc, senet):

    im = conv2d(img, dim, name='conv_1_{}'.format(dim))
    im = lrelu(bn(im, is_t=is_t, name='conv_bn_1_{}'.format(dim)))
    im = conv2d(im, dim, stride=2, k_w=1, k_h=1, name='conv_2_{}'.format(dim))
    im = bn(im, is_t=is_t, name='conv_bn_2_{}'.format(dim))
    if senet:
        im = se_layer(im, dim, 8, name='conv_se_{}'.format(dim))

    return im
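A hypothetical way to stack this block (TF 1.x); note that batch_size, sn and
uc are accepted but unused by the block as written:

im = img
for dim in (64, 128, 256, 512):
    with tf.variable_scope('disc_block_{}'.format(dim)):
        im = disc_block(im, dim, is_t=True, batch_size=8,
                        sn=False, uc=False, senet=True)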
Example #5
def d_net(img, scope, df_dim=64, is_training=True, reuse=False):
    global bn
    bn = functools.partial(bn, is_training=is_training)
    with tf.variable_scope(scope + '_d', reuse=reuse):
        n = lrelu(conv(img, df_dim, kernel_size=4, stride=2, scope='conv1'))
        n = lrelu(bn(conv(n, df_dim * 2, kernel_size=4, stride=2, scope='conv2'), scope='bn1'))
        # (64 x 64 x df_dim*2)
        n = lrelu(bn(conv(n, df_dim * 4, kernel_size=4, stride=2, scope='conv3'), scope='bn2'))
        # (32 x 32 x df_dim*4)
        n = lrelu(bn(conv(n, df_dim * 8, kernel_size=4, stride=1, scope='conv4'), scope='bn3'))
        # (32 x 32 x df_dim*8)
        n = conv(n, 1, kernel_size=4, stride=1, scope='conv5') 
        # (32 x 32 x 1)
    return n
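The 32 x 32 x 1 output is a PatchGAN-style map of per-patch real/fake scores.
A hedged usage sketch (TF 1.x) with a least-squares GAN loss as one common
choice; conv/bn/lrelu are assumed helpers, and the generator comes from
Example #1:

real = tf.placeholder(tf.float32, [None, 256, 256, 3])
fake = g_net(real, scope='A2B')
d_real = d_net(real, scope='B', is_training=True, reuse=False)
d_fake = d_net(fake, scope='B', is_training=True, reuse=True)
d_loss = (tf.reduce_mean(tf.squared_difference(d_real, 1.0))
          + tf.reduce_mean(tf.square(d_fake)))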
Example #6
def discriminator(x_in, y_in, reuse=False):
    ndf = 64
    input_ = tf.concat((x_in, y_in), 3)

    with tf.variable_scope('d_conv_1', reuse=reuse) as scope:
        k = tf.get_variable(scope.name + '_k', [5, 5, 4, ndf],
                            dtype=tf.float32,
                            initializer=tf.random_normal_initializer(
                                0.0, 0.02))
        pre = tf.nn.conv2d(input_, k, [1, 2, 2, 1], 'SAME')
        pre = lrelu(bn(pre))

    with tf.variable_scope('d_conv_2', reuse=reuse) as scope:
        k = tf.get_variable(scope.name + '_k', [5, 5, ndf, ndf * 2],
                            dtype=tf.float32,
                            initializer=tf.random_normal_initializer(0, 0.02))
        conv2 = tf.nn.conv2d(pre, k, [1, 2, 2, 1], 'SAME')
        conv2 = lrelu(bn(conv2))

    with tf.variable_scope('d_conv_3', reuse=reuse) as scope:
        k = tf.get_variable(scope.name + '_k', [5, 5, ndf * 2, ndf * 4],
                            dtype=tf.float32,
                            initializer=tf.random_normal_initializer(0, 0.02))
        conv3 = tf.nn.conv2d(conv2, k, [1, 2, 2, 1], 'SAME')
        conv3 = lrelu(bn(conv3))
    layers = []
    layers.append(conv3)
    for i in range(1):
        with tf.variable_scope('d_rescov_%d' % i, reuse=reuse) as scope:
            k = tf.get_variable(scope.name + '_k', [5, 5, ndf * 4, ndf * 8],
                                dtype=tf.float32,
                                initializer=tf.random_normal_initializer(
                                    0., 0.02))
            res = tf.nn.conv2d(layers[-1], k, [1, 1, 1, 1], 'SAME')
            res = lrelu(bn(res))
            layers.append(res)
    with tf.variable_scope('d_out', reuse=reuse) as scope:
        k = tf.get_variable(scope.name + '_k', [5, 5, ndf * 8, 1],
                            dtype=tf.float32,
                            initializer=tf.random_normal_initializer(0, 0.02))
        out = tf.nn.conv2d(layers[-1], k, [1, 1, 1, 1], 'SAME')
        out = tf.sigmoid(out)  # pix2pix
        layers.append(out)

    return layers[-1]
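A hypothetical usage (TF 1.x): the first kernel has shape [5, 5, 4, ndf], so
the concatenated condition x_in and candidate y_in must total four channels,
e.g. a one-channel condition paired with an RGB image:

x_in = tf.placeholder(tf.float32, [None, 128, 128, 1])    # condition
y_real = tf.placeholder(tf.float32, [None, 128, 128, 3])  # real target
y_fake = tf.placeholder(tf.float32, [None, 128, 128, 3])  # stand-in for a generator output
d_real = discriminator(x_in, y_real, reuse=False)
d_fake = discriminator(x_in, y_fake, reuse=True)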
Example #7
def decoder_block(img, dim, is_t, batch_size, skip, senet):

    with tf.variable_scope('upsample'):

        h = skip.shape[1]
        w = skip.shape[2]

        im = tf.image.resize_bilinear(img, [h, w], name='bilinear')
        im = conv2d(im, dim, k_w=1, k_h=1, name='conv_bilinear_{}'.format(dim))
        im = tf.nn.relu(
            bn(im, is_t=is_t, name='conv_bilinear_bn_{}'.format(dim)))
        im = conv2d(im, dim, name='conv_{}'.format(dim))
        im = tf.nn.relu(bn(im, is_t=is_t, name='conv_bn_{}'.format(dim)))
        if senet:
            im = se_layer(im, dim, 8, name='conv_se_{}'.format(dim))
        im = tf.concat([im, skip], 3)

        return im
Example #8
def encoder_block(img, dim, is_t, batch_size, senet):

    with tf.variable_scope('downsample'):

        im = conv2d(img,
                    dim,
                    stride=2,
                    k_w=1,
                    k_h=1,
                    name='conv_{}'.format(dim))
        im = tf.nn.relu(bn(im, is_t=is_t, name='conv_bn_{}'.format(dim)))
        im = conv2d(im, dim, name='conv_1_{}'.format(dim))
        im = tf.nn.relu(bn(im, is_t=is_t, name='conv_bn_1_{}'.format(dim)))
        if senet:
            im = se_layer(im, dim, 8, name='conv_se_{}'.format(dim))
        im = conv2d(im, dim * 2, name='conv_2_{}'.format(dim * 2))
        im = tf.nn.relu(bn(im, is_t=is_t, name='conv_bn_2_{}'.format(dim * 2)))

        return im
Example #9
    def HEU(self, input_x, is_training=False, scope='HEU'):
        with tf.variable_scope(scope) as scope:
            local_shortcut = input_x
            dense_shortcut = input_x

            for i in range(1, 3):
                with tf.variable_scope('ResBlock_{}'.format(i)):
                    with tf.variable_scope('Conv1'):
                        conv_tmp1 = slim.conv2d(local_shortcut,
                                                self.channel_dim, 3, 1)
                        conv_tmp1_bn = bn(conv_tmp1, is_training,
                                          UPDATE_G_OPS_COLLECTION)
                        out_tmp1 = tf.nn.relu(conv_tmp1_bn)

                    with tf.variable_scope('Conv2'):
                        conv_tmp2 = slim.conv2d(out_tmp1, self.channel_dim, 3,
                                                1)
                        conv_tmp2_bn = bn(conv_tmp2, is_training,
                                          UPDATE_G_OPS_COLLECTION)
                        out_tmp2 = tf.nn.relu(conv_tmp2_bn)
                        conv_shortcut = tf.add(local_shortcut, out_tmp2)

                dense_shortcut = tf.concat([dense_shortcut, conv_shortcut], -1)
                local_shortcut = conv_shortcut

            with tf.variable_scope('Trans'):
                conv_tmp3 = slim.conv2d(dense_shortcut, self.channel_dim, 3, 1)
                conv_tmp3_bn = bn(conv_tmp3, is_training,
                                  UPDATE_G_OPS_COLLECTION)
                conv_tmp3_se = self.SEBlock(conv_tmp3_bn,
                                            self.channel_dim,
                                            reduce_dim=int(self.channel_dim /
                                                           4))
                out_tmp3 = tf.nn.relu(conv_tmp3_se)
                heu_f = tf.add(input_x, out_tmp3)

            return heu_f
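A hedged usage note (TF 1.x): HEU preserves the channel count (the 'Trans'
conv projects the dense concat back down to channel_dim before the final
residual add), so units can be stacked; net is an assumed instance whose
channel_dim matches the input:

x = tf.placeholder(tf.float32, [None, 64, 64, 64])
h1 = net.HEU(x, is_training=True, scope='HEU_1')   # same shape as x
h2 = net.HEU(h1, is_training=True, scope='HEU_2')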
Example #10
    def generator(self):

        with tf.variable_scope('gen'):

            skip = []
            net = conv2d(self.img, 16, k_h=1, k_w=1, name='conv_input')
            net = tf.nn.relu(bn(net, self.is_t, name='conv_input_bn_64'))
            skip.append(net)

            for i in range(5):
                net = encoder_block(net, 2**(i + 4), self.is_t,
                                    self.batch_size, self.senet)
                skip.append(net)

            skip.reverse()

            for i in range(5):
                net = decoder_block(net, 2**(9 - i), self.is_t,
                                    self.batch_size, skip[i + 1], self.senet)

            net = conv2d(net, 3, name='conv_output')

            return tf.nn.tanh(net)
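A quick self-contained check of the widths the two loops request from the
blocks of Examples #7 and #8:

for i in range(5):
    print('encoder dim:', 2 ** (i + 4), ' decoder dim:', 2 ** (9 - i))
# encoder dims: 16 32 64 128 256 (Example #8's block then doubles each to dim*2);
# decoder dims: 512 256 128 64 32, fed the reversed skip list.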
Example #11
def decoder_block(img, skip, is_t):
    with tf.variable_scope('gen_upsample'):
        with slim.arg_scope([slim.separable_conv2d], depth_multiplier=1):
            shape = tf.shape(img)
            h, w = shape[1], shape[2]
            #block0 output_size=16
            im = tf.image.resize_bilinear(img, [h * 2, w * 2],
                                          name='upsample_16')
            im = slim.separable_conv2d(im, 512, [3, 3], scope='conv_sp_512')
            im = bn(im, is_t=is_t, name='bn_sp_512')
            im = concat(im, skip[4], name='cat_512')
            #block1 output_size=32
            im = conv2d(im, output_dim=256, name='conv_256')
            im = tf.image.resize_bilinear(im, [h * 4, w * 4],
                                          name='upsample_32')
            im = slim.separable_conv2d(im, 256, [3, 3], scope='conv_sp_256')
            im = bn(im, is_t=is_t, name='bn_sp_256')
            im = concat(im, skip[3], name='cat_256')
            #block2 output_size=64
            im = conv2d(im, output_dim=128, name='conv_128')
            im = tf.image.resize_bilinear(im, [h * 8, w * 8],
                                          name='upsample_64')
            im = slim.separable_conv2d(im, 128, [3, 3], scope='conv_sp_128')
            im = bn(im, is_t=is_t, name='bn_sp_128')
            im = concat(im, skip[2], name='cat_128')
            #block3 output_size=128
            im = conv2d(im, output_dim=64, name='conv_64')
            im = tf.image.resize_bilinear(im, [h * 16, w * 16],
                                          name='upsample_128')
            im = slim.separable_conv2d(im, 64, [3, 3], scope='conv_sp_64')
            im = bn(im, is_t=is_t, name='bn_sp_64')
            im = concat(im, skip[1], name='cat_64')
            #block4 output_size=256
            im = conv2d(im, output_dim=32, name='conv_32')
            im = tf.image.resize_bilinear(im, [h * 32, w * 32],
                                          name='upsample_256')
            im = slim.separable_conv2d(im, 32, [3, 3], scope='conv_sp_32')
            im = bn(im, is_t=is_t, name='bn_sp_32')
            im = concat(im, skip[0], name='cat_32')
            #output
            im = conv2d(im, output_dim=16, name='output_16')
            im = tf.nn.relu(bn(im, is_t=is_t, name='output_bn_16'))
            im = conv2d(im, output_dim=3, k_h=1, k_w=1, name='output_3')

            return im
Example #12
def encoder_block(img, is_t, senet):

    with tf.variable_scope('gen_downsample'):
        with slim.arg_scope([slim.separable_conv2d], depth_multiplier=1):

            skip = []
            im = conv2d(img, output_dim=16, k_w=1, k_h=1, name='conv0_16')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv0_bn_16'))
            skip.append(im)
            # block1 output_size=128
            im = conv2d(im, output_dim=32, stride=2, name='conv1_32')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv1_bn_32'))
            im = conv2d(im, output_dim=64, name='conv1_64')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv1_bn_64'))
            im_res = conv2d(im,
                            output_dim=128,
                            k_h=1,
                            k_w=1,
                            stride=2,
                            name='conv1_res_128')
            im_res = bn(im_res, is_t=is_t, name='conv1_res_bn_128')
            skip.append(im)
            # block2 output_size=64
            im = slim.separable_conv2d(im, 128, [3, 3], scope='conv2_128')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv2_bn_128'))
            im = slim.separable_conv2d(im, 128, [3, 3], scope='conv2_128_2')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv2_bn_128_2'))
            im = slim.separable_conv2d(im,
                                       128, [3, 3],
                                       stride=2,
                                       scope='conv2_128_3')
            im = bn(im, is_t=is_t, name='conv2_bn_128_3')
            if senet:
                im = se_layer(im, 128, 8, name='conv_se_128')
            im = tf.add(im, im_res)
            im_res = conv2d(im,
                            output_dim=256,
                            k_h=1,
                            k_w=1,
                            stride=2,
                            name='conv2_res_256')
            im_res = bn(im_res, is_t=is_t, name='conv2_res_bn_256')
            skip.append(im)
            #block3 output_size=32
            im = tf.nn.relu(im)
            im = slim.separable_conv2d(im, 256, [3, 3], scope='conv3_256')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv3_bn_256'))
            im = slim.separable_conv2d(im, 256, [3, 3], scope='conv3_256_2')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv3_bn_256_2'))
            im = slim.separable_conv2d(im,
                                       256, [3, 3],
                                       stride=2,
                                       scope='conv3_256_3')
            im = bn(im, is_t=is_t, name='conv3_bn_256_3')
            if senet:
                im = se_layer(im, 256, 8, name='conv_se_256')
            im = tf.add(im, im_res)
            im_res = conv2d(im,
                            output_dim=728,
                            k_h=1,
                            k_w=1,
                            stride=2,
                            name='conv3_res_728')
            im_res = bn(im_res, is_t=is_t, name='conv3_res_bn_728')
            skip.append(im)
            #block4 output_size=16
            im = tf.nn.relu(im)
            im = slim.separable_conv2d(im, 728, [3, 3], scope='conv4_728')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv4_bn_728'))
            im = slim.separable_conv2d(im, 728, [3, 3], scope='conv4_728_2')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv4_bn_728_2'))
            im = slim.separable_conv2d(im,
                                       728, [3, 3],
                                       stride=2,
                                       scope='conv4_728_3')
            im = bn(im, is_t=is_t, name='conv4_bn_728_3')
            if senet:
                im = se_layer(im, 728, 8, name='conv_se_728')
            im = tf.add(im, im_res)
            skip.append(im)

            #Middle flow
            for i in range(8):
                im_res = im
                im = tf.nn.relu(im)
                im = slim.separable_conv2d(im,
                                           728, [3, 3],
                                           scope='conv_mf_sp1_{}'.format(i))
                im = tf.nn.relu(
                    bn(im, is_t=is_t, name='conv_mf_bn1_{}'.format(i)))
                im = slim.separable_conv2d(im,
                                           728, [3, 3],
                                           scope='conv_mf_sp2_{}'.format(i))
                im = tf.nn.relu(
                    bn(im, is_t=is_t, name='conv_mf_bn2_{}'.format(i)))
                im = slim.separable_conv2d(im,
                                           728, [3, 3],
                                           scope='conv_mf_sp3_{}'.format(i))
                im = bn(im, is_t=is_t, name='conv_mf_bn3_{}'.format(i))
                if senet:
                    im = se_layer(im, 728, 8, name='conv_se_728_{}'.format(i))
                im = tf.add(im, im_res, name='con_mf_add_{}'.format(i))

            #Exit flow
            im_res = conv2d(im,
                            output_dim=1024,
                            k_w=1,
                            k_h=1,
                            stride=2,
                            name='conv_res_ex_1024')
            im_res = bn(im_res, is_t=is_t, name='conv_res_ex_bn_1024')
            im = tf.nn.relu(im, name='conv_exit_relu')
            im = slim.separable_conv2d(im, 728, [3, 3], scope='conv_ex_728')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_ex_bn_728'))
            im = slim.separable_conv2d(im, 1024, [3, 3], scope='conv_ex1_1024')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_ex1_bn_1024'))
            im = slim.separable_conv2d(im,
                                       1024, [3, 3],
                                       stride=2,
                                       scope='conv_ex2_1024')
            im = bn(im, is_t=is_t, name='conv_ex2_bn_1024')
            if senet:
                im = se_layer(im, 1024, 8, name='conv_se_1024')
            im = tf.add(im, im_res, name='conv5_add')
            #Output
            im = tf.nn.relu(im)
            im = slim.separable_conv2d(im, 1536, [3, 3], scope='conv_out_1536')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_out_bn_1536'))
            im = slim.separable_conv2d(im,
                                       1536, [3, 3],
                                       scope='conv_out2_1536')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_out2_bn_1536'))
            im = slim.separable_conv2d(im, 2048, [3, 3], scope='conv_out_2048')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_out_bn_2048'))
            if senet:
                im = se_layer(im, 2048, 8, name='conv_se_2048')
            return im, skip
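This encoder follows the Xception layout: an entry flow with strided 1x1
residual projections, eight middle-flow blocks at 728 channels, then an exit
flow. A hedged usage sketch (TF 1.x), assuming the conv2d/bn/se_layer helpers
from the surrounding codebase:

img = tf.placeholder(tf.float32, [None, 256, 256, 3])
features, skip = encoder_block(img, is_t=True, senet=True)
# skip holds five intermediate maps, presumably consumed by the decoder in
# Example #11 via skip[4] down to skip[0].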
Example #13
    def generator(self, z, Keep_prob, is_training=True, reuse=False):
        # x, delta, n_steps
        # z: [self.batch_size, self.z_dim]
        # First feed the noise into the RNN, then feed the previous output
        # back in as the next input; a future version could feed both the
        # noise and the previous output at every step.
        with tf.variable_scope("g_enerator", reuse=reuse):
            # generate

            wr_h = tf.get_variable(
                "g_wr_h",
                shape=[self.n_inputs, self.n_hidden_units],
                initializer=tf.contrib.layers.xavier_initializer())
            w_out = tf.get_variable(
                "g_w_out",
                shape=[self.n_hidden_units, self.n_inputs],
                initializer=tf.contrib.layers.xavier_initializer())
            br_h = tf.get_variable("g_br_h",
                                   shape=[
                                       self.n_hidden_units,
                                   ],
                                   initializer=tf.constant_initializer(0.0))
            b_out = tf.get_variable("g_b_out",
                                    shape=[
                                        self.n_inputs,
                                    ],
                                    initializer=tf.constant_initializer(0.0))
            w_z = tf.get_variable(
                "g_w_z",
                shape=[self.z_dim, self.n_inputs],
                initializer=tf.contrib.layers.xavier_initializer())
            b_z = tf.get_variable("g_b_z",
                                  shape=[
                                      self.n_inputs,
                                  ],
                                  initializer=tf.constant_initializer(0.0))

            #self.times=tf.reshape(self.times,[self.batch_size,self.n_steps,self.n_inputs])
            #change z's dimension
            # batch_size*z_dim-->batch_size*n_inputs
            x = tf.matmul(z, w_z) + b_z
            x = tf.reshape(x, [-1, self.n_inputs])
            delta_zero = tf.constant(0.0,
                                     shape=[self.batch_size, self.n_inputs])
            #delta_normal=tf.constant(48.0*60.0/self.gen_length,shape=[self.batch_size,self.n_inputs])
            #delta:[batch_size,1,n_inputs]

            # combine X_in
            rth = tf.matmul(delta_zero, wr_h) + br_h
            rth = math_ops.exp(-tf.maximum(0.0, rth))
            x = tf.concat([x, rth], 1)

            X_in = tf.reshape(x, [-1, 1, self.n_inputs + self.n_hidden_units])

            init_state = self.grud_cell_g.zero_state(
                self.batch_size, dtype=tf.float32)  # initialize an all-zero state
            #z=tf.reshape(z,[self.batch_size,1,self.z_dim])
            seq_len = tf.constant(1, shape=[self.batch_size])

            outputs, final_state = tf.nn.dynamic_rnn(self.grud_cell_g, X_in, \
                                initial_state=init_state,\
                                sequence_length=seq_len,
                                time_major=False)
            init_state = final_state
            #outputs: batch_size*1*n_hidden
            outputs = tf.reshape(outputs, [-1, self.n_hidden_units])
            # full connect
            out_predict = tf.matmul(tf.nn.dropout(outputs, Keep_prob),
                                    w_out) + b_out
            randomms = sample_M(self.batch_size, self.n_inputs, 1)
            # 1 means randomms is all zeros
            out_predict = self.get_time(
                self.lastvalues, 0) * randomms + (1 - randomms) * out_predict
            out_predict = tf.reshape(out_predict, [-1, 1, self.n_inputs])

            total_result = tf.multiply(out_predict, 1.0)

            for i in range(1, self.n_steps):
                out_predict = tf.reshape(out_predict,
                                         [self.batch_size, self.n_inputs])
                # add noise z to the output
                #out_predict=out_predict+tf.matmul(z,w_z)+b_z
                #
                delta_normal = tf.reshape(
                    self.imputed_deltaPre[:, i:(i + 1), :],
                    [self.batch_size, self.n_inputs])
                rth = tf.matmul(delta_normal, wr_h) + br_h
                rth = math_ops.exp(-tf.maximum(0.0, rth))
                x = tf.concat([out_predict, rth], 1)
                X_in = tf.reshape(x,
                                  [-1, 1, self.n_inputs + self.n_hidden_units])

                outputs, final_state = tf.nn.dynamic_rnn(self.grud_cell_g, X_in, \
                            initial_state=init_state,\
                            sequence_length=seq_len,
                            time_major=False)
                init_state = final_state
                outputs = tf.reshape(outputs, [-1, self.n_hidden_units])
                out_predict = tf.matmul(tf.nn.dropout(outputs, Keep_prob),
                                        w_out) + b_out
                randomms = sample_M(self.batch_size, self.n_inputs, 1)
                out_predict = self.get_time(self.lastvalues, i) * randomms + (
                    1 - randomms) * out_predict
                out_predict = tf.reshape(out_predict, [-1, 1, self.n_inputs])
                total_result = tf.concat([total_result, out_predict], 1)

            # delta: [batch_size, n_steps, n_inputs]

            if self.isbatch_normal:
                with tf.variable_scope("g_bn", reuse=tf.AUTO_REUSE):
                    total_result = bn(total_result,
                                      is_training=is_training,
                                      scope="g_bn_imple")

            return total_result
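The temporal-decay gate used at each step above, in isolation (self-contained
TF 1.x sketch): rth = exp(-max(0, delta @ wr_h + br_h)), a GRU-D style decay
computed from the time gaps and concatenated with the step input:

import tensorflow as tf

n_inputs, n_hidden = 10, 32
delta = tf.placeholder(tf.float32, [None, n_inputs])  # time gaps per feature
wr_h = tf.get_variable('wr_h', [n_inputs, n_hidden])
br_h = tf.get_variable('br_h', [n_hidden], initializer=tf.zeros_initializer())
rth = tf.exp(-tf.maximum(0.0, tf.matmul(delta, wr_h) + br_h))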
Example #14
def generator(x_in, reuse=False):
    ngf = 64
    conv_list = [[5, 5, 1, ngf], [5, 5, ngf, ngf * 2],
                 [5, 5, ngf * 2, ngf * 4], [5, 5, ngf * 4, ngf * 8],
                 [5, 5, ngf * 8, ngf * 8], [5, 5, ngf * 8, ngf * 8],
                 [5, 5, ngf * 8, ngf * 8], [5, 5, ngf * 8, ngf * 8]]
    res_list = [[5, 5, ngf * 8, ngf * 8]]
    #    deconv_list = [[5, 5, ngf*8*2, ngf*8],
    #                   [5, 5, ngf*8*2, ngf*8*2],
    #                   [5, 5, ngf*8*2, ngf*8*2],
    #                   [5, 5, ngf*8*2, ngf*8*2],
    #                   [5, 5, ngf*4*2, ngf*8*2],
    #                   [5, 5, ngf*2*2, ngf*4*2],
    #                   [5, 5, ngf*2, ngf*2*2]]
    #    out_shape = [[1, 2, 2, ngf*8*2],
    #                 [1, 4, 4, ngf*8*2],
    #                 [1, 8, 8, ngf*8*2],
    #                 [1, 16, 16, ngf*8*2],
    #                 [1, 32, 32, ngf*4*2],
    #                 [1, 64, 64, ngf*2*2],
    #                 [1, 128,128, ngf*2],
    #                 [1, 256, 256, 3]]
    deconv_list = [[5, 5, ngf * 4 * 2, ngf * 8],
                   [5, 5, ngf * 2 * 2, ngf * 4 * 2],
                   [5, 5, ngf * 2, ngf * 2 * 2]]
    out_shape = [[64, 16, 16, ngf * 4 * 2], [64, 32, 32, ngf * 2 * 2],
                 [64, 64, 64, ngf * 2], [64, 128, 128, 3]]
    layers = []
    layers.append(x_in)
    for i in range(4):
        with tf.variable_scope('g_conv_%d' % i, reuse=reuse) as scope:
            k = tf.get_variable(scope.name + '_k',
                                shape=conv_list[i],
                                dtype=tf.float32,
                                initializer=tf.random_normal_initializer(
                                    0.0, 0.02))
            pre = tf.nn.conv2d(layers[-1], k, [1, 2, 2, 1], 'SAME')
            pre = tf.nn.relu(bn(pre))
            layers.append(pre)

    for i in range(2):
        with tf.variable_scope('g_resblock_%d' % i, reuse=reuse) as scope:
            k = tf.get_variable(scope.name + '_k',
                                shape=res_list[0],
                                dtype=tf.float32,
                                initializer=tf.random_normal_initializer(
                                    0.0, 0.02))
            pre = tf.nn.conv2d(layers[-1], k, [1, 1, 1, 1], 'SAME')
            pre = tf.nn.relu(bn(pre))
            layers.append(pre)

    for i in range(3):
        with tf.variable_scope('g_deconv_%d' % i, reuse=reuse) as scope:
            k = tf.get_variable(scope.name + '_k',
                                shape=deconv_list[i],
                                dtype=tf.float32,
                                initializer=tf.random_normal_initializer(
                                    0.0, 0.02))
            pre = tf.nn.conv2d_transpose(layers[-1], k, out_shape[i],
                                         [1, 2, 2, 1], 'SAME')
            pre = tf.nn.relu(bn(pre))
            if i < 2:
                pre = tf.nn.dropout(pre, keep_prob=0.5)
            layers.append(pre)

    with tf.variable_scope('g_out', reuse=reuse) as scope:
        k = tf.get_variable(scope.name + '_k', [5, 5, 3, ngf * 2],
                            dtype=tf.float32,
                            initializer=tf.random_normal_initializer(
                                0.0, 0.02))
        out = tf.nn.conv2d_transpose(layers[-1], k, [batch_size, 128, 128, 3],
                                     [1, 2, 2, 1], 'SAME')
        layers.append(out)
    gen_out = layers[-1]
    gen_out = tf.nn.tanh(gen_out)
    return gen_out
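A hypothetical usage (TF 1.x): the first kernel [5, 5, 1, ngf] implies a
one-channel input, and the hard-coded 64s in out_shape imply a module-level
batch_size of 64, which g_out also reads:

batch_size = 64  # assumed; must match the hard-coded out_shape entries
x_in = tf.placeholder(tf.float32, [batch_size, 128, 128, 1])
fake = generator(x_in, reuse=False)   # -> [64, 128, 128, 3], tanh range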
Example #15
def encoder_block(img, is_t, senet):

    with tf.variable_scope('downsample_space'):
        with slim.arg_scope([slim.separable_conv2d], depth_multiplier=1):

            feature = []

            im = conv2d(img, 16, k_w=1, k_h=1, name='conv_16')
            im = bn(im, is_t=is_t, name='conv_bn_16')
            feature.append(im)
            # block1 output_size=128
            im = slim.separable_conv2d(im, 32, [3, 3], scope='conv_0_32')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_0_bn_32'))
            im = slim.separable_conv2d(im, 32, [3, 3], scope='conv_1_32')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_1_bn_32'))
            im_fe = slim.max_pool2d(im, [3, 3],
                                    stride=2,
                                    padding='same',
                                    scope='conv_max_pool')
            im_sp = slim.separable_conv2d(im,
                                          32, [3, 3],
                                          stride=2,
                                          scope='conv_sp_32')
            im = tf.concat([im_sp, im_fe], 3)
            im = conv2d(im, 32, k_h=1, k_w=1, name='conv_cat_32')
            im = bn(im, is_t=is_t, name='conv_cat_bn_32')
            im_res = slim.separable_conv2d(im,
                                           64, [3, 3],
                                           stride=2,
                                           scope='conv_res_64')
            im_res = bn(im_res, is_t=is_t, name='conv_res_bn_64')
            feature.append(im)

            # block2 output_size=64
            im = slim.separable_conv2d(im, 64, [3, 3], scope='conv_0_64')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_0_bn_64'))
            im = slim.separable_conv2d(im, 64, [3, 3], scope='conv_1_64')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_1_bn_64'))
            im_fe = slim.max_pool2d(im, [3, 3],
                                    stride=2,
                                    padding='same',
                                    scope='conv_max_pool')
            im_sp = slim.separable_conv2d(im,
                                          64, [3, 3],
                                          stride=2,
                                          scope='conv_sp_64')
            im = tf.concat([im_sp, im_fe], 3)
            im = conv2d(im, 64, k_h=1, k_w=1, name='conv_cat_64')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_cat_bn_64') + im_res)
            im = se_layer(im, 64, 16, name='conv_se_64')
            im_res = conv2d(im,
                            128,
                            k_h=1,
                            k_w=1,
                            stride=2,
                            name='conv_res_128')
            im_res = bn(im_res, is_t=is_t, name='conv_res_bn_128')
            feature.append(im)

            # block3 output_size=32
            im = slim.separable_conv2d(im, 128, [3, 3], scope='conv_0_128')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_0_bn_128'))
            im = slim.separable_conv2d(im, 128, [3, 3], scope='conv_1_128')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_1_bn_128'))
            im_fe = slim.max_pool2d(im, [3, 3],
                                    stride=2,
                                    padding='same',
                                    scope='conv_max_pool')
            im_sp = slim.separable_conv2d(im,
                                          128, [3, 3],
                                          stride=2,
                                          scope='conv_sp_128')
            im = tf.concat([im_sp, im_fe], 3)
            im = conv2d(im, 128, k_h=1, k_w=1, name='conv_cat_128')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_cat_bn_128') + im_res)
            im_res = conv2d(im,
                            256,
                            k_h=1,
                            k_w=1,
                            stride=2,
                            name='conv_res_256')
            im_res = bn(im_res, is_t=is_t, name='conv_res_bn_256')
            feature.append(im)

            # block4 output_size=16
            im = slim.separable_conv2d(im, 256, [3, 3], scope='conv_0_256')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_0_bn_256'))
            im = slim.separable_conv2d(im, 256, [3, 3], scope='conv_1_256')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_1_bn_256'))
            im_fe = slim.max_pool2d(im, [3, 3],
                                    stride=2,
                                    padding='same',
                                    scope='conv_max_pool')
            im_sp = slim.separable_conv2d(im,
                                          256, [3, 3],
                                          stride=2,
                                          scope='conv_sp_256')
            im = tf.concat([im_sp, im_fe], 3)
            im = conv2d(im, 256, k_h=1, k_w=1, name='conv_cat_256')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_cat_bn_256') + im_res)
            im = se_layer(im, 256, 16, name='conv_se_256')
            im_res = conv2d(im,
                            512,
                            k_h=1,
                            k_w=1,
                            stride=2,
                            name='conv_res_512')
            im_res = bn(im_res, is_t=is_t, name='conv_res_bn_512')
            feature.append(im)

            #block5 output_size=8
            im = slim.separable_conv2d(im, 512, [3, 3], scope='conv_0_512')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_0_bn_512'))
            im = slim.separable_conv2d(im, 512, [3, 3], scope='conv_1_512')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_1_bn_512'))
            im_fe = slim.max_pool2d(im, [3, 3],
                                    stride=2,
                                    padding='same',
                                    scope='conv_max_pool')
            im_sp = slim.separable_conv2d(im,
                                          512, [3, 3],
                                          stride=2,
                                          scope='conv_sp_512')
            im = tf.concat([im_sp, im_fe], 3)
            im = conv2d(im, 512, k_h=1, k_w=1, name='conv_512')
            im = tf.nn.relu(bn(im, is_t=is_t, name='conv_bn_512') + im_res)
            im = atrous_spatial_pyramid_pooling(im, scope='conv_aspp_256')

            return im, feature
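A hedged usage sketch (TF 1.x): each block fuses a strided separable conv
with a parallel max-pool before a 1x1 projection, and the bottleneck passes
through ASPP; se_layer, conv2d, bn and atrous_spatial_pyramid_pooling are
assumed helpers from the surrounding codebase:

img = tf.placeholder(tf.float32, [None, 256, 256, 3])
bottleneck, feature = encoder_block(img, is_t=True, senet=True)
# feature holds five skip maps, e.g. for the decoder in Example #2.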
Example #16
def generator(x_in, reuse=False):
    ngf = 64
    conv_list = [[5, 5, 1, ngf], [5, 5, ngf, ngf * 2],
                 [5, 5, ngf * 2, ngf * 4], [5, 5, ngf * 4, ngf * 8],
                 [5, 5, ngf * 8, ngf * 8], [5, 5, ngf * 8, ngf * 8],
                 [5, 5, ngf * 8, ngf * 8], [5, 5, ngf * 8, ngf * 8]]
    res_list = [[5, 5, ngf * 8, ngf * 8]]

    deconv_list = [[5, 5, ngf * 4 * 2, ngf * 8],
                   [5, 5, ngf * 2 * 2, ngf * 4 * 2],
                   [5, 5, ngf * 2, ngf * 2 * 2]]

    deconv_concate = [[5, 5, ngf * 2, ngf * 6], [5, 5, 3, ngf * 3]]
    out_shape = [[64, 16, 16, ngf * 4 * 2], [64, 32, 32, ngf * 2 * 2],
                 [64, 64, 64, ngf * 2], [64, 128, 128, 3]]
    layers = []
    layers.append(x_in)
    for i in range(4):
        with tf.variable_scope('g_conv_%d' % i, reuse=reuse) as scope:
            k = tf.get_variable(scope.name + '_k',
                                shape=conv_list[i],
                                dtype=tf.float32,
                                initializer=tf.random_normal_initializer(
                                    0.0, 0.02))
            pre = tf.nn.conv2d(layers[-1], k, [1, 2, 2, 1], 'SAME')
            pre = tf.nn.relu(bn(pre))
            layers.append(pre)

    for i in range(2):
        with tf.variable_scope('g_resblock_%d' % i, reuse=reuse) as scope:
            k = tf.get_variable(scope.name + '_k',
                                shape=res_list[0],
                                dtype=tf.float32,
                                initializer=tf.random_normal_initializer(
                                    0.0, 0.02))
            pre = tf.nn.conv2d(layers[-1], k, [1, 1, 1, 1], 'SAME')
            pre = tf.nn.relu(bn(pre))
            layers.append(pre)

    for i in range(2):
        with tf.variable_scope('g_deconv_%d' % i, reuse=reuse) as scope:
            k = tf.get_variable(scope.name + '_k',
                                shape=deconv_list[i],
                                dtype=tf.float32,
                                initializer=tf.random_normal_initializer(
                                    0.0, 0.02))
            pre = tf.nn.conv2d_transpose(layers[-1], k, out_shape[i],
                                         [1, 2, 2, 1], 'SAME')
            pre = tf.nn.relu(bn(pre))
            pre = tf.nn.dropout(pre, keep_prob=0.5)
            layers.append(pre)
    with tf.variable_scope('g_deconv_3', reuse=reuse) as scope:
        input_ = tf.concat((layers[-1], layers[2]), axis=3)
        k = tf.get_variable(scope.name + '_k',
                            shape=deconv_concate[0],
                            dtype=tf.float32,
                            initializer=tf.random_normal_initializer(
                                0.0, 0.02))
        pre = tf.nn.conv2d_transpose(input_,
                                     k,
                                     output_shape=out_shape[2],
                                     strides=[1, 2, 2, 1],
                                     padding='SAME')
        pre = tf.nn.relu(pre)
        layers.append(pre)

    with tf.variable_scope('g_out', reuse=reuse) as scope:
        # u-skip to the last layer
        input_ = tf.concat((layers[-1], layers[1]), axis=3)
        k = tf.get_variable(scope.name + '_k', [5, 5, 3, ngf * 3],
                            dtype=tf.float32,
                            initializer=tf.random_normal_initializer(
                                0.0, 0.02))
        out = tf.nn.conv2d_transpose(input_, k, [batch_size, 128, 128, 3],
                                     [1, 2, 2, 1], 'SAME')
        layers.append(out)
    gen_out = layers[-1]
    gen_out = tf.nn.tanh(gen_out)
    return gen_out
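A short usage sketch (TF 1.x): this variant differs from Example #14 by its
two u-skips, which concatenate layers[2] and layers[1] into the last two
transposed convolutions; a module-level batch_size of 64 is again assumed:

batch_size = 64  # assumed, matching the hard-coded out_shape entries
x_in = tf.placeholder(tf.float32, [batch_size, 128, 128, 1])
fake = generator(x_in, reuse=False)   # -> [64, 128, 128, 3] in [-1, 1]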
Example #17
    def build(self, rgb, label_num, kp, last_layer_type="softmax"):

        assert rgb.get_shape().as_list()[1:] == [224, 224, 3]

        self.conv1 = conv_layer(rgb, 7, 3, 64, 2, "scale1")
        self.conv1 = bn(self.conv1,
                        is_training=self.is_training,
                        name="scale1")
        self.conv1 = tf.nn.relu(self.conv1)
        self.conv1 = maxpool(self.conv1, 3, 2, "pool1")

        with tf.variable_scope("scale2"):
            self.block1_1 = res_block_3_layer(self.conv1, [64, 64, 256],
                                              "block1",
                                              change_dimension=True,
                                              block_stride=1,
                                              is_training=self.is_training)
            self.block1_2 = res_block_3_layer(self.block1_1, [64, 64, 256],
                                              "block2",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
            self.block1_3 = res_block_3_layer(self.block1_2, [64, 64, 256],
                                              "block3",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)

        with tf.variable_scope("scale3"):
            self.block2_1 = res_block_3_layer(self.block1_3, [128, 128, 512],
                                              "block1",
                                              change_dimension=True,
                                              block_stride=2,
                                              is_training=self.is_training)
            self.block2_2 = res_block_3_layer(self.block2_1, [128, 128, 512],
                                              "block2",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
            self.block2_3 = res_block_3_layer(self.block2_2, [128, 128, 512],
                                              "block3",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
            self.block2_4 = res_block_3_layer(self.block2_3, [128, 128, 512],
                                              "block4",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
        with tf.variable_scope("scale4"):
            self.block3_1 = res_block_3_layer(self.block2_4, [256, 256, 1024],
                                              "block1",
                                              change_dimension=True,
                                              block_stride=2,
                                              is_training=self.is_training)
            self.block3_2 = res_block_3_layer(self.block3_1, [256, 256, 1024],
                                              "block2",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
            self.block3_3 = res_block_3_layer(self.block3_2, [256, 256, 1024],
                                              "block3",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
            self.block3_4 = res_block_3_layer(self.block3_3, [256, 256, 1024],
                                              "block4",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
            self.block3_5 = res_block_3_layer(self.block3_4, [256, 256, 1024],
                                              "block5",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
            self.block3_6 = res_block_3_layer(self.block3_5, [256, 256, 1024],
                                              "block6",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
        with tf.variable_scope("scale5"):
            self.block4_1 = res_block_3_layer(self.block3_6, [512, 512, 2048],
                                              "block1",
                                              change_dimension=True,
                                              block_stride=2,
                                              is_training=self.is_training)
            self.block4_2 = res_block_3_layer(self.block4_1, [512, 512, 2048],
                                              "block2",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
            self.block4_3 = res_block_3_layer(self.block4_2, [512, 512, 2048],
                                              "block3",
                                              change_dimension=False,
                                              block_stride=1,
                                              is_training=self.is_training)
        with tf.variable_scope("fc"):
            self.pool2 = maxpool(self.block4_3, 7, 1, "pool2")
            self.fc1 = fc_layer(self.pool2, 2048, 2048, "fc1")
            self.fc1 = tf.nn.relu(tf.nn.dropout(self.fc1, keep_prob=kp))
            self.fc2 = fc_layer(self.fc1, 2048, label_num, "fc2")

        if last_layer_type == "sigmoid":
            self.prob = tf.nn.sigmoid(self.fc2)
        elif last_layer_type == "softmax":
            self.prob = tf.nn.softmax(self.fc2)
        elif last_layer_type == "no":
            self.prob = self.fc2
        return self.prob
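The block counts across scale2..scale5 (3, 4, 6, 3) match ResNet-50. A
hypothetical usage sketch (TF 1.x), assuming a wrapper class exposing this
build method and an is_training attribute:

rgb = tf.placeholder(tf.float32, [None, 224, 224, 3])
kp = tf.placeholder(tf.float32)            # dropout keep probability
model = ResNet()                           # assumed wrapper class
prob = model.build(rgb, label_num=1000, kp=kp, last_layer_type="softmax")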