Example #1
    def generator(self, z):
        with tf.variable_scope("generator") as scope:
            self.z_, self.h0_w, self.h0_b = linear(z, self.gf_dim*8*4*4, 'g_h0_lin', with_w=True)

            # TODO: Nicer iteration pattern here. #readability
            hs = [None]
            hs[0] = tf.reshape(self.z_, [-1, 4, 4, self.gf_dim * 8])
            hs[0] = tf.nn.relu(self.g_bns[0](hs[0], self.training_bool))

            i = 1 # Iteration number.
            depth_mul = 8  # Depth decreases as spatial component increases.
            size = 8  # Size increases as depth decreases.

            # For 64x64 output: three stride-2 deconv blocks in the loop,
            # plus the final layer below (4x4 -> 8 -> 16 -> 32 -> 64).
            while size < self.image_size:
                hs.append(None)
                name = 'g_h{}'.format(i)
                hs[i], _, _ = conv2d_transpose(hs[i-1],
                    [self.batch_size, size, size, self.gf_dim*depth_mul], name=name, with_w=True)
                hs[i] = tf.nn.relu(self.g_bns[i](hs[i], self.training_bool))

                i += 1
                depth_mul //= 2
                size *= 2

            hs.append(None)
            name = 'g_h{}'.format(i)
            hs[i], _, _ = conv2d_transpose(hs[i - 1],
                [self.batch_size, size, size, 3], name=name, with_w=True)

            return tf.nn.tanh(hs[i])
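
The loop above leans on a single primitive: a stride-2 transposed convolution with SAME padding, which doubles both spatial dimensions while the caller halves the channel count. The sketch below is mine, not taken from the example; x and w are hypothetical tensors, and it uses the raw tf.nn.conv2d_transpose op (TF 2.x eager mode assumed) rather than the example's conv2d_transpose wrapper.

import tensorflow as tf

batch, size, in_ch, out_ch = 4, 8, 64, 32
x = tf.random.normal([batch, size, size, in_ch])
# Filter layout for tf.nn.conv2d_transpose is [h, w, out_channels, in_channels].
w = tf.random.normal([5, 5, out_ch, in_ch])
y = tf.nn.conv2d_transpose(x, w,
                           output_shape=[batch, size * 2, size * 2, out_ch],
                           strides=[1, 2, 2, 1], padding='SAME')
print(y.shape)  # (4, 16, 16, 32): spatial size doubled, depth halved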
Example #2
def generator_context(z):
    n = 32
    with arg_scope([conv2d, conv2d_transpose], batch_norm_params=batch_norm_params, stddev=0.02):
        z = z*2 - 1  # affine rescale (maps [0, 1] inputs to [-1, 1])
        d = 8
        z = fc(z, num_units_out=d*d*32, batch_norm_params=batch_norm_params)
        c = z.get_shape()[1].value // (d*d)  # integer division: tf.reshape needs an int channel count
        z = tf.reshape(z, (-1, d, d, c))
        o = conv2d_transpose(z, n, (3, 3), stride=(2, 2))
        o = conv2d_transpose(o, n, (3, 3), stride=(2, 2))
        o = conv2d(o, num_filters_out=n, kernel_size=(3, 3), stride=1)
        o = conv2d(o, num_filters_out=4, kernel_size=(3, 3), stride=1)
        attended = o
        return attended
Example #3
 def generator(self, images):
     with tf.variable_scope("generator"):
         g_h0 = tf.nn.relu(conv2d(images, 16, name='g_encode_0'))
         g_h1 = tf.nn.relu(conv2d(g_h0, 32, name='g_encode_1'))
         g_h2 = tf.nn.relu(conv2d(g_h1, 64, name='g_encode_2'))
         g_flat = tf.reshape(g_h2, [self.batch_size, -1])
         g_encode = linear(g_flat, 128, 'g_encode')
         g_decode = linear(g_encode, 512 * 4 * 4, 'g_h0')
         g_h3 = tf.nn.relu(tf.reshape(g_decode, [self.batch_size, 4, 4, 512]))
         g_h4 = tf.nn.relu(conv2d_transpose(g_h3, [self.batch_size, 8, 8, 256], name='g_h1'))
         g_h5 = tf.nn.relu(conv2d_transpose(g_h4, [self.batch_size, 16, 16, 128], name='g_h2'))
         g_h6 = tf.nn.relu(conv2d_transpose(g_h5, [self.batch_size, 32, 32, 64], name='g_h3'))
         g_h7 = conv2d_transpose(g_h6, [self.batch_size, 64, 64, 3], name='g_h4')
         return tf.nn.tanh(g_h7)
Example #4
def generator(z):
    n = 32
    with arg_scope([conv2d, conv2d_transpose], batch_norm_params=batch_norm_params, stddev=0.02):
    z = z*2 - 1  # affine rescale (maps [0, 1] inputs to [-1, 1])
        d = 8
        z = fc(z, num_units_out=d*d*32, batch_norm_params=batch_norm_params)
    c = z.get_shape()[1].value // (d*d)  # integer division, as in Example #2
        z = tf.reshape(z, (-1, d, d, c))
        o = conv2d_transpose(z, n, (3, 3), stride=(2, 2))
        o = conv2d_transpose(o, n, (3, 3), stride=(2, 2))
        o = conv2d(o, num_filters_out=n*2, kernel_size=(3, 3), stride=1)
        o = conv2d(o, num_filters_out=1, kernel_size=(3, 3), stride=1, padding="VALID", batch_norm_params=None)
        out = o[:, 1:29, 1:29, :]
        return out
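
For reference, here is how the final 28x28 crop comes about. This is my own size arithmetic, assuming the transposed convolutions use stride 2 with SAME padding and the plain convolutions use stride 1:

d = 8        # fc output reshaped to 8x8
d *= 2       # conv2d_transpose, stride 2, SAME -> 16x16
d *= 2       # conv2d_transpose, stride 2, SAME -> 32x32
             # 3x3 conv, stride 1, SAME         -> 32x32
d -= 2       # 3x3 conv, stride 1, VALID        -> 30x30
print(d)     # 30; the slice o[:, 1:29, 1:29, :] crops this to 28x28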
Example #5
def _generator_dcgan(inpt, gf_dim, is_training):
    with tf.variable_scope('Generator'):
        z, h0_w, h0_b = ops.linear(inpt, gf_dim * 8 * 4 * 4, scope='g0_dcgan',
                                   with_w=True)
        hs = [None]
        hs[0] = tf.reshape(z, [-1, 4, 4, gf_dim * 8])

        # batch-norm parameters
        decay = 0.9
        epsilon = 1e-5
        # apply batch norm
        bn = tf.contrib.layers.batch_norm(hs[0], decay=decay,
                                          updates_collections=None,
                                          epsilon=epsilon,
                                          center=True, scale=True,
                                          is_training=is_training,
                                          scope='g_bn0')
        hs[0] = tf.nn.relu(bn)

        i = 1  # Iteration number.
        depth_mul = 8  # Depth decreases as spatial component increases.
        size = 8  # Size increases as depth decreases.

        while size < gf_dim:  # NOTE: gf_dim doubles as the target output resolution here
            hs.append(None)
            name = 'g{}_dcgan'.format(i)
            # NOTE: batch_size is not a parameter of this function; it must be
            # defined in the enclosing module scope.
            hs[i], _, _ = ops.conv2d_transpose(hs[i - 1],
                                               [batch_size, size, size,
                                                gf_dim * depth_mul], scope=name,
                                               with_w=True)
            bn = tf.contrib.layers.batch_norm(hs[i], decay=decay,
                                              updates_collections=None,
                                              epsilon=epsilon,
                                              center=True, scale=True,
                                              is_training=is_training,
                                              scope='g_bn{}'.format(i))

            hs[i] = tf.nn.relu(bn)
            i += 1
            depth_mul //= 2
            size *= 2

        hs.append(None)
        name = 'g{}_dcgan'.format(i)
        hs[i], _, _ = ops.conv2d_transpose(hs[i - 1],
                                       [batch_size, size, size, 1], scope=name,
                                       with_w=True)

    return tf.nn.tanh(hs[i])
Example #6
 def generator(self, images):
     with tf.variable_scope("generator"):
         g_h0 = tf.nn.relu(conv2d(images, 64, 5, 5, 1, 1, name='g_h0'))
         g_h1 = tf.nn.relu(self.g_bns[0](conv2d(g_h0, 128, 3, 3, 2, 2, name='g_h1')))
         g_h2 = tf.nn.relu(self.g_bns[1](conv2d(g_h1, 128, 3, 3, 1, 1, name='g_h2')))
         g_h3 = tf.nn.relu(self.g_bns[2](conv2d(g_h2, 256, 3, 3, 2, 2, name='g_h3')))
         g_h4 = tf.nn.relu(self.g_bns[3](conv2d(g_h3, 256, 3, 3, 1, 1, name='g_h4')))
         g_h5 = tf.nn.relu(self.g_bns[4](conv2d(g_h4, 256, 3, 3, 1, 1, name='g_h5')))
         g_h6 = tf.nn.relu(self.g_bns[5](dilated_conv2d(g_h5, 256, 3, 3, 2, name='g_h6')))
         g_h7 = tf.nn.relu(self.g_bns[6](dilated_conv2d(g_h6, 256, 3, 3, 4, name='g_h7')))
         g_h8 = tf.nn.relu(self.g_bns[7](dilated_conv2d(g_h7, 256, 3, 3, 8, name='g_h8')))
         g_h9 = tf.nn.relu(self.g_bns[8](dilated_conv2d(g_h8, 256, 3, 3, 16, name='g_h9')))
         g_h10 = tf.nn.relu(self.g_bns[9](conv2d(g_h9, 256, 3, 3, 1, 1, name='g_h10')))
         g_h11 = tf.nn.relu(self.g_bns[10](conv2d(g_h10, 256, 3, 3, 1, 1, name='g_h11')))
         g_h12 = tf.nn.relu(self.g_bns[11](conv2d_transpose(
             g_h11, [self.batch_size, int(self.image_size/2), int(self.image_size/2), 128], 4, 4, 2, 2, name='g_h12')))
         g_h13 = tf.nn.relu(self.g_bns[12](conv2d(g_h12, 128, 3, 3, 1, 1, name='g_h13')))
         g_h14 = tf.nn.relu(self.g_bns[13](conv2d_transpose(
             g_h13, [self.batch_size, self.image_size, self.image_size, 64], 4, 4, 2, 2, name='g_h14')))
         g_h15 = tf.nn.relu(self.g_bns[14](conv2d(g_h14, 32, 3, 3, 1, 1, name='g_h15')))
         g_h16 = tf.nn.sigmoid(conv2d(g_h15, 3, 3, 3, 1, 1, name='g_h16'))
         return g_h16
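
Layers g_h6 through g_h9 above use dilated convolutions at rates 2, 4, 8, and 16, which grow the receptive field exponentially without any further downsampling. Below is a minimal stand-in for one such layer; it is my sketch using the raw TensorFlow op (TF 2.x eager mode assumed), not the example's dilated_conv2d helper:

import tensorflow as tf

x = tf.random.normal([1, 64, 64, 256])
w = tf.random.normal([3, 3, 256, 256])
# A rate-2 dilated 3x3 conv: the kernel samples every 2nd pixel, so it covers
# a 5x5 window while the spatial size stays the same.
y = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding='SAME',
                 dilations=[1, 2, 2, 1])
print(y.shape)  # (1, 64, 64, 256)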
Example #7
def decode(fm, training):
    fm = tf.reshape(fm, [-1, 2, 2, 256])
    network = conv2d_transpose('dcnv1', fm, 128, training, bn=False)
    network = conv2d_transpose('dcnv2', network, 64, training, bn=False)
    network = conv2d_transpose('dcnv3', network, 32, training, bn=False)
    network = conv2d_transpose('dcnv4', network, 16, training, bn=False)
    network = conv2d_transpose('dcnv5', network, 8, training, bn=False)
    network = conv2d_transpose('dcnv6', network, 4, training, bn=False)
    network = conv2d_transpose('dcnv7',
                               network,
                               3,
                               training,
                               activation_fn=tf.nn.sigmoid,
                               bn=False)

    return network
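
My own arithmetic for the chain above, assuming each conv2d_transpose wrapper uses stride 2 with SAME padding: seven doublings take the 2x2 feature map to 256x256.

size = 2
for _ in range(7):  # dcnv1 .. dcnv7
    size *= 2
print(size)  # 256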
Example #8
def inference1(img_l_batch,
               img_l_gra_batch,
               theme_ab_batch,
               theme_mask_batch,
               local_ab_batch,
               local_mask_batch,
               is_training=True,
               scope_name='UserGuide'):
    """
    :param img_l_batch: l channel of input image
    :param img_l_gra_batch: sobel edge map of l channel of input image
    :param theme_ab_batch: ab channel of input color theme
    :param theme_mask_batch: theme mask
    :param local_ab_batch: ab channel of input local points
    :param local_mask_batch: local points mask
    :param is_training: bool, whether the model is in training mode (vs. testing)
    :param scope_name: model name
    :return: ab channel of output image
    """
    assert img_l_batch.get_shape()[-1] == 1
    assert img_l_gra_batch.get_shape()[-1] == 1  # horizontal and vertical direction
    assert theme_ab_batch.get_shape()[-1] == 2
    assert theme_mask_batch.get_shape()[-1] == 1
    assert local_ab_batch.get_shape()[-1] == 2
    assert local_mask_batch.get_shape()[-1] == 1

    ngf = 64
    theme_batch = tf.concat([theme_ab_batch, theme_mask_batch], axis=3)
    local_batch = tf.concat([local_ab_batch, local_mask_batch], axis=3)
    print('Image  Inputs:', img_l_batch)
    print('Theme  Inputs:', theme_batch)
    print('Points Inputs:', local_batch)
    print()

    with tf.variable_scope(scope_name, reuse=tf.AUTO_REUSE):
        theme_batch = tf.reshape(theme_batch,
                                 [img_l_batch.get_shape()[0], 1, 1, -1])
        glob_conv1 = ops.conv2d(theme_batch,
                                ngf * 8,
                                1,
                                1,
                                activation_fn=tf.nn.relu,
                                norm_fn=tf.layers.batch_normalization,
                                is_training=is_training,
                                scope_name='glob_conv1')
        glob_conv2 = ops.conv2d(glob_conv1,
                                ngf * 8,
                                1,
                                1,
                                activation_fn=tf.nn.relu,
                                norm_fn=tf.layers.batch_normalization,
                                is_training=is_training,
                                scope_name='glob_conv2')
        glob_conv3 = ops.conv2d(glob_conv2,
                                ngf * 8,
                                1,
                                1,
                                activation_fn=tf.nn.relu,
                                norm_fn=tf.layers.batch_normalization,
                                is_training=is_training,
                                scope_name='glob_conv3')
        glob_conv4 = ops.conv2d(glob_conv3,
                                ngf * 8,
                                1,
                                1,
                                activation_fn=tf.nn.relu,
                                norm_fn=tf.layers.batch_normalization,
                                is_training=is_training,
                                scope_name='glob_conv4')
        print('ThemeBlock', glob_conv4)

        ab_conv1_1 = ops.conv2d(local_batch,
                                ngf,
                                3,
                                1,
                                activation_fn=tf.nn.relu,
                                norm_fn=None,
                                is_training=is_training,
                                scope_name='ab_conv1_1')
        bw_conv1_1 = ops.conv2d(img_l_batch,
                                ngf,
                                3,
                                1,
                                activation_fn=tf.nn.relu,
                                norm_fn=None,
                                is_training=is_training,
                                scope_name='bw_conv1_1')
        gra_conv1_1 = ops.conv2d(img_l_gra_batch,
                                 ngf,
                                 3,
                                 1,
                                 activation_fn=tf.nn.relu,
                                 norm_fn=None,
                                 is_training=is_training,
                                 scope_name='gra_conv1_1')
        print('LocalBlock', gra_conv1_1)

        conv1_1 = ab_conv1_1 + bw_conv1_1 + gra_conv1_1  # TODO: Merge Local Points and Gradient Maps
        conv1_1 = ops.conv2d(conv1_1,
                             ngf,
                             3,
                             1,
                             activation_fn=tf.nn.relu,
                             norm_fn=None,
                             is_training=is_training,
                             scope_name='conv1_1')
        conv1_2 = ops.conv2d(conv1_1,
                             ngf,
                             3,
                             1,
                             activation_fn=tf.nn.relu,
                             norm_fn=tf.layers.batch_normalization,
                             is_training=is_training,
                             scope_name='conv1_2')
        conv1_2_ss = ops.depth_wise_conv2d(conv1_2,
                                           1,
                                           1,
                                           2,
                                           activation_fn=None,
                                           scope_name='conv1_2_ss')
        print('ConvBlock 1', conv1_2_ss)

        conv2_1 = ops.conv2d(conv1_2_ss,
                             ngf * 2,
                             3,
                             1,
                             activation_fn=tf.nn.relu,
                             norm_fn=None,
                             is_training=is_training,
                             scope_name='conv2_1')
        conv2_2 = ops.conv2d(conv2_1,
                             ngf * 2,
                             3,
                             1,
                             activation_fn=tf.nn.relu,
                             norm_fn=tf.layers.batch_normalization,
                             is_training=is_training,
                             scope_name='conv2_2')
        conv2_2_ss = ops.depth_wise_conv2d(conv2_2,
                                           1,
                                           1,
                                           2,
                                           activation_fn=None,
                                           scope_name='conv2_2_ss')
        print('ConvBlock 2', conv2_2_ss)

        conv3_1 = ops.conv2d(conv2_2_ss,
                             ngf * 4,
                             3,
                             1,
                             activation_fn=tf.nn.relu,
                             norm_fn=None,
                             is_training=is_training,
                             scope_name='conv3_1')
        conv3_2 = ops.conv2d(conv3_1,
                             ngf * 4,
                             3,
                             1,
                             activation_fn=tf.nn.relu,
                             norm_fn=None,
                             is_training=is_training,
                             scope_name='conv3_2')
        conv3_3 = ops.conv2d(conv3_2,
                             ngf * 4,
                             3,
                             1,
                             activation_fn=tf.nn.relu,
                             norm_fn=tf.layers.batch_normalization,
                             is_training=is_training,
                             scope_name='conv3_3')
        conv3_3_ss = ops.depth_wise_conv2d(conv3_3,
                                           1,
                                           1,
                                           2,
                                           activation_fn=None,
                                           scope_name='conv3_3_ss')
        print('ConvBlock 3', conv3_3_ss)

        conv4_1 = ops.conv2d(conv3_3_ss,
                             ngf * 8,
                             3,
                             1,
                             activation_fn=tf.nn.relu,
                             norm_fn=None,
                             is_training=is_training,
                             scope_name='conv4_1')
        conv4_2 = ops.conv2d(conv4_1,
                             ngf * 8,
                             3,
                             1,
                             activation_fn=tf.nn.relu,
                             norm_fn=None,
                             is_training=is_training,
                             scope_name='conv4_2')
        conv4_3 = ops.conv2d(conv4_2,
                             ngf * 8,
                             3,
                             1,
                             activation_fn=tf.nn.relu,
                             norm_fn=tf.layers.batch_normalization,
                             is_training=is_training,
                             scope_name='conv4_3')
        print('ConvBlock 4', conv4_3)

        conv4_3 = conv4_3 + glob_conv4  # TODO: Merge Color Theme
        conv5_1 = ops.conv2d(conv4_3,
                             ngf * 8,
                             3,
                             1,
                             dilation=2,
                             activation_fn=tf.nn.relu,
                             norm_fn=None,
                             is_training=is_training,
                             scope_name='conv5_1')
        conv5_2 = ops.conv2d(conv5_1,
                             ngf * 8,
                             3,
                             1,
                             dilation=2,
                             activation_fn=tf.nn.relu,
                             norm_fn=None,
                             is_training=is_training,
                             scope_name='conv5_2')
        conv5_3 = ops.conv2d(conv5_2,
                             ngf * 8,
                             3,
                             1,
                             dilation=2,
                             activation_fn=tf.nn.relu,
                             norm_fn=tf.layers.batch_normalization,
                             is_training=is_training,
                             scope_name='conv5_3')
        print('ConvBlock 5', conv5_3)

        conv6_1 = ops.conv2d(conv5_3,
                             ngf * 8,
                             3,
                             1,
                             dilation=2,
                             activation_fn=tf.nn.relu,
                             norm_fn=None,
                             is_training=is_training,
                             scope_name='conv6_1')
        conv6_2 = ops.conv2d(conv6_1,
                             ngf * 8,
                             3,
                             1,
                             dilation=2,
                             activation_fn=tf.nn.relu,
                             norm_fn=None,
                             is_training=is_training,
                             scope_name='conv6_2')
        conv6_3 = ops.conv2d(conv6_2,
                             ngf * 8,
                             3,
                             1,
                             dilation=2,
                             activation_fn=tf.nn.relu,
                             norm_fn=tf.layers.batch_normalization,
                             is_training=is_training,
                             scope_name='conv6_3')
        print('ConvBlock 6', conv6_3)

        conv7_1 = ops.conv2d(conv6_3,
                             ngf * 8,
                             3,
                             1,
                             activation_fn=tf.nn.relu,
                             norm_fn=None,
                             is_training=is_training,
                             scope_name='conv7_1')
        conv7_2 = ops.conv2d(conv7_1,
                             ngf * 8,
                             3,
                             1,
                             activation_fn=tf.nn.relu,
                             norm_fn=None,
                             is_training=is_training,
                             scope_name='conv7_2')
        conv7_3 = ops.conv2d(conv7_2,
                             ngf * 8,
                             3,
                             1,
                             activation_fn=tf.nn.relu,
                             norm_fn=tf.layers.batch_normalization,
                             is_training=is_training,
                             scope_name='conv7_3')
        print('ConvBlock 7', conv7_3)

        conv3_3_short = ops.conv2d(conv3_3,
                                   ngf * 4,
                                   3,
                                   1,
                                   activation_fn=None,
                                   is_training=is_training,
                                   scope_name='conv3_3_short')
        conv8_1 = ops.conv2d_transpose(conv7_3,
                                       ngf * 4,
                                       4,
                                       2,
                                       activation_fn=None,
                                       is_training=is_training,
                                       scope_name='conv8_1')
        conv8_1_comb = tf.nn.relu(conv3_3_short + conv8_1)
        conv8_2 = ops.conv2d(conv8_1_comb,
                             ngf * 4,
                             3,
                             1,
                             activation_fn=tf.nn.relu,
                             norm_fn=None,
                             is_training=is_training,
                             scope_name='conv8_2')
        conv8_3 = ops.conv2d(conv8_2,
                             ngf * 4,
                             3,
                             1,
                             activation_fn=tf.nn.relu,
                             norm_fn=tf.layers.batch_normalization,
                             is_training=is_training,
                             scope_name='conv8_3')
        print('ConvBlock 8', conv8_3)

        conv2_2_short = ops.conv2d(conv2_2,
                                   ngf * 2,
                                   3,
                                   1,
                                   activation_fn=None,
                                   is_training=is_training,
                                   scope_name='conv2_2_short')
        conv9_1 = ops.conv2d_transpose(conv8_3,
                                       ngf * 2,
                                       4,
                                       2,
                                       activation_fn=None,
                                       is_training=is_training,
                                       scope_name='conv9_1')
        conv9_1_comb = tf.nn.relu(conv2_2_short + conv9_1)
        conv9_2 = ops.conv2d(conv9_1_comb,
                             ngf * 2,
                             3,
                             1,
                             activation_fn=tf.nn.relu,
                             norm_fn=tf.layers.batch_normalization,
                             is_training=is_training,
                             scope_name='conv9_2')
        print('ConvBlock 9', conv9_2)

        conv1_2_short = ops.conv2d(conv1_2,
                                   ngf * 2,
                                   3,
                                   1,
                                   activation_fn=None,
                                   is_training=is_training,
                                   scope_name='conv1_2_short')
        conv10_1 = ops.conv2d_transpose(conv9_2,
                                        ngf * 2,
                                        4,
                                        2,
                                        activation_fn=None,
                                        is_training=is_training,
                                        scope_name='conv10_1')
        conv10_1_comb = tf.nn.relu(conv1_2_short + conv10_1)
        conv10_2 = ops.conv2d(conv10_1_comb,
                              ngf * 2,
                              3,
                              1,
                              activation_fn=tf.nn.relu,
                              norm_fn=tf.layers.batch_normalization,
                              is_training=is_training,
                              scope_name='conv10_2')
        print('ConvBlock 10', conv10_2)

        conv10_ab = ops.conv2d(conv10_2,
                               2,
                               1,
                               1,
                               activation_fn=tf.nn.tanh,
                               norm_fn=None,
                               is_training=is_training,
                               scope_name='conv10_ab')
        print('OutputBlock', conv10_ab, end='\n\n')

    return conv10_ab
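
A note on the conv4_3 + glob_conv4 fusion above: the theme features live in a [N, 1, 1, C] tensor (see the reshape of theme_batch), so the addition broadcasts the global color theme over every spatial position of the [N, H, W, C] feature map. A minimal sketch of mine with hypothetical shapes:

import tensorflow as tf

feat = tf.random.normal([2, 28, 28, 512])   # conv4_3-like feature map
theme = tf.random.normal([2, 1, 1, 512])    # glob_conv4-like theme features
fused = feat + theme                        # broadcast add over H and W
print(fused.shape)  # (2, 28, 28, 512)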
Example #9
def generator(z, labels):
    with tf.variable_scope("generator") as scope:
        # concat z and labels
        z_labels = tf.concat([z, labels], 1)
        # project z and reshape
        oh, ow = FLAGS.output_height, FLAGS.output_width

        z_labels_ = ops.fc(z_labels, 1024 * (oh // 32) * (ow // 32), scope="project")  # // keeps shapes integral
        z_labels_ = tf.reshape(z_labels_, [-1, oh // 32, ow // 32, 1024])

        # batch norm
        norm0 = ops.batch_norm(z_labels_,
                               scope="batch_norm0",
                               is_training=True)

        # ReLU
        h0 = tf.nn.relu(norm0)

        # conv1
        conv1 = ops.conv2d_transpose(h0,
                                     [FLAGS.batch_size, oh // 16, ow // 16, 512],
                                     scope="conv_tranpose1")

        # batch norm
        norm1 = ops.batch_norm(conv1,
                               scope="batch_norm1",
                               is_training=FLAGS.is_train)

        # ReLU
        h1 = tf.nn.relu(norm1)

        # conv2
        conv2 = ops.conv2d_transpose(h1,
                                     [FLAGS.batch_size, oh // 8, ow // 8, 256],
                                     scope="conv_tranpose2")

        # batch norm
        norm2 = ops.batch_norm(conv2,
                               scope="batch_norm2",
                               is_training=FLAGS.is_train)

        # ReLU
        h2 = tf.nn.relu(norm2)

        # conv3
        conv3 = ops.conv2d_transpose(h2,
                                     [FLAGS.batch_size, oh // 4, ow // 4, 128],
                                     scope="conv_tranpose3")

        # batch norm
        norm3 = ops.batch_norm(conv3,
                               scope="batch_norm3",
                               is_training=FLAGS.is_train)

        # ReLU
        h3 = tf.nn.relu(norm3)

        # conv4
        conv4 = ops.conv2d_transpose(h3,
                                     [FLAGS.batch_size, oh // 2, ow // 2, 64],
                                     scope="conv_tranpose4")
        # batch norm
        norm4 = ops.batch_norm(conv4,
                               scope="batch_norm4",
                               is_training=FLAGS.is_train)

        # ReLU
        h4 = tf.nn.relu(norm4)

        conv5 = ops.conv2d_transpose(
            h4, [FLAGS.batch_size, oh, ow, FLAGS.input_channels],
            scope="conv_tranpose5")
        # tanh
        h5 = tf.nn.tanh(conv5)

        h5 = tf.map_fn(
            lambda i: tf.image.resize_images(
                i, [FLAGS.input_height, FLAGS.input_width]), h5)

    return h5
Example #10
      # (tail of a dconv() helper; the start of the function is not shown)
      delta = numpy.add.reduce(weighted, 3)
      image = partial_add(image, delta, [i, j])
  if padding > 0:
    return image[padding:-padding, padding:-padding]
  else:
    return image

# Intended shapes: value [-1, 14, 14, 64], fil [5, 5, 32, 64]; the toy arrays
# below are value [4, 2, 2, 3] and fil [1, 1, 2, 3].
p = np.array([1.0, 2.0, 3.0])
arr = np.array([[p,p],[p,p]])
a_value = np.array([arr, arr, arr, arr])
l = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
a_fil = np.array([[l]])
value = tf.constant(a_value, dtype=tf.float32)
fil   = tf.constant(a_fil, dtype=tf.float32)

#value = np.array([[[[1]],[[2]]],[[[3]],[[4]]]])
#fil = np.array([[[[2]]]])
#result = dconv(value, fil, padding=0)

result = dconv(a_value, a_fil, padding=0)  # nn_ops.deconv2d(value, fil, [-1,2,2,2], [1,1,1,1])
print(result)

#result = ops.partial_add(tf.constant([[1,1,1],[1,1,1],[1,1,1]],dtype=tf.float32), tf.constant([[1,1],[1,1]], dtype=tf.float32), [0,0])
result = ops.conv2d_transpose(value, fil, padding=0)

print(result)
print(result.eval())  # .eval() requires an active TF1 session (e.g. tf.InteractiveSession())
Example #11
    def generator(self, images):
        """
        This is the completion network
        :param images: 4D-tensor, shape is [batch, height, width, channels]
        :return: 4D-tensor, shape is [batch, height, width, channels], completed images tensor
        """
        with tf.variable_scope("generator"):
            g_h0 = tf.nn.relu(conv2d(images, 64, 5, 5, 1, 1, name='g_h0'))

            g_h1 = tf.nn.relu(self.g_bns[0](conv2d(g_h0,
                                                   128,
                                                   3,
                                                   3,
                                                   2,
                                                   2,
                                                   name='g_h1')))
            g_h2 = tf.nn.relu(self.g_bns[1](conv2d(g_h1,
                                                   128,
                                                   3,
                                                   3,
                                                   1,
                                                   1,
                                                   name='g_h2')))

            g_h3 = tf.nn.relu(self.g_bns[2](conv2d(g_h2,
                                                   256,
                                                   3,
                                                   3,
                                                   2,
                                                   2,
                                                   name='g_h3')))
            g_h4 = tf.nn.relu(self.g_bns[3](conv2d(g_h3,
                                                   256,
                                                   3,
                                                   3,
                                                   1,
                                                   1,
                                                   name='g_h4')))
            g_h5 = tf.nn.relu(self.g_bns[4](conv2d(g_h4,
                                                   256,
                                                   3,
                                                   3,
                                                   1,
                                                   1,
                                                   name='g_h5')))
            g_h6 = tf.nn.relu(self.g_bns[5](dilated_conv2d(g_h5,
                                                           256,
                                                           3,
                                                           3,
                                                           2,
                                                           name='g_h6')))
            g_h7 = tf.nn.relu(self.g_bns[6](dilated_conv2d(g_h6,
                                                           256,
                                                           3,
                                                           3,
                                                           4,
                                                           name='g_h7')))
            g_h8 = tf.nn.relu(self.g_bns[7](dilated_conv2d(g_h7,
                                                           256,
                                                           3,
                                                           3,
                                                           8,
                                                           name='g_h8')))
            g_h9 = tf.nn.relu(self.g_bns[8](dilated_conv2d(g_h8,
                                                           256,
                                                           3,
                                                           3,
                                                           16,
                                                           name='g_h9')))
            g_h10 = tf.nn.relu(self.g_bns[9](conv2d(g_h9,
                                                    256,
                                                    3,
                                                    3,
                                                    1,
                                                    1,
                                                    name='g_h10')))
            g_h11 = tf.nn.relu(self.g_bns[10](conv2d(g_h10,
                                                     256,
                                                     3,
                                                     3,
                                                     1,
                                                     1,
                                                     name='g_h11')))

            g_h12 = tf.nn.relu(self.g_bns[11](conv2d_transpose(
                g_h11,
                [self.batch_size, int(self.image_size / 2),
                 int(self.image_size / 2), 128],
                4,
                4,
                2,
                2,
                name='g_h12')))
            g_h13 = tf.nn.relu(self.g_bns[12](conv2d(g_h12,
                                                     128,
                                                     3,
                                                     3,
                                                     1,
                                                     1,
                                                     name='g_h13')))

            g_h14 = tf.nn.relu(self.g_bns[13](conv2d_transpose(
                g_h13, [self.batch_size, self.image_size, self.image_size, 64],
                4,
                4,
                2,
                2,
                name='g_h14')))
            g_h15 = tf.nn.relu(self.g_bns[14](conv2d(g_h14,
                                                     32,
                                                     3,
                                                     3,
                                                     1,
                                                     1,
                                                     name='g_h15')))
            g_h16 = tf.nn.sigmoid(conv2d(g_h15, 3, 3, 3, 1, 1, name='g_h16'))
            return g_h16
Example #12
    def create(self,
               inputs,
               kernel_size=None,
               seed=None,
               reuse_variables=None):
        output = inputs

        with tf.variable_scope(self.name, reuse=reuse_variables):

            layers = []

            # encoder branch
            for index, kernel in enumerate(self.encoder_kernels):

                name = 'conv' + str(index)
                output = conv2d(inputs=output,
                                name=name,
                                kernel_size=kernel_size,
                                filters=kernel[0],
                                strides=kernel[1],
                                activation=tf.nn.leaky_relu,
                                seed=seed)

                # save contracting path layers to be used for skip connections
                layers.append(output)

                if kernel[2] > 0:
                    keep_prob = 1.0 - kernel[2] if self.training else 1.0
                    output = tf.nn.dropout(output,
                                           keep_prob=keep_prob,
                                           name='dropout_' + name,
                                           seed=seed)

            # decoder branch
            for index, kernel in enumerate(self.decoder_kernels):

                name = 'deconv' + str(index)
                output = conv2d_transpose(inputs=output,
                                          name=name,
                                          kernel_size=kernel_size,
                                          filters=kernel[0],
                                          strides=kernel[1],
                                          activation=tf.nn.relu,
                                          seed=seed)

                if kernel[2] > 0:
                    keep_prob = 1.0 - kernel[2] if self.training else 1.0
                    output = tf.nn.dropout(output,
                                           keep_prob=keep_prob,
                                           name='dropout_' + name,
                                           seed=seed)

                # concat the layer from the contracting path with the output
                # of the current layer; concat only the channels (axis=3).
                # The index len(layers) - index - 2 walks backwards through the
                # saved encoder layers, skipping the deepest (bottleneck) one.
                output = tf.concat([layers[len(layers) - index - 2], output],
                                   axis=3)

            output = conv2d(
                inputs=output,
                name='conv_last',
                filters=self.output_channels,  # number of output channels
                kernel_size=1,  # last layer kernel size = 1
                strides=1,  # last layer stride = 1
                bnorm=False,  # do not use batch norm for the last layer
                activation=tf.nn.tanh,  # tanh activation for the output
                seed=seed)

            self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                              self.name)

            return output
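
The decoder's concat above is the standard U-Net skip connection: channel-wise merging of a saved encoder layer with the upsampled decoder output. A minimal sketch of mine with hypothetical shapes:

import tensorflow as tf

enc = tf.random.normal([1, 32, 32, 64])   # saved contracting-path layer
dec = tf.random.normal([1, 32, 32, 64])   # upsampled decoder output
merged = tf.concat([enc, dec], axis=3)    # channels double to 128
print(merged.shape)  # (1, 32, 32, 128)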
Example #13
def generator(z, labels):
    with tf.variable_scope("generator") as scope:
        # labels to one_hot
        labels_one_hot = tf.one_hot(labels, FLAGS.n_classes)

        # concat z and labels
        z_labels = tf.concat([z, labels_one_hot], 1)

        # project z and reshape
        oh, ow = FLAGS.output_height, FLAGS.output_width

        z_labels_ = ops.fc(z_labels, 512 * (oh // 16) * (ow // 16), scope="project")  # // keeps shapes integral
        z_labels_ = tf.reshape(z_labels_, [-1, oh // 16, ow // 16, 512])

        # batch norm
        norm0 = ops.batch_norm(
            z_labels_, scope="batch_norm0", is_training=True)

        # ReLU
        h0 = tf.nn.relu(norm0)

        # conv1
        conv1 = ops.conv2d_transpose(
            h0, [FLAGS.batch_size, oh // 8, ow // 8, 256],
            scope="conv_tranpose1")

        # batch norm
        norm1 = ops.batch_norm(conv1, scope="batch_norm1", is_training=True)

        # ReLU
        h1 = tf.nn.relu(norm1)

        # conv2
        conv2 = ops.conv2d_transpose(
            h1, [FLAGS.batch_size, oh // 4, ow // 4, 128],
            scope="conv_tranpose2")

        # batch norm
        norm2 = ops.batch_norm(conv2, scope="batch_norm2", is_training=True)

        # ReLU
        h2 = tf.nn.relu(norm2)

        # conv3
        conv3 = ops.conv2d_transpose(
            h2, [FLAGS.batch_size, oh // 2, ow // 2, 64], scope="conv_tranpose3")

        # batch norm
        norm3 = ops.batch_norm(conv3, scope="batch_norm3", is_training=True)

        # ReLU
        h3 = tf.nn.relu(norm3)

        # conv4
        conv4 = ops.conv2d_transpose(
            h3, [FLAGS.batch_size, oh, ow, FLAGS.input_channels],
            scope="conv_tranpose4")

        # tanh
        h4 = tf.nn.tanh(conv4)

    return h4
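
The conditioning step at the top of this generator reduces to one-hot encoding the label and concatenating it onto z. A minimal sketch of mine with hypothetical sizes:

import tensorflow as tf

z = tf.random.normal([16, 100])             # latent batch
labels = tf.constant([3] * 16)              # class ids
one_hot = tf.one_hot(labels, depth=10)      # (16, 10)
z_labels = tf.concat([z, one_hot], axis=1)  # (16, 110)
print(z_labels.shape)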