Code example #1
def discriminator_base(inputs):
    with tf.name_scope('discriminator_base'):
        #net = layers.batch_norm(inputs, training, name='bn1')
        net = layers.conv2d_layer(1,
                                  inputs, [5, 5, 16],
                                  lambda x: layers.lrelu(x, 0.2),
                                  stride=2)
        #net = layers.batch_norm(net, training, name='bn2')
        net = layers.conv2d_layer(2,
                                  net, [5, 5, 32],
                                  lambda x: layers.lrelu(x, 0.2),
                                  stride=2)
        #net = layers.batch_norm(net, training, name='bn3')
        net = layers.conv2d_layer(3,
                                  net, [5, 5, 64],
                                  lambda x: layers.lrelu(x, 0.2),
                                  stride=2)
        #net = layers.batch_norm(net, training, name='bn4')
        net = layers.conv2d_layer(4,
                                  net, [5, 5, 128],
                                  lambda x: layers.lrelu(x, 0.2),
                                  stride=2)
        net = layers.max_pool2d(net, [2, 2])
        #net = layers.batch_norm(net, training, name='bn5')

        return net
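
The `layers.lrelu(x, 0.2)` helper used here (and in most of the examples below) is not shown in these excerpts; it is the standard leaky ReLU. A minimal TF 1.x sketch, assuming only a slope argument (the projects' own `lrelu` wrappers may differ in signature or defaults):

import tensorflow as tf

def lrelu(x, alpha=0.2):
    """Leaky ReLU: passes positive values through, scales negatives by alpha."""
    return tf.maximum(x, alpha * x)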
Code example #2
File: model.py  Project: Crispinli/learncyclegan
def residual(inputres, dim, name="resnet"):
    with tf.variable_scope(name):
        out_res = tf.pad(inputres, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
        _, out_res = conv2d(out_res,
                            dim,
                            3,
                            3,
                            1,
                            1,
                            0.02,
                            "VALID",
                            "c1",
                            relufactor=0.2)
        out_res = tf.pad(out_res, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")
        _, out_res = conv2d(out_res,
                            dim,
                            3,
                            3,
                            1,
                            1,
                            0.02,
                            "VALID",
                            "c2",
                            do_relu=False)

        return lrelu(out_res + inputres)
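
The residual block above pads by one pixel with "REFLECT" and then applies a 3x3 "VALID" convolution, so the spatial size is preserved without the border artifacts of zero padding. A minimal illustration with stock TF 1.x ops (the project's own `conv2d` wrapper is not shown, so `tf.layers.conv2d` stands in for it here):

import tensorflow as tf

x = tf.random_normal([1, 64, 64, 32])
padded = tf.pad(x, [[0, 0], [1, 1], [1, 1], [0, 0]], "REFLECT")           # 64x64 -> 66x66
y = tf.layers.conv2d(padded, filters=32, kernel_size=3, padding="VALID")  # 66x66 -> 64x64
print(y.shape)  # (1, 64, 64, 32): same spatial size as the input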
Code example #3
    def activate(self, param):
        inp = self.result
        with tf.name_scope('activation_' + str(self.layernum)):
            if param == 0:
                res = L.relu(inp, name='relu_' + str(self.layernum))
            elif param == 1:
                res = L.lrelu(inp, name='lrelu_' + str(self.layernum))
            elif param == 2:
                res = L.elu(inp, name='elu_' + str(self.layernum))
            elif param == 3:
                res = L.tanh(inp, name='tanh_' + str(self.layernum))
            elif param == 4:
                self.inpsize[-1] = self.inpsize[-1] // 2
                res = L.MFM(inp,
                            self.inpsize[-1],
                            name='mfm_' + str(self.layernum))
            elif param == 5:
                self.inpsize[-1] = self.inpsize[-1] // 2
                res = L.MFMfc(inp,
                              self.inpsize[-1],
                              name='mfm_' + str(self.layernum))
            elif param == 6:
                res = L.sigmoid(inp, name='sigmoid_' + str(self.layernum))
            else:
                res = inp
        self.result = res
        return self.result
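
`L.MFM` (and its fully connected variant `L.MFMfc`) is not defined in this excerpt. The usual Max-Feature-Map activation from LightCNN splits the channel dimension in half and takes the elementwise maximum, which is why the caller halves `self.inpsize[-1]`. A hedged sketch of the convolutional variant:

import tensorflow as tf

def mfm(x):
    """Max-Feature-Map: split the channels into two halves and take the
    elementwise max, halving the channel dimension of the output."""
    a, b = tf.split(x, num_or_size_splits=2, axis=-1)
    return tf.maximum(a, b)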
Code example #4
    def __call__(self, x, reuse=False, output_name=None):
        with tf.variable_scope(self.name) as scope:

            if reuse:
                scope.reuse_variables()

            # Initial dense multiplication
            x = layers.linear(x, "G_FC1", 512 * 8 * 8)

            batch_size = tf.shape(x)[0]
            if FLAGS.data_format == "NHWC":
                target_shape = (batch_size, 8, 8, 512)
            elif FLAGS.data_format == "NCHW":
                target_shape = (batch_size, 512, 8, 8)

            x = layers.reshape(x, target_shape)
            x = tf.contrib.layers.batch_norm(x, fused=True, data_format=FLAGS.data_format)
            x = layers.lrelu(x)

            x = layers.G_conv2d_block(x, "G_conv2D1", 256, 3, data_format=FLAGS.data_format, bn=True)
            x = layers.upsampleNN(x, "G_up1", 2, data_format=FLAGS.data_format)

            x = layers.G_conv2d_block(x, "G_conv2D2", 128, 3, data_format=FLAGS.data_format, bn=True)
            x = layers.upsampleNN(x, "G_up2", 2, data_format=FLAGS.data_format)

            x = layers.G_conv2d_block(x, "G_conv2D3", 64, 3, data_format=FLAGS.data_format, bn=True)
            x = layers.upsampleNN(x, "G_up3", 2, data_format=FLAGS.data_format)

            # Last conv
            x = layers.conv2d(x, "G_conv2D4", 64, FLAGS.channels, 3, 1, "SAME", data_format=FLAGS.data_format)

            x = tf.nn.tanh(x, name=output_name)

            return x
Code example #5
def logit(h, is_training=True, update_batch_stats=True, stochastic=True, seed=1234, dropout_mask=None, return_mask=False, h_before_dropout=None):
    rng = np.random.RandomState(seed)
    if h_before_dropout is None:
        h = L.conv(h, ksize=3, stride=1, f_in=3, f_out=128, seed=rng.randint(123456), name='c1')
        h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b1'), FLAGS.lrelu_a)
        h = L.conv(h, ksize=3, stride=1, f_in=128, f_out=128, seed=rng.randint(123456), name='c2')
        h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b2'), FLAGS.lrelu_a)
        h = L.conv(h, ksize=3, stride=1, f_in=128, f_out=128, seed=rng.randint(123456), name='c3')
        h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b3'), FLAGS.lrelu_a)

        h = L.max_pool(h, ksize=2, stride=2)
        if stochastic:
            h = tf.nn.dropout(h, keep_prob=FLAGS.keep_prob_hidden)

        h = L.conv(h, ksize=3, stride=1, f_in=128, f_out=256, seed=rng.randint(123456), name='c4')
        h = L.lrelu(L.bn(h, 256, is_training=is_training, update_batch_stats=update_batch_stats, name='b4'), FLAGS.lrelu_a)
        h = L.conv(h, ksize=3, stride=1, f_in=256, f_out=256, seed=rng.randint(123456), name='c5')
        h = L.lrelu(L.bn(h, 256, is_training=is_training, update_batch_stats=update_batch_stats, name='b5'), FLAGS.lrelu_a)
        h = L.conv(h, ksize=3, stride=1, f_in=256, f_out=256, seed=rng.randint(123456), name='c6')
        h = L.lrelu(L.bn(h, 256, is_training=is_training, update_batch_stats=update_batch_stats, name='b6'), FLAGS.lrelu_a)

        h_before_dropout = L.max_pool(h, ksize=2, stride=2)

    # Making it possible to change or return a dropout mask
    if stochastic:
        if dropout_mask is None:
            dropout_mask = tf.cast(
                tf.greater_equal(tf.random_uniform(tf.shape(h_before_dropout), 0, 1, seed=rng.randint(123456)), 1.0 - FLAGS.keep_prob_hidden),
                tf.float32)
        else:
            dropout_mask = tf.reshape(dropout_mask, tf.shape(h_before_dropout))
        h = tf.multiply(h_before_dropout, dropout_mask)
        h = (1.0 / FLAGS.keep_prob_hidden) * h
    else:
        h = h_before_dropout
    h = L.conv(h, ksize=3, stride=1, f_in=256, f_out=512, seed=rng.randint(123456), padding="VALID", name='c7')
    h = L.lrelu(L.bn(h, 512, is_training=is_training, update_batch_stats=update_batch_stats, name='b7'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=1, stride=1, f_in=512, f_out=256, seed=rng.randint(123456), name='c8')
    h = L.lrelu(L.bn(h, 256, is_training=is_training, update_batch_stats=update_batch_stats, name='b8'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=1, stride=1, f_in=256, f_out=128, seed=rng.randint(123456), name='c9')
    h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b9'), FLAGS.lrelu_a)

    h = tf.reduce_mean(h, reduction_indices=[1, 2])  # Global average pooling
    h = L.fc(h, 128, 10, seed=rng.randint(123456), name='fc')

    if FLAGS.top_bn:
        h = L.bn(h, 10, is_training=is_training,
                 update_batch_stats=update_batch_stats, name='bfc')
    if return_mask:
        return h, tf.reshape(dropout_mask, [-1, 8*8*256]), h_before_dropout
    else:
        return h
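
The `dropout_mask`, `return_mask`, and `h_before_dropout` arguments exist so that the same dropout realisation can be replayed in a second forward pass (useful for consistency-style objectives). A hypothetical usage sketch of the `logit` function above, assuming `x` is the input image batch:

# First pass: sample a dropout mask and keep the pre-dropout activations.
logits, mask, h_pre = logit(x, stochastic=True, return_mask=True)

# Second pass: skip recomputing the early conv stack and apply exactly
# the same mask again instead of sampling a fresh one.
logits_replayed = logit(x, stochastic=True,
                        dropout_mask=mask, h_before_dropout=h_pre)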
Code example #6
def logit_small(x,
                num_classes,
                is_training=True,
                update_batch_stats=True,
                stochastic=True,
                seed=1234):

    if is_training:
        scope = tf.name_scope("Training")

    else:
        scope = tf.name_scope("Testing")

    with scope:
        h = x

        rng = np.random.RandomState(seed)

        h = L.fc(h,
                 dim_in=x.shape[1],
                 dim_out=64,
                 seed=rng.randint(123456),
                 name="fc1")
        h = L.lrelu(
            L.bn(h,
                 64,
                 is_training=is_training,
                 update_batch_stats=update_batch_stats,
                 name='fc1_normalized'), FLAGS.lrelu_a)
        h = L.fc(h,
                 dim_in=64,
                 dim_out=64,
                 seed=rng.randint(123456),
                 name="fc2")
        h = L.lrelu(
            L.bn(h,
                 64,
                 is_training=is_training,
                 update_batch_stats=update_batch_stats,
                 name='fc2_normalized'), FLAGS.lrelu_a)
        h = L.fc(h,
                 dim_in=64,
                 dim_out=num_classes,
                 seed=rng.randint(123456),
                 name="fc3")
        return h
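
The recurring `L.lrelu(L.bn(...), FLAGS.lrelu_a)` pattern is batch normalization followed by a leaky ReLU. A rough stock TF 1.x equivalent (it does not reproduce the `update_batch_stats` switch, which the project's `L.bn` handles explicitly):

import tensorflow as tf

def bn_lrelu(h, alpha, is_training):
    # Batch norm followed by leaky ReLU, roughly matching L.lrelu(L.bn(...), alpha).
    h = tf.layers.batch_normalization(h, training=is_training)
    return tf.nn.leaky_relu(h, alpha=alpha)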
Code example #7
def logit(x, dropout_mask=None, is_training=True, update_batch_stats=True, stochastic=True, seed=1234):

    rng = numpy.random.RandomState(seed)
    
    h = L.gl(x, std=FLAGS.sigma)
    h = L.conv(h, ksize=3, stride=1, f_in=3, f_out=layer_sizes[0], seed=rng.randint(123456), name='c1')
    h = L.lrelu(bn(h, layer_sizes[0], is_training=is_training, update_batch_stats=update_batch_stats, name='b1'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=3, stride=1, f_in=layer_sizes[0], f_out=layer_sizes[0], seed=rng.randint(123456), name='c2')
    h = L.lrelu(bn(h, layer_sizes[0], is_training=is_training, update_batch_stats=update_batch_stats, name='b2'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=3, stride=1, f_in=layer_sizes[0], f_out=layer_sizes[0], seed=rng.randint(123456), name='c3')
    h = L.lrelu(bn(h, layer_sizes[0], is_training=is_training, update_batch_stats=update_batch_stats, name='b3'), FLAGS.lrelu_a)

    h = L.max_pool(h, ksize=2, stride=2)
    
    h = tf.nn.dropout(h, keep_prob=0.5, seed=rng.randint(123456)) if stochastic else h
    
    h = L.conv(h, ksize=3, stride=1, f_in=layer_sizes[0], f_out=layer_sizes[1], seed=rng.randint(123456), name='c4')
    h = L.lrelu(bn(h, layer_sizes[1], is_training=is_training, update_batch_stats=update_batch_stats, name='b4'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=3, stride=1, f_in=layer_sizes[1], f_out=layer_sizes[1], seed=rng.randint(123456), name='c5')
    h = L.lrelu(bn(h, layer_sizes[1], is_training=is_training, update_batch_stats=update_batch_stats, name='b5'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=3, stride=1, f_in=layer_sizes[1], f_out=layer_sizes[1], seed=rng.randint(123456), name='c6')
    h = L.lrelu(bn(h, layer_sizes[1], is_training=is_training, update_batch_stats=update_batch_stats, name='b6'), FLAGS.lrelu_a)

    h = L.max_pool(h, ksize=2, stride=2)
    
    h = tf.nn.dropout(h, keep_prob=0.5, seed=rng.randint(123456)) if stochastic else h
    
    h = L.conv(h, ksize=3, stride=1, f_in=layer_sizes[1], f_out=layer_sizes[2], seed=rng.randint(123456), padding="VALID", name='c7')
    h = L.lrelu(bn(h, layer_sizes[2], is_training=is_training, update_batch_stats=update_batch_stats, name='b7'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=1, stride=1, f_in=layer_sizes[2], f_out=layer_sizes[3], seed=rng.randint(123456), name='c8')
    h = L.lrelu(bn(h, layer_sizes[3], is_training=is_training, update_batch_stats=update_batch_stats, name='b8'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=1, stride=1, f_in=layer_sizes[3], f_out=layer_sizes[4], seed=rng.randint(123456), name='c9')
    h = L.lrelu(bn(h, layer_sizes[4], is_training=is_training, update_batch_stats=update_batch_stats, name='b9'), FLAGS.lrelu_a)

    h = tf.reduce_mean(h, reduction_indices=[1, 2])  # Global average pooling

    # dropout with mask
    if dropout_mask is None:
        # Base dropout mask is 1 (Fully Connected)
        dropout_mask = tf.ones_like(h)

    h = h*dropout_mask 

    h = L.fc(h, layer_sizes[4], 10, seed=rng.randint(123456), name='fc')

    if FLAGS.top_bn:
        h = bn(h, 10, is_training=is_training,
                 update_batch_stats=update_batch_stats, name='bfc')
    
    return h, dropout_mask
Code example #8
def logit_moons(x, is_training=True, update_batch_stats=True, stochastic=True, seed=1234, dropout_mask=None):
    h = x
    rng = np.random.RandomState(seed)
    h = L.fc(h, dim_in=X_DIM, dim_out=64, seed=rng.randint(123456), name='fc1')
    h = L.lrelu(h, FLAGS.lrelu_a)
    h = L.fc(h, dim_in=64, dim_out=64, seed=rng.randint(123456), name='fc2')
    h = L.lrelu(h, FLAGS.lrelu_a)
    if stochastic:
        if dropout_mask is None:
            dropout_mask = tf.cast(
                tf.greater_equal(tf.random_uniform(tf.shape(h), 0, 1, seed=rng.randint(123456)), 1.0 - FLAGS.keep_prob_hidden),
                tf.float32)
        else:
            dropout_mask = tf.reshape(dropout_mask, tf.shape(h))
        h = tf.multiply(h, dropout_mask)
        h = (1.0 / FLAGS.keep_prob_hidden) * h

    h = L.fc(h, dim_in=64, dim_out=NUM_CLASSES, seed=rng.randint(123456), name='fc3')
    return h
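
The explicit mask branch above implements inverted dropout by hand: keep each unit with probability `keep_prob`, then rescale the survivors by `1 / keep_prob`. When the mask never needs to be reused, the built-in op gives the same effect:

# Equivalent to the mask + rescale branch when the mask is not reused:
h = tf.nn.dropout(h, keep_prob=FLAGS.keep_prob_hidden, seed=rng.randint(123456))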
Code example #9
File: GAN.py  Project: rotkert/colorization_gan_first
    def discriminator(self, image, reuse=False, config=None):
        with tf.variable_scope("discriminator") as scope:
            if reuse:
                scope.reuse_variables()

            h0 = layers.lrelu(layers.conv(image, 3, 64, name='d_h0_conv'))
            h1 = layers.lrelu(
                batch_norm(layers.conv(h0, 64, 128, name='d_h1_conv'),
                           name='d_bn1'))
            h2 = layers.lrelu(
                batch_norm(layers.conv(h1, 128, 256, name='d_h2_conv'),
                           name='d_bn2'))
            h3 = layers.lrelu(
                batch_norm(layers.conv(h2, 256, 512, name='d_h3_conv'),
                           name='d_bn3'))

            # 524288 must match the flattened size of h3 (e.g. 32*32*512)
            h4 = linear(tf.reshape(h3, [config.batch_size, -1]),
                        524288,
                        64,
                        name="d_h4_lin")
            h5 = linear(h4, 64, 1, name="d_h5_lin")
            return h5
Code example #10
File: models.py  Project: cvtower/basedMl
    def __call__(self, x, reuse=False):
        with tf.variable_scope(self.name) as scope:

            if reuse:
                scope.reuse_variables()

            # Initial dense multiplication
            x = layers.linear(x,
                              self.filters * self.start_dim * self.start_dim,
                              bias=True)

            # Reshape to image format
            if self.data_format == "NCHW":
                target_shape = (self.batch_size, self.filters, self.start_dim,
                                self.start_dim)
            else:
                target_shape = (self.batch_size, self.start_dim,
                                self.start_dim, self.filters)

            x = layers.reshape(x, target_shape)
            x = tf.contrib.layers.batch_norm(x, fused=True)
            x = layers.lrelu(x)

            # # Upsampling2D + conv blocks
            for idx, (f, k, s, p) in enumerate(
                    zip(self.list_filters, self.list_kernel_size,
                        self.list_strides, self.list_padding)):
                name = "upsample2D_%s" % idx
                if idx == len(self.list_filters) - 1:
                    bn = False
                    bias = False
                    activation_fn = None
                else:
                    bias = True
                    bn = True
                    activation_fn = layers.lrelu
                x = layers.upsample2d_block(name,
                                            x,
                                            f,
                                            k,
                                            s,
                                            p,
                                            data_format=self.data_format,
                                            bias=bias,
                                            bn=bn,
                                            activation_fn=activation_fn)

            x = tf.nn.tanh(x, name="X_G")

            return x
Code example #11
def logit(x, is_training=True, update_batch_stats=True, stochastic=True, seed=1234):

    h = x
    rng = numpy.random.RandomState(seed)
    print(h)


    h = L.conv(h, ksize=5, stride=1, f_in=1, f_out=32, padding='VALID', seed=rng.randint(123456), name='c1')
    h = L.max_pool(h, ksize=2, stride=2, padding='VALID')
    # h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b1'), FLAGS.lrelu_a)
    h = L.lrelu(h, FLAGS.lrelu_a)
    print(h)

    h = L.conv(h, ksize=5, stride=1, f_in=32, f_out=64, padding='VALID',seed=rng.randint(123456), name='c2')
    h = tf.nn.dropout(h, keep_prob=FLAGS.keep_prob_hidden, seed=rng.randint(123456)) if stochastic else h
    h = L.max_pool(h, ksize=2, stride=2, padding='VALID')
#    h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b2'), FLAGS.lrelu_a)
    h = L.lrelu(h, FLAGS.lrelu_a)
    print(h)

#    h = tf.reduce_mean(h, reduction_indices=[1, 2])  # Global average pooling
    h = tf.layers.flatten(h)
    print(h)
    
    h = L.fc(h, 64*4*4, 512, seed=rng.randint(123456), name='fc1')
    h = L.lrelu(h, FLAGS.lrelu_a)
    print(h)

    h = tf.nn.dropout(h, keep_prob=FLAGS.keep_prob_hidden, seed=rng.randint(123456)) if stochastic else h
    h = L.fc(h, 512,  10, seed=rng.randint(123456), name='fc2')
    print(h)

#    if FLAGS.top_bn:
#        h = L.bn(h, 10, is_training=is_training,
#                 update_batch_stats=update_batch_stats, name='bfc')

    return h
Code example #12
File: discriminator.py  Project: Pie31415/AnimeGen
    def __init__(self, conv_dim, num_classes):
        super(Discriminator, self).__init__()
        self.conv_dim = conv_dim

        self.res_1 = ResidualBlock_D(3, conv_dim)
        self.res_2 = ResidualBlock_D(conv_dim, conv_dim * 2)
        self.attn = SelfAttn(conv_dim * 2)
        self.res_3 = ResidualBlock_D(conv_dim * 2, conv_dim * 4)
        self.res_4 = ResidualBlock_D(conv_dim * 4, conv_dim * 8)
        self.res_5 = ResidualBlock_D(conv_dim * 8, conv_dim * 16)
        self.lrelu = lrelu(inplace=True)
        self.linear = spectral_norm(linear(conv_dim * 16, 1))
        self.embed = embedding(num_classes, conv_dim * 16)

        self.apply(init_weights)
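
Only `__init__` is shown here. Under the usual SAGAN/projection-discriminator pattern, and assuming each `ResidualBlock_D` and `SelfAttn` returns a plain tensor, a plausible (hypothetical) forward pass would look like:

    def forward(self, x, labels):
        h = self.res_1(x)
        h = self.res_2(h)
        h = self.attn(h)
        h = self.res_3(h)
        h = self.res_4(h)
        h = self.res_5(h)
        h = self.lrelu(h)
        h = torch.sum(h, dim=(2, 3))            # global sum pooling over H, W
        out = self.linear(h)                    # unconditional score
        out = out + torch.sum(self.embed(labels) * h, dim=1, keepdim=True)  # projection term
        return out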
Code example #13
def logit(x, is_training=True, update_batch_stats=True, stochastic=True, seed=1234):
    h = x

    rng = numpy.random.RandomState(seed)

    h = L.conv(h, ksize=3, stride=1, f_in=3, f_out=128, seed=rng.randint(123456), name='c1')
    h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b1'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=3, stride=1, f_in=128, f_out=128, seed=rng.randint(123456), name='c2')
    h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b2'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=3, stride=1, f_in=128, f_out=128, seed=rng.randint(123456), name='c3')
    h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b3'), FLAGS.lrelu_a)

    h = L.max_pool(h, ksize=2, stride=2)
    h = tf.nn.dropout(h, keep_prob=FLAGS.keep_prob_hidden, seed=rng.randint(123456)) if stochastic else h

    h = L.conv(h, ksize=3, stride=1, f_in=128, f_out=256, seed=rng.randint(123456), name='c4')
    h = L.lrelu(L.bn(h, 256, is_training=is_training, update_batch_stats=update_batch_stats, name='b4'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=3, stride=1, f_in=256, f_out=256, seed=rng.randint(123456), name='c5')
    h = L.lrelu(L.bn(h, 256, is_training=is_training, update_batch_stats=update_batch_stats, name='b5'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=3, stride=1, f_in=256, f_out=256, seed=rng.randint(123456), name='c6')
    h = L.lrelu(L.bn(h, 256, is_training=is_training, update_batch_stats=update_batch_stats, name='b6'), FLAGS.lrelu_a)

    h = L.max_pool(h, ksize=2, stride=2)
    h = tf.nn.dropout(h, keep_prob=FLAGS.keep_prob_hidden, seed=rng.randint(123456)) if stochastic else h

    h = L.conv(h, ksize=3, stride=1, f_in=256, f_out=512, seed=rng.randint(123456), padding="VALID", name='c7')
    h = L.lrelu(L.bn(h, 512, is_training=is_training, update_batch_stats=update_batch_stats, name='b7'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=1, stride=1, f_in=512, f_out=256, seed=rng.randint(123456), name='c8')
    h = L.lrelu(L.bn(h, 256, is_training=is_training, update_batch_stats=update_batch_stats, name='b8'), FLAGS.lrelu_a)
    h = L.conv(h, ksize=1, stride=1, f_in=256, f_out=128, seed=rng.randint(123456), name='c9')
    h = L.lrelu(L.bn(h, 128, is_training=is_training, update_batch_stats=update_batch_stats, name='b9'), FLAGS.lrelu_a)

    h1 = tf.reduce_mean(h, reduction_indices=[1, 2])  # Features to be aligned
    h = L.fc(h1, 128, 10, seed=rng.randint(123456), name='fc')

    if FLAGS.top_bn:
        h = L.bn(h, 10, is_training=is_training,
                 update_batch_stats=update_batch_stats, name='bfc')

    return h, h1
Code example #14
    def __init__(self, z_dim, conv_dim, num_classes):
        super(Generator, self).__init__()
        self.conv_dim = conv_dim

        self.linear = spectral_norm(
            linear(in_features=z_dim, out_features=conv_dim * 16 * 4 * 4))
        self.res_1 = ResidualBlock_G(conv_dim * 16, conv_dim * 16, num_classes)
        self.res_2 = ResidualBlock_G(conv_dim * 16, conv_dim * 8, num_classes)
        self.res_3 = ResidualBlock_G(conv_dim * 8, conv_dim * 4, num_classes)
        self.attn = SelfAttn(conv_dim * 4)
        self.res_4 = ResidualBlock_G(conv_dim * 4, conv_dim * 2, num_classes)
        self.res_5 = ResidualBlock_G(conv_dim * 2, conv_dim, num_classes)
        self.bn = batch_norm(conv_dim, eps=1e-5, momentum=0.0001)
        self.lrelu = lrelu(inplace=True)
        self.conv3x3 = spectral_norm(conv3x3(conv_dim, 3))
        self.tanh = tanh()

        self.apply(init_weights)
Code example #15
File: model.py  Project: xingxiaoxiong/interior
    def create_discriminator(self, room, design):
        n_layers = 4
        layers = []

        input = tf.concat([room, design], axis=3)
        layers.append(input)

        for i in range(n_layers):
            with tf.variable_scope("layer_%d" % len(layers)):
                out_channels = self.opt.ndf * 2**i
                convolved = tf.layers.conv2d(layers[-1], filters=out_channels, kernel_size=2, strides=2, padding='valid')
                normalized = tf.layers.batch_normalization(convolved)
                rectified = lrelu(normalized, 0.2)
                layers.append(rectified)

        with tf.variable_scope("layer_%d" % len(layers)):
            output = tf.layers.conv2d(layers[-1], filters=1, kernel_size=2, strides=1, padding='valid')
            output = tf.nn.sigmoid(output)
            layers.append(output)

        return layers[-1]
Code example #16
def forward_before_adt(x,
                       is_training=True,
                       update_batch_stats=True,
                       stochastic=True,
                       seed=1234):
    h = x

    rng = numpy.random.RandomState(seed)

    h = L.gl(h, std=FLAGS.sigma)
    h = L.conv(h,
               ksize=3,
               stride=1,
               f_in=3,
               f_out=layer_sizes[0],
               seed=rng.randint(123456),
               name='c1')
    h = L.lrelu(
        bn(h,
           layer_sizes[0],
           is_training=is_training,
           update_batch_stats=update_batch_stats,
           name='b1'), FLAGS.lrelu_a)
    h = L.conv(h,
               ksize=3,
               stride=1,
               f_in=layer_sizes[0],
               f_out=layer_sizes[0],
               seed=rng.randint(123456),
               name='c2')
    h = L.lrelu(
        bn(h,
           layer_sizes[0],
           is_training=is_training,
           update_batch_stats=update_batch_stats,
           name='b2'), FLAGS.lrelu_a)
    h = L.conv(h,
               ksize=3,
               stride=1,
               f_in=layer_sizes[0],
               f_out=layer_sizes[0],
               seed=rng.randint(123456),
               name='c3')
    h = L.lrelu(
        bn(h,
           layer_sizes[0],
           is_training=is_training,
           update_batch_stats=update_batch_stats,
           name='b3'), FLAGS.lrelu_a)

    h = L.max_pool(h, ksize=2, stride=2)

    h = tf.nn.dropout(h, keep_prob=0.5,
                      seed=rng.randint(123456)) if stochastic else h

    h = L.conv(h,
               ksize=3,
               stride=1,
               f_in=layer_sizes[0],
               f_out=layer_sizes[1],
               seed=rng.randint(123456),
               name='c4')
    h = L.lrelu(
        bn(h,
           layer_sizes[1],
           is_training=is_training,
           update_batch_stats=update_batch_stats,
           name='b4'), FLAGS.lrelu_a)
    h = L.conv(h,
               ksize=3,
               stride=1,
               f_in=layer_sizes[1],
               f_out=layer_sizes[1],
               seed=rng.randint(123456),
               name='c5')
    h = L.lrelu(
        bn(h,
           layer_sizes[1],
           is_training=is_training,
           update_batch_stats=update_batch_stats,
           name='b5'), FLAGS.lrelu_a)
    h = L.conv(h,
               ksize=3,
               stride=1,
               f_in=layer_sizes[1],
               f_out=layer_sizes[1],
               seed=rng.randint(123456),
               name='c6')
    h = L.lrelu(
        bn(h,
           layer_sizes[1],
           is_training=is_training,
           update_batch_stats=update_batch_stats,
           name='b6'), FLAGS.lrelu_a)

    h = L.max_pool(h, ksize=2, stride=2)

    h = tf.nn.dropout(h, keep_prob=0.5,
                      seed=rng.randint(123456)) if stochastic else h

    h = L.conv(h,
               ksize=3,
               stride=1,
               f_in=layer_sizes[1],
               f_out=layer_sizes[2],
               seed=rng.randint(123456),
               padding="VALID",
               name='c7')
    h = L.lrelu(
        bn(h,
           layer_sizes[2],
           is_training=is_training,
           update_batch_stats=update_batch_stats,
           name='b7'), FLAGS.lrelu_a)
    h = L.conv(h,
               ksize=1,
               stride=1,
               f_in=layer_sizes[2],
               f_out=layer_sizes[3],
               seed=rng.randint(123456),
               name='c8')
    h = L.lrelu(
        bn(h,
           layer_sizes[3],
           is_training=is_training,
           update_batch_stats=update_batch_stats,
           name='b8'), FLAGS.lrelu_a)
    h = L.conv(h,
               ksize=1,
               stride=1,
               f_in=layer_sizes[3],
               f_out=layer_sizes[4],
               seed=rng.randint(123456),
               name='c9')
    h = L.lrelu(
        bn(h,
           layer_sizes[4],
           is_training=is_training,
           update_batch_stats=update_batch_stats,
           name='b9'), FLAGS.lrelu_a)

    h = tf.reduce_mean(h, reduction_indices=[1, 2])  # Global average pooling

    return h
Code example #17
def autoencoder(x,
                zca,
                is_training=True,
                update_batch_stats=True,
                stochastic=True,
                seed=1234,
                use_zca=True):

    if is_training:
        scope = tf.name_scope("Training")

    else:
        scope = tf.name_scope("Testing")

    with scope:
        #Initial shape (-1, 32, 32, 3)
        x = x + 0.5  #Recover [0,1] range
        if use_zca:
            h = zca
        else:
            h = x
        print(h.shape)
        rng = np.random.RandomState(seed)

        #h = tf.map_fn(lambda x:transform(x),h)

        #(1) conv + relu + maxpool (-1, 16, 16, 64)
        h = L.conv(h,
                   ksize=3,
                   stride=1,
                   f_in=3,
                   f_out=64,
                   seed=rng.randint(123456),
                   padding="SAME",
                   name='conv1')
        h = L.lrelu(
            L.bn(h,
                 64,
                 is_training=is_training,
                 update_batch_stats=update_batch_stats,
                 name='conv1_bn'), FLAGS.lrelu_a)
        h = L.max_pool(h, ksize=2, stride=2)

        #(2) conv + relu + maxpool (-1, 8, 8, 32)
        h = L.conv(h,
                   ksize=3,
                   stride=1,
                   f_in=64,
                   f_out=32,
                   seed=rng.randint(123456),
                   padding="SAME",
                   name='conv2')
        h = L.lrelu(
            L.bn(h,
                 32,
                 is_training=is_training,
                 update_batch_stats=update_batch_stats,
                 name='conv2_bn'), FLAGS.lrelu_a)
        h = L.max_pool(h, ksize=2, stride=2)

        #(3) conv + relu + maxpool (-1, 4, 4, 16)
        h = L.conv(h,
                   ksize=3,
                   stride=1,
                   f_in=32,
                   f_out=16,
                   seed=rng.randint(123456),
                   padding="SAME",
                   name='conv3')
        h = L.lrelu(
            L.bn(h,
                 16,
                 is_training=is_training,
                 update_batch_stats=update_batch_stats,
                 name='conv3_bn'), FLAGS.lrelu_a)
        h = L.max_pool(h, ksize=2, stride=2)

        encoded = h
        #(4) deconv + relu (-1, 8, 8, 16)
        h = L.deconv(encoded,
                     ksize=5,
                     stride=1,
                     f_in=16,
                     f_out=16,
                     seed=rng.randint(123456),
                     padding="SAME",
                     name="deconv1")
        h = L.lrelu(
            L.bn(h,
                 16,
                 is_training=is_training,
                 update_batch_stats=update_batch_stats,
                 name='deconv1_bn'), FLAGS.lrelu_a)

        #(5) deconv + relu (-1, 16, 16, 32)
        h = L.deconv(h,
                     ksize=5,
                     stride=1,
                     f_in=16,
                     f_out=32,
                     padding="SAME",
                     name="deconv2")
        h = L.lrelu(
            L.bn(h,
                 32,
                 is_training=is_training,
                 update_batch_stats=update_batch_stats,
                 name='deconv2_bn'), FLAGS.lrelu_a)

        #(6) deconv + relu (-1, 32, 32, 64)
        h = L.deconv(h,
                     ksize=5,
                     stride=1,
                     f_in=32,
                     f_out=64,
                     padding="SAME",
                     name="deconv3")
        h = L.lrelu(
            L.bn(h,
                 64,
                 is_training=is_training,
                 update_batch_stats=update_batch_stats,
                 name='deconv3_bn'), FLAGS.lrelu_a)

        #(7) conv + sigmoid (-1, 32, 32, 3)
        h = L.conv(h,
                   ksize=3,
                   stride=1,
                   f_in=64,
                   f_out=3,
                   seed=rng.randint(123456),
                   padding="SAME",
                   name='convfinal')
        if use_zca:
            h = L.bn(h,
                     3,
                     is_training=is_training,
                     update_batch_stats=update_batch_stats,
                     name='deconv4_bn')
        else:
            h = tf.sigmoid(h)

        num_samples = 10
        sample_og_zca = tf.reshape(
            tf.slice(zca, [0, 0, 0, 0], [num_samples, 32, 32, 3]),
            (num_samples * 32, 32, 3))
        sample_og_color = tf.reshape(
            tf.slice(x, [0, 0, 0, 0], [num_samples, 32, 32, 3]),
            (num_samples * 32, 32, 3))
        sample_rec = tf.reshape(
            tf.slice(h, [0, 0, 0, 0], [num_samples, 32, 32, 3]),
            (num_samples * 32, 32, 3))
        if use_zca:
            sample = tf.concat([sample_og_zca, sample_rec], axis=1)
            m = tf.reduce_min(sample)
            sample = (sample - m) / (tf.reduce_max(sample) - m)
        else:
            m = tf.reduce_min(sample_og_zca)
            sample_og_zca = (sample_og_zca -
                             m) / (tf.reduce_max(sample_og_zca) - m)
            sample = tf.concat([sample_og_zca, sample_rec], axis=1)
        sample = tf.concat([sample_og_color, sample], axis=1)
        sample = tf.cast(255.0 * sample, tf.uint8)

        if use_zca:
            loss = tf.reduce_mean(tf.losses.mean_squared_error(zca, h))
        else:
            loss = tf.reduce_mean(tf.losses.log_loss(x, h))

        return loss, encoded, sample
Code example #18
def logit(x,
          num_classes=10,
          is_training=True,
          update_batch_stats=True,
          stochastic=True,
          seed=1234):

    if is_training:
        scope = tf.name_scope("Training")

    else:
        scope = tf.name_scope("Testing")

    with scope:
        h = x

        rng = np.random.RandomState(seed)

        h = L.conv(h,
                   ksize=3,
                   stride=1,
                   f_in=3,
                   f_out=128,
                   seed=rng.randint(123456),
                   name='c1')
        h = L.lrelu(
            L.bn(h,
                 128,
                 is_training=is_training,
                 update_batch_stats=update_batch_stats,
                 name='b1'), FLAGS.lrelu_a)
        h = L.conv(h,
                   ksize=3,
                   stride=1,
                   f_in=128,
                   f_out=128,
                   seed=rng.randint(123456),
                   name='c2')
        h = L.lrelu(
            L.bn(h,
                 128,
                 is_training=is_training,
                 update_batch_stats=update_batch_stats,
                 name='b2'), FLAGS.lrelu_a)
        h = L.conv(h,
                   ksize=3,
                   stride=1,
                   f_in=128,
                   f_out=128,
                   seed=rng.randint(123456),
                   name='c3')
        h = L.lrelu(
            L.bn(h,
                 128,
                 is_training=is_training,
                 update_batch_stats=update_batch_stats,
                 name='b3'), FLAGS.lrelu_a)

        h = L.max_pool(h, ksize=2, stride=2)
        h = tf.nn.dropout(h,
                          keep_prob=FLAGS.keep_prob_hidden,
                          seed=rng.randint(123456)) if stochastic else h

        h = L.conv(h,
                   ksize=3,
                   stride=1,
                   f_in=128,
                   f_out=256,
                   seed=rng.randint(123456),
                   name='c4')
        h = L.lrelu(
            L.bn(h,
                 256,
                 is_training=is_training,
                 update_batch_stats=update_batch_stats,
                 name='b4'), FLAGS.lrelu_a)
        h = L.conv(h,
                   ksize=3,
                   stride=1,
                   f_in=256,
                   f_out=256,
                   seed=rng.randint(123456),
                   name='c5')
        h = L.lrelu(
            L.bn(h,
                 256,
                 is_training=is_training,
                 update_batch_stats=update_batch_stats,
                 name='b5'), FLAGS.lrelu_a)
        h = L.conv(h,
                   ksize=3,
                   stride=1,
                   f_in=256,
                   f_out=256,
                   seed=rng.randint(123456),
                   name='c6')
        h = L.lrelu(
            L.bn(h,
                 256,
                 is_training=is_training,
                 update_batch_stats=update_batch_stats,
                 name='b6'), FLAGS.lrelu_a)

        h = L.max_pool(h, ksize=2, stride=2)
        h = tf.nn.dropout(h,
                          keep_prob=FLAGS.keep_prob_hidden,
                          seed=rng.randint(123456)) if stochastic else h

        h = L.conv(h,
                   ksize=3,
                   stride=1,
                   f_in=256,
                   f_out=512,
                   seed=rng.randint(123456),
                   padding="VALID",
                   name='c7')
        h = L.lrelu(
            L.bn(h,
                 512,
                 is_training=is_training,
                 update_batch_stats=update_batch_stats,
                 name='b7'), FLAGS.lrelu_a)
        h = L.conv(h,
                   ksize=1,
                   stride=1,
                   f_in=512,
                   f_out=256,
                   seed=rng.randint(123456),
                   name='c8')
        h = L.lrelu(
            L.bn(h,
                 256,
                 is_training=is_training,
                 update_batch_stats=update_batch_stats,
                 name='b8'), FLAGS.lrelu_a)
        h = L.conv(h,
                   ksize=1,
                   stride=1,
                   f_in=256,
                   f_out=128,
                   seed=rng.randint(123456),
                   name='c9')
        h = L.lrelu(
            L.bn(h,
                 128,
                 is_training=is_training,
                 update_batch_stats=update_batch_stats,
                 name='b9'), FLAGS.lrelu_a)

        h = tf.reduce_mean(h, reduction_indices=[1,
                                                 2])  # Global average pooling
        h = L.fc(h, 128, num_classes, seed=rng.randint(123456), name='fc')

        if FLAGS.top_bn:
            h = L.bn(h,
                     num_classes,
                     is_training=is_training,
                     update_batch_stats=update_batch_stats,
                     name='bfc')

        return h
Code example #19
File: model.py  Project: xingxiaoxiong/interior
    def create_generator(self, room):
        layers = []

        # encoder_1: [batch, 32, 32, in_channels] => [batch, 16, 16, ngf]
        with tf.variable_scope("encoder_1"):
            output = tf.layers.conv2d(room, filters=self.opt.ngf, kernel_size=2, strides=2, padding='valid')
            layers.append(output)

        layer_specs = [
            self.opt.ngf * 2,  # encoder_2: [batch, 16, 16, ngf] => [batch, 8, 8, ngf * 2]
            self.opt.ngf * 4,  # encoder_3: [batch, 8, 8, ngf * 2] => [batch, 4, 4, ngf * 4]
            self.opt.ngf * 8,  # encoder_4: [batch, 4, 4, ngf * 4] => [batch, 2, 2, ngf * 8]
            self.opt.ngf * 16,
        ]

        for out_channels in layer_specs:
            with tf.variable_scope("encoder_%d" % (len(layers) + 1)):
                rectified = lrelu(layers[-1], 0.2)
                # [batch, in_height, in_width, in_channels] => [batch, in_height/2, in_width/2, out_channels]
                convolved = tf.layers.conv2d(rectified, filters=out_channels, kernel_size=2, strides=2, padding='valid')
                output = tf.layers.batch_normalization(convolved)
                layers.append(output)

        layer_specs = [
            (self.opt.ngf * 8, 0.1),
            (self.opt.ngf * 4, 0.1),  # decoder_8: [batch, 2, 2, ngf * 8 * 2] => [batch, 4, 4, ngf * 4]
            (self.opt.ngf * 2, 0.1),  # decoder_7: [batch, 4, 4, ngf * 4 * 2] => [batch, 8, 8, ngf * 2]
            (self.opt.ngf * 1, 0.1),  # decoder_6: [batch, 8, 8, ngf * 2 * 2] => [batch, 16, 16, ngf * 1]
        ]

        num_encoder_layers = len(layers)
        for decoder_layer, (out_channels, dropout) in enumerate(layer_specs):
            skip_layer = num_encoder_layers - decoder_layer - 1
            with tf.variable_scope("decoder_%d" % (skip_layer + 1)):
                if decoder_layer == 0:
                    # first decoder layer doesn't have skip connections
                    # since it is directly connected to the skip_layer
                    input = layers[-1]
                else:
                    input = tf.concat([layers[-1], layers[skip_layer]], axis=3)

                rectified = tf.nn.relu(input)
                # [batch, in_height, in_width, in_channels] => [batch, in_height*2, in_width*2, out_channels]
                output = deconv(rectified, out_channels)
                output = tf.layers.batch_normalization(output)

                if dropout > 0.0:
                    output = tf.nn.dropout(output, keep_prob=1 - dropout)

                layers.append(output)

        # decoder_1: [batch, 16, 16, ngf * 2] => [batch, 32, 32, generator_outputs_channels]
        with tf.variable_scope("decoder_1"):
            input = tf.concat([layers[-1], layers[0]], axis=3)
            rectified = tf.nn.relu(input)
            output = deconv(rectified, self.depth)

            category = output[:, :, :, :self.depth - ROTATION_COUNT]
            category_output = tf.nn.softmax(category)

            rotation = output[:, :, :, self.depth - ROTATION_COUNT:]
            rotation_output = tf.nn.softmax(rotation)

            final_output = tf.concat([category_output, rotation_output], axis=3)
            layers.append(final_output)

        return layers[-1], category, rotation
Code example #20
File: discriminator.py  Project: Pie31415/AnimeGen
    def __init__(self, conv_dim=64):
        super(Discriminator, self).__init__()

        layer1 = []
        layer2 = []
        layer3 = []
        layer4 = []
        output = []

        # layer 1

        # 3 -> 64
        layer1.append(
            spectral_norm(conv(3, conv_dim, kernel_size=4, stride=2,
                               padding=1)))
        layer1.append(lrelu())

        # layer 2
        input_dim = conv_dim
        output_dim = input_dim * 2

        # 64 -> 128
        layer2.append(
            spectral_norm(
                conv(input_dim, output_dim, kernel_size=4, stride=2,
                     padding=1)))
        layer2.append(lrelu())

        # layer 3
        input_dim = output_dim
        output_dim = input_dim * 2

        # 128 -> 256
        layer3.append(
            spectral_norm(
                conv(input_dim, output_dim, kernel_size=4, stride=2,
                     padding=1)))
        layer3.append(lrelu())

        # layer 4
        input_dim = output_dim
        output_dim = input_dim * 2

        # 256 -> 512
        layer4.append(
            spectral_norm(
                conv(input_dim, output_dim, kernel_size=4, stride=2,
                     padding=1)))
        layer4.append(lrelu())

        # output layer
        input_dim = input_dim * 2

        # 512 -> 1
        output.append(conv(input_dim, 1, kernel_size=4))

        self.l1 = nn.Sequential(*layer1)
        self.l2 = nn.Sequential(*layer2)
        self.l3 = nn.Sequential(*layer3)
        self.attn1 = SelfAttn(256)
        self.l4 = nn.Sequential(*layer4)
        self.attn2 = SelfAttn(512)
        self.output = nn.Sequential(*output)
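
As in example #12, the excerpt stops at `__init__`. Assuming `SelfAttn` returns only the attended feature map (some implementations also return the attention weights), the blocks would typically be chained like this (hypothetical sketch):

    def forward(self, x):
        h = self.l1(x)        # 3   -> 64
        h = self.l2(h)        # 64  -> 128
        h = self.l3(h)        # 128 -> 256
        h = self.attn1(h)
        h = self.l4(h)        # 256 -> 512
        h = self.attn2(h)
        out = self.output(h)  # 512 -> 1 score map
        return out.view(x.size(0), -1)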
Code example #21
File: generator.py  Project: Pie31415/AnimeGen
    def __init__(self, image_size=64, z_dim=100, conv_dim=64):
        super(Generator, self).__init__()

        layer1 = []
        layer2 = []
        layer3 = []
        layer4 = []
        output = []

        # layer 1
        layer_num = int(np.log2(image_size)) - 3  # 3
        mult = 2**layer_num  # 8
        output_dim = conv_dim * mult  # 512

        # 100 -> 512
        layer1.append(spectral_norm(deconv(z_dim, output_dim, kernel_size=4)))
        layer1.append(batch_norm(output_dim))
        layer1.append(lrelu())

        # layer 2
        input_dim = output_dim
        output_dim = int(input_dim / 2)

        # 512 -> 256
        layer2.append(
            spectral_norm(
                deconv(input_dim,
                       output_dim,
                       kernel_size=4,
                       stride=2,
                       padding=1)))
        layer2.append(batch_norm(output_dim))
        layer2.append(lrelu())

        # layer 3
        input_dim = output_dim
        output_dim = int(input_dim / 2)

        # 256 -> 128
        layer3.append(
            spectral_norm(
                deconv(input_dim,
                       output_dim,
                       kernel_size=4,
                       stride=2,
                       padding=1)))
        layer3.append(batch_norm(output_dim))
        layer3.append(lrelu())

        # layer 4
        input_dim = output_dim
        output_dim = int(input_dim / 2)

        # 128 -> 64
        layer4.append(
            spectral_norm(
                deconv(input_dim,
                       output_dim,
                       kernel_size=4,
                       stride=2,
                       padding=1)))
        layer4.append(batch_norm(output_dim))
        layer4.append(lrelu())

        # output layer
        input_dim = output_dim

        # 64 -> 3
        output.append(
            deconv(input_dim,
                   out_channels=3,
                   kernel_size=4,
                   stride=2,
                   padding=1))
        output.append(tanh())

        self.l1 = nn.Sequential(*layer1)
        self.l2 = nn.Sequential(*layer2)
        self.l3 = nn.Sequential(*layer3)
        self.attn1 = SelfAttn(128)
        self.l4 = nn.Sequential(*layer4)
        self.attn2 = SelfAttn(64)
        self.output = nn.Sequential(*output)
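
The matching generator forward pass is also omitted. A hypothetical sketch, assuming the latent vector `z` has shape `[batch, z_dim]` and the first 4x4 transposed convolution expects it as a 1x1 spatial map:

    def forward(self, z):
        h = self.l1(z.view(z.size(0), -1, 1, 1))  # 100 -> 512, 1x1 -> 4x4
        h = self.l2(h)                            # 512 -> 256, 8x8
        h = self.l3(h)                            # 256 -> 128, 16x16
        h = self.attn1(h)
        h = self.l4(h)                            # 128 -> 64, 32x32
        h = self.attn2(h)
        return self.output(h)                     # 64 -> 3, 64x64, tanh output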
Code example #22
def dehaze_generator(generator_inputs, name="generator", skip=False):

    # Encoder
    # encoder_1: [batch, 256, 256, in_channels] => [batch, 128, 128, ngf]

    with tf.variable_scope("process_conv1", reuse=tf.AUTO_REUSE):
        process1 = layers.conv2d_pad(generator_inputs, 3, ndf, 5, 1)
        process1 = layers.lrelu(process1, 0.2)
    with tf.variable_scope("process_conv2", reuse=tf.AUTO_REUSE):
        process2 = layers.conv2d_pad(process1, ndf, ndf, 3, 1)
        process2 = layers.lrelu(process2, 0.2)

    with tf.variable_scope("encoder_1", reuse=tf.AUTO_REUSE):
        rectified = layers.lrelu(process2, 0.2)
        encoder1 = layers.gen_conv(rectified, ndf, scope='conv_0')

        encoder1 = layers.instance_norm(encoder1)

    with tf.variable_scope("encoder_2", reuse=tf.AUTO_REUSE):
        rectified = layers.lrelu(encoder1, 0.2)
        encoder2 = layers.gen_conv(rectified, ndf * 2, scope='conv_1')

        encoder2 = layers.instance_norm(encoder2)

    with tf.variable_scope("encoder_3", reuse=tf.AUTO_REUSE):
        rectified = layers.lrelu(encoder2, 0.2)
        encoder3 = layers.gen_conv(rectified, ndf * 4, scope='conv_2')

        encoder3 = layers.instance_norm(encoder3)

    with tf.variable_scope("encoder_4", reuse=tf.AUTO_REUSE):
        rectified = layers.lrelu(encoder3, 0.2)
        encoder4 = layers.gen_conv(rectified, ndf * 8, scope='conv_3')

        encoder4 = layers.instance_norm(encoder4)

    padding = "REFLECT"
    # Residual_Block
    o_r1 = build_resnet_block(encoder4, ndf * 8, "r1", padding)
    o_r2 = build_resnet_block(o_r1, ndf * 8, "r2", padding)
    o_r3 = build_resnet_block(o_r2, ndf * 8, "r3", padding)
    o_r4 = build_resnet_block(o_r3, ndf * 8, "r4", padding)
    o_r5 = build_resnet_block(o_r4, ndf * 8, "r5", padding)
    # o_r6 = build_resnet_block(o_r5, ngf * 4, "r6", padding)
    # o_r7 = build_resnet_block(o_r6, ngf * 4, "r7", padding)
    # o_r8 = build_resnet_block(o_r7, ngf * 4, "r8", padding)
    # o_r9 = build_resnet_block(o_r8, ngf * 4, "r9", padding)

    # decoder
    with tf.variable_scope("Upsample_1", reuse=tf.AUTO_REUSE):
        rectified = tf.concat([encoder4, o_r5], 3)

        rectified = layers.instance_norm(rectified)
        upsample1 = layers.lrelu(rectified, 0.2)
        upsample1 = layers.resize_conv2d(upsample1, 1024, 256, 3, [1, 1, 1, 1])
        upsample1 = tf.nn.dropout(upsample1, keep_prob=0.5)

    with tf.variable_scope("Upsample_2", reuse=tf.AUTO_REUSE):
        rectified = tf.concat([encoder3, upsample1], 3)
        rectified = layers.instance_norm(rectified)
        upsample2 = layers.lrelu(rectified, 0.2)
        upsample2 = layers.resize_conv2d(upsample2, 512, 128, 3, [1, 1, 1, 1])
        upsample2 = tf.nn.dropout(upsample2, keep_prob=0.5)

    with tf.variable_scope("Upsample_3", reuse=tf.AUTO_REUSE):
        rectified = tf.concat([encoder2, upsample2], 3)
        rectified = layers.instance_norm(rectified)
        upsample3 = layers.lrelu(rectified, 0.2)
        upsample3 = layers.resize_conv2d(upsample3, 256, 64, 3, [1, 1, 1, 1])

    with tf.variable_scope("Upsample_4", reuse=tf.AUTO_REUSE):
        rectified = tf.concat([encoder1, upsample3], 3)
        rectified = layers.instance_norm(rectified)
        upsample4 = layers.lrelu(rectified, 0.2)
        upsample4 = layers.resize_conv2d(upsample4, 128, 3, 3, [1, 1, 1, 1])
        Upsample_4 = tf.tanh(upsample4)

    with tf.variable_scope("Multi-scale_Refine_1", reuse=tf.AUTO_REUSE):
        rectified = tf.concat([Upsample_4, process2], 3)
        rectified = layers.instance_norm(rectified)
        conv3_1 = layers.conv2d_pad(rectified, 67, 64, 3, 1)
        conv3_1 = layers.lrelu(conv3_1, 0.2)
        conv5_1 = layers.conv2d_pad(rectified, 67, 64, 5, 1)
        conv5_1 = layers.lrelu(conv5_1, 0.2)

    with tf.variable_scope("Multi-scale_Refine_2", reuse=tf.AUTO_REUSE):
        conv_1 = tf.concat([conv3_1, conv5_1], 3)
        conv1_1 = layers.instance_norm(conv_1)
        conv3_2 = layers.conv2d_pad(conv1_1, 128, 128, 3, 1)
        conv3_2 = layers.lrelu(conv3_2, 0.2)
        conv5_2 = layers.conv2d_pad(conv1_1, 128, 128, 5, 1)
        conv5_2 = layers.lrelu(conv5_2, 0.2)

    with tf.variable_scope("Multi-scale_Refine_3", reuse=tf.AUTO_REUSE):
        conv_2 = tf.concat([conv3_2, conv5_2], 3)
        conv2_2 = layers.instance_norm(conv_2)
        conv_out = layers.lrelu(layers.one_conv(conv2_2, 32), 0.2)
        conv_out = layers.conv2d_pad(conv_out, 32, 3, 3, 1)
        mul_out = tf.tanh(conv_out)

    if skip is True:
        out_gen = tf.nn.tanh(generator_inputs + mul_out, "t1")
    else:
        out_gen = tf.nn.tanh(mul_out, "t1")

    return out_gen
Code example #23
def dehaze_resize_with_deconv(inputgen, name="generator", skip=False):
    with tf.variable_scope(name):
        f = 7
        ks = 3
        padding = "REFLECT"

        pad_input = tf.pad(inputgen, [[0, 0], [ks, ks], [ks, ks], [0, 0]],
                           padding)
        o_c1 = layers.general_conv2d(pad_input,
                                     ngf,
                                     f,
                                     f,
                                     1,
                                     1,
                                     0.02,
                                     name="c1",
                                     relufactor=0.2)  #256*256*32

        o_c2 = layers.general_conv2d(o_c1,
                                     ngf * 2,
                                     ks,
                                     ks,
                                     2,
                                     2,
                                     0.02,
                                     "SAME",
                                     "c2",
                                     relufactor=0.2)  #128*128*64

        o_c3 = layers.general_conv2d(o_c2,
                                     ngf * 4,
                                     ks,
                                     ks,
                                     2,
                                     2,
                                     0.02,
                                     "SAME",
                                     "c3",
                                     relufactor=0.2)  #64*64*128

        o_r1 = build_resnet_block(o_c3, ngf * 4, "r1", padding)  #64*64*128
        o_r2 = build_resnet_block(o_r1, ngf * 4, "r2", padding)
        o_r3 = build_resnet_block(o_r2, ngf * 4, "r3", padding)
        o_r4 = build_resnet_block(o_r3, ngf * 4, "r4", padding)
        o_r5 = build_resnet_block(o_r4, ngf * 4, "r5", padding)
        o_r6 = build_resnet_block(o_r5, ngf * 4, "r6", padding)
        o_r7 = build_resnet_block(o_r6, ngf * 4, "r7", padding)
        o_r8 = build_resnet_block(o_r7, ngf * 4, "r8", padding)
        o_r9 = build_resnet_block(o_r8, ngf * 4, "r9", padding)

        with tf.variable_scope("resize_conv1"):
            o_c4_0 = tf.concat([o_r9, o_c3], 3)
            o_c4_1 = layers.instance_norm(o_c4_0)
            o_c4_1 = layers.deconv2d_resize(o_c4_1,
                                            ngf * 4,
                                            kernel=ks,
                                            stride=(2, 2),
                                            name='deconv_1')
            o_c4_1 = layers.lrelu(o_c4_1)
            o_c4_2 = layers.general_deconv2d(o_c4_0,
                                             [BATCH_SIZE, 128, 128, ngf * 2],
                                             ngf * 2, ks, ks, 2, 2, 0.02,
                                             "SAME", "c4")
            o_c4_3 = tf.concat([o_c4_1, o_c4_2], 3)
            o_c4_4 = layers.one_conv(o_c4_3, 64)
            o_c4_4 = layers.lrelu(o_c4_4)

        with tf.variable_scope("resize_conv2"):
            o_c5_0 = tf.concat([o_c2, o_c4_4], 3)
            o_c5 = layers.instance_norm(o_c5_0)
            o_c5_1 = layers.deconv2d_resize(o_c5,
                                            ngf * 2,
                                            kernel=ks,
                                            stride=(2, 2),
                                            name='deconv_2')
            o_c5_1 = layers.lrelu(o_c5_1)

            o_c5_2 = layers.general_deconv2d(o_c5_0,
                                             [BATCH_SIZE, 256, 256, ngf], ngf,
                                             ks, ks, 2, 2, 0.02, "SAME", "c5")
            o_c5_3 = tf.concat([o_c5_1, o_c5_2], 3)
            o_c5_4 = layers.one_conv(o_c5_3, 32)
            o_c5_4 = layers.lrelu(o_c5_4)

        with tf.variable_scope("Output_layer"):
            #  o_c6_0 = tf.concat([o_c5_1, o_c5_2],3)

            o_c6 = layers.general_conv2d(o_c5_4,
                                         IMG_CHANNELS,
                                         f,
                                         f,
                                         1,
                                         1,
                                         0.02,
                                         "SAME",
                                         "c6",
                                         do_norm=False,
                                         do_relu=False)

        if skip is True:
            out_gen = tf.nn.tanh(inputgen + o_c6, "t1")
        else:
            out_gen = tf.nn.tanh(o_c6, "t1")

        return out_gen