# Example #1
def Q(x, reuse=False):
    """Inference network: map an image batch x to a latent code za.

    Stem conv -> four downsampling blocks (spatial sizes per the original
    annotations: 32x32 -> 16x16 -> 8x8 -> 4x4) -> flatten -> tanh-bounded
    dense projection to z_dim.  All weights live under the 'infer'
    variable scope; pass reuse=True to share them across calls.
    """
    with tf.variable_scope('infer', reuse=reuse):
        # Stem: plain conv + leaky ReLU before the downsampling stack.
        h = lrelu(conv2d(x, channelne[1], k=kersizee[0], s=1, name="conv2d_0"))
        # Downsampling stack.
        h = block_down(h, channelne[1], k=kersizee[1], s=1, name='block1')     # 32x32
        h = resblock_down(h, channelne[2], k=kersizee[2], s=1, name='block2')  # 16x16
        h = block_down(h, channelne[3], k=kersizee[3], s=1, name='block3')     # 8x8
        h = block_down(h, channelne[4], k=kersizee[4], s=1, name='block4')     # 4x4
        # Flatten to [BATCH_SIZE, C*H*W] and project to the latent space.
        flat = tf.reshape(h, [BATCH_SIZE, channelne[5] * fme[5] * fme[5]])
        za = tf.nn.tanh(dense(flat, z_dim, gain=1, name='fully_f1'))
        return za
def P(z, class_info, reuse=False, layer_condition=False):
    """Generator: decode latent z, conditioned on class_info, into image gx.

    Pipeline: concat(z, class) -> dense -> 4x4 spatial map -> sparse
    attribute op -> four upsampling blocks (8x8 -> 16x16 -> 32x32 -> 64x64
    per the original annotations), each preceded by a sparsification step
    -> tanh conv head.  Weights live under the 'gen' variable scope;
    reuse=True shares them.  layer_condition is forwarded to the up-blocks
    so class conditioning can be injected per layer.
    """
    with tf.variable_scope('gen', reuse=reuse):
        # Condition the latent on the class vector: [z_dim + class_num]
        cond_z = tf.concat([z, class_info], 1)
        # Project to a 4x4 spatial tensor and activate.
        n_units = fmd[0] * fmd[0] * channelnd[0]
        proj = dense(cond_z, n_units, gain=np.sqrt(2) / 4, name='fully_f')
        grid = tf.nn.relu(tf.reshape(proj, [-1, fmd[0], fmd[0], channelnd[0]]))
        grid = tf.nn.relu(
            conv2d(grid, nzatte, k=kersized[0], s=1, name="conv2d_0"))  # 4x4
        # Per-location attribute codes, sparsified to the top 10% of channels.
        att = tf.reshape(grid, [BATCH_SIZE, fmd[0] * fmd[0], nzatte])
        att = sparsech(att, int(nzatte * 0.1))
        # NOTE(review): the base map is all zeros — the original author also
        # questioned this ("ZEROS??").  Presumably sparseopattributeq
        # scatters `att` into it; confirm against that helper.
        base = tf.zeros((BATCH_SIZE, fmd[0], fmd[0], fmd[0] * fmd[0]))
        sparse0 = sparseopattributeq(base, att)
        sparse0 = tf.reshape(
            sparse0, [BATCH_SIZE, fmd[0], fmd[0], nzatte * fmd[0] * fmd[0]])
        # Upsampling stack, sparsifying between blocks.
        up1 = block_up_no(sparse0, channelnd[1], k=kersized[0], s=1,
                          name='block1', class_info=class_info,
                          layer_condition=layer_condition)  # 8x8
        sparse1 = sparseop(up1, int(1 / 4 * fmd[1] * fmd[1]), 1)
        up2 = block_up_no(sparse1, channelnd[2], k=kersized[1], s=1,
                          name='block2', class_info=class_info,
                          layer_condition=layer_condition)  # 16x16
        sparse2 = sparseop(up2, int(1 / 4 * fmd[2] * fmd[2]), 1)
        up3 = resblock_up_no(sparse2, channelnd[3], k=kersized[2], s=1,
                             name='block3', class_info=class_info,
                             layer_condition=layer_condition)  # 32x32
        sparse3 = sparseop(up3, int(1 / 3 * fmd[3] * fmd[3]), 1)
        up4 = block_up_no(sparse3, channelnd[4], k=kersized[3], s=1,
                          name='block4', class_info=class_info,
                          layer_condition=layer_condition)  # 64x64
        # Image head, bounded to [-1, 1] by tanh.
        gx = tf.nn.tanh(
            conv2d(up4, channelnd[5], k=kersized[4], s=1,
                   name="conv2d_5"))  # 64x64
        return gx
# Example #3
def descriptor(x, reuse=False):
    """Descriptor/critic network: score an image batch x with one scalar each.

    Stem conv -> four downsampling blocks (32x32 -> 16x16 -> 8x8 -> 4x4 per
    the original annotations) -> minibatch-stddev feature -> conv head ->
    flatten -> unbounded dense output of shape [BATCH_SIZE, 1].  Weights
    live under the 'des' variable scope; reuse=True shares them.
    """
    with tf.variable_scope('des', reuse=reuse):
        # Stem: plain conv + leaky ReLU before the downsampling stack.
        h = lrelu(conv2d(x, channelndes[1], k=kersizedes[0], s=1, name="conv2d_0"))
        # Downsampling stack.
        h = block_down(h, channelndes[1], k=kersizedes[1], s=1, name='block1')     # 32x32
        h = resblock_down(h, channelndes[2], k=kersizedes[2], s=1, name='block2')  # 16x16
        h = block_down(h, channelndes[3], k=kersizedes[3], s=1, name='block3')     # 8x8
        h = block_down(h, channelndes[4], k=kersizedes[4], s=1, name='block4')     # 4x4
        # Append a minibatch-stddev statistic (group size 10) before the head.
        h = minibatch_stddev_layer(h, 10)
        h = lrelu(
            conv2d(h, channelndes[5], k=kersizedes[4], s=1, name="conv2d_5"))  # 4x4
        # Flatten and project to a single score per sample.
        flat = tf.reshape(h, [BATCH_SIZE, channelndes[5] * fmdes[5] * fmdes[5]])
        out = dense(flat, 1, gain=1, name='fully_f')
        return out