Example #1
def generator_att(inp):
    """Attention generator: encodes the input with strided convolutions and
    `block` units, applies self-attention, then upsamples with deconvolutions
    to produce a 1-channel sigmoid attention mask A and a 3-channel tanh
    content image C."""
    global reuse_genatt, bn_training, blknum
    blknum = 0
    with tf.variable_scope('gen_att', reuse=reuse_genatt):
        mod = Model(inp)
        mod.set_bn_training(bn_training)
        mod.convLayer(5,
                      32,
                      stride=2,
                      activation=M.PARAM_LRELU,
                      batch_norm=True)  #64
        block(mod, 64, 1)
        mod.convLayer(3,
                      128,
                      stride=2,
                      activation=M.PARAM_LRELU,
                      batch_norm=True)  #32
        block(mod, 128, 1)
        # block(mod,256,1)
        # block(mod,256,1)
        block(mod, 256, 1)
        mod.SelfAttention(64, residual=True)
        # block(mod,512,1)
        block(mod, 256, 1)
        # block(mod,256,1)
        block(mod, 128, 1)
        mod.deconvLayer(3,
                        64,
                        stride=2,
                        activation=M.PARAM_LRELU,
                        batch_norm=True)  # 64
        block(mod, 64, 1)
        feat = mod.deconvLayer(5,
                               64,
                               stride=2,
                               activation=M.PARAM_LRELU,
                               batch_norm=True)  #128
        A = mod.convLayer(5, 1, activation=M.PARAM_SIGMOID)  #output_attention
        mod.set_current_layer(feat)
        C = mod.convLayer(5, 3, activation=M.PARAM_TANH)
        reuse_genatt = True
    return A, C
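
A minimal usage sketch (an assumption, not part of the example above): in attention-based generators of this kind, the sigmoid mask A is commonly used to blend the tanh content C back into the input image, so only attended regions are replaced. The shapes below assume TF 1.x, a 128x128 RGB input in [-1, 1], and that the globals reuse_genatt and bn_training are initialized elsewhere.

import tensorflow as tf

inp = tf.placeholder(tf.float32, [None, 128, 128, 3])  # assumed 128x128 RGB input
A, C = generator_att(inp)      # A: [N, 128, 128, 1] mask in (0, 1); C: [N, 128, 128, 3] content in (-1, 1)
out = A * C + (1.0 - A) * inp  # keep the input where A is near 0, use generated content where A is near 1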
Example #2
def generator(inp):
    """Image generator: reshapes the input to a 2x2x512 feature map and
    upsamples it through strided deconvolutions (with one self-attention
    layer) to a 3-channel tanh output image."""
    global reuse_gen, bn_training
    with tf.variable_scope('generator', reuse=reuse_gen):
        mod = Model(inp)
        mod.set_bn_training(bn_training)
        mod.reshape([-1, 2, 2, 512])
        mod.deconvLayer(3,
                        256,
                        stride=2,
                        activation=M.PARAM_LRELU,
                        batch_norm=True)  #8
        mod.deconvLayer(3,
                        128,
                        stride=2,
                        activation=M.PARAM_LRELU,
                        batch_norm=True)  #16
        mod.SelfAttention(32)
        mod.deconvLayer(5,
                        64,
                        stride=2,
                        activation=M.PARAM_LRELU,
                        batch_norm=True)  #32
        mod.deconvLayer(5,
                        32,
                        stride=2,
                        activation=M.PARAM_LRELU,
                        batch_norm=True)  #64
        mod.deconvLayer(5,
                        16,
                        stride=2,
                        activation=M.PARAM_LRELU,
                        batch_norm=True)  #128
        mod.deconvLayer(5, 3, activation=M.PARAM_TANH)  #output
        reuse_gen = True
    return mod.get_current_layer()
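
A usage sketch under the assumption that inp is a flat latent code: the reshape to [-1, 2, 2, 512] implies the generator consumes 2 * 2 * 512 = 2048 values per sample, and the trailing tanh produces an image in (-1, 1). The globals reuse_gen and bn_training are assumed to be initialized elsewhere.

import tensorflow as tf

z = tf.placeholder(tf.float32, [None, 2 * 2 * 512])  # 2048-dim latent code (assumed)
fake = generator(z)                                  # 3-channel image in (-1, 1) from the final tanh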
Example #3
def generator_att(inp):
    """Attention generator variant: upsamples a small spatial input with
    strided deconvolutions and self-attention, returning a 3-channel sigmoid
    attention mask A and a 3-channel tanh content image C."""
    global reuse_genatt, bn_training
    with tf.variable_scope('gen_att', reuse=reuse_genatt):
        mod = Model(inp)
        mod.set_bn_training(bn_training)
        mod.deconvLayer(3,
                        512,
                        stride=2,
                        activation=M.PARAM_LRELU,
                        batch_norm=True)  #4
        mod.deconvLayer(3,
                        256,
                        stride=2,
                        activation=M.PARAM_LRELU,
                        batch_norm=True)  #8
        mod.deconvLayer(3,
                        128,
                        stride=2,
                        activation=M.PARAM_LRELU,
                        batch_norm=True)  #16
        mod.SelfAttention(32)
        mod.deconvLayer(5,
                        64,
                        stride=2,
                        activation=M.PARAM_LRELU,
                        batch_norm=True)  #32
        mod.deconvLayer(5,
                        32,
                        stride=2,
                        activation=M.PARAM_LRELU,
                        batch_norm=True)  #64
        feat = mod.deconvLayer(5,
                               16,
                               stride=2,
                               activation=M.PARAM_LRELU,
                               batch_norm=True)  #128
        A = mod.convLayer(5, 3, activation=M.PARAM_SIGMOID)  #output_attention
        mod.set_current_layer(feat)
        C = mod.convLayer(5, 3, activation=M.PARAM_TANH)
        reuse_genatt = True
    return A, C
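
Unlike Example #1, the attention mask A here has three channels, one per color channel, so any blending happens per channel. A sketch under stated assumptions: the input is taken to be a small 2x2 feature map (matching the #4 comment on the first deconvolution), and `base` is a hypothetical image the content is composited onto.

import tensorflow as tf

latent = tf.placeholder(tf.float32, [None, 2, 2, 512])  # assumed input shape; channel count is a guess
base = tf.placeholder(tf.float32, [None, 128, 128, 3])  # hypothetical image to composite onto
A, C = generator_att(latent)
out = A * C + (1.0 - A) * base  # per-channel blend, since A has 3 channels in this variant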