Code Example #1
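# Residual block: reflection-padded 3x3 convolutions with instance normalization, added back to the input through a ReLU.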
def build_resnet_block_Att(inputres, dim, name="resnet", padding="REFLECT"):
    out_res = PadLayer([[0, 0], [1, 1], [1, 1], [0, 0]], mode=padding)(inputres)

    out_res = Conv2d(
        n_filter=dim,
        filter_size=(3, 3),
        strides=(1, 1),
        padding="VALID",
        act=None,
        W_init=tf.initializers.TruncatedNormal(stddev=0.02),
        b_init=tf.constant_initializer(0.0)
    )(out_res)
    out_res = InstanceNorm2d(act=tf.nn.relu)(out_res)

    out_res = PadLayer([[0, 0], [1, 1], [1, 1], [0, 0]], mode=padding)(out_res)

    out_res = Conv2d(
        n_filter=dim,
        filter_size=(3, 3),
        strides=(1, 1),
        padding="VALID",
        act=None,
        W_init=tf.initializers.TruncatedNormal(stddev=0.02),
        b_init=tf.constant_initializer(0.0)
    )(out_res)
    out_res = InstanceNorm2d(act=None)(out_res)

    return tf.nn.relu(out_res + inputres)
Code Example #2
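# Shared convolutional 'base' feature extractor: strided convolutions with batch norm; `flags.leaky_rate` is assumed to be defined by the surrounding project.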
def get_base(shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, flags.leaky_rate)

    ni = Input(shape)
    nn = Conv2d(n_filter=64,
                filter_size=(4, 4),
                strides=(2, 2),
                act=lrelu,
                W_init=w_init)(ni)
    nn = Conv2d(n_filter=128,
                filter_size=(4, 4),
                strides=(2, 2),
                W_init=w_init,
                b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    nn = Conv2d(n_filter=256,
                filter_size=(4, 4),
                strides=(2, 2),
                W_init=w_init,
                b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    nn = Flatten()(nn)
    return tl.models.Model(inputs=ni, outputs=nn, name='base')
Code Example #3
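# DCGAN-style generator: a dense projection reshaped to a small feature map, followed by repeated 2x upsample + conv + batch-norm stages and a final tanh deconvolution.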
def get_generator(shape, gf_dim=64): # Dimension of gen filters in first conv layer. [64]
    image_size = flags.output_size
    s16 = image_size // 32  # the stack below upsamples 2x five times, so the seed feature map is 1/32 of the output size
    # w_init = tf.glorot_normal_initializer()
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x : tf.nn.leaky_relu(x, 0.2)

    ni = Input(shape)
    nn = Dense(n_units=(gf_dim * 8 * s16 * s16), W_init=w_init, b_init=None)(ni)
    nn = Reshape(shape=[-1, s16, s16, gf_dim * 8])(nn)  # matches the gf_dim*8*s16*s16 units produced by the Dense layer
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(nn)
    # nn = DeConv2d(gf_dim * 4, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = UpSampling2d(scale=(2, 2), antialias=True)(nn)
    nn = Conv2d(gf_dim * 8, (5, 5), padding='SAME', b_init=None, W_init=w_init)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    # nn = DeConv2d(gf_dim * 2, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = UpSampling2d(scale=(2, 2), antialias=True)(nn)
    nn = Conv2d(gf_dim * 4, (5, 5), padding='SAME', b_init=None, W_init=w_init)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    # nn = DeConv2d(gf_dim, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = UpSampling2d(scale=(2, 2), antialias=True)(nn)
    nn = Conv2d(gf_dim * 2, (5, 5), padding='SAME', b_init=None, W_init=w_init)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)

    nn = UpSampling2d(scale=(2, 2), antialias=True)(nn)
    nn = Conv2d(gf_dim, (5, 5), padding='SAME', b_init=None, W_init=w_init)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)

    nn = DeConv2d(3, (5, 5), (2, 2), act=tf.nn.tanh, W_init=w_init)(nn)

    return tl.models.Model(inputs=ni, outputs=nn, name='generator')
Code Example #4
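# Spatial transformer network (TensorLayer 1.x graph API): an MLP localisation net, an affine sampler, and a small CNN classifier; returns the classifier output and the transformed image.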
def model(x, is_train, reuse):
    with tf.variable_scope("STN", reuse=reuse):
        nin = InputLayer(x, name='in')
        ## 1. Localisation network
        # use MLP as the localisation net
        nt = FlattenLayer(nin, name='flatten')
        nt = DenseLayer(nt, n_units=20, act=tf.nn.tanh, name='dense1')
        nt = DropoutLayer(nt, 0.8, True, is_train, name='drop1')
        # you can also use CNN instead for MLP as the localisation net
        # nt = Conv2d(nin, 16, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', name='tc1')
        # nt = Conv2d(nt, 8, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', name='tc2')
        ## 2. Spatial transformer module (sampler)
        n = SpatialTransformer2dAffineLayer(nin,
                                            nt,
                                            out_size=[40, 40],
                                            name='spatial')
        s = n
        ## 3. Classifier
        n = Conv2d(n,
                   16, (3, 3), (2, 2),
                   act=tf.nn.relu,
                   padding='SAME',
                   name='conv1')
        n = Conv2d(n,
                   16, (3, 3), (2, 2),
                   act=tf.nn.relu,
                   padding='SAME',
                   name='conv2')
        n = FlattenLayer(n, name='flatten2')
        n = DenseLayer(n, n_units=1024, act=tf.nn.relu, name='out1')
        n = DenseLayer(n, n_units=10, act=tf.identity, name='out2')
    return n, s
Code Example #5
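# Encoder Ek: a strided convolution, two conv + batch-norm stages, a transposed convolution, and a 1x1 transposed-convolution output; `flags.n_extra_layers` is read from the project's flags but unused in this snippet.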
def get_Ek(shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    ngf = 64
    isize = 64
    n_extra_layers = flags.n_extra_layers

    ni = Input(shape)
    nn = Conv2d(ngf, (4, 4), (2, 2), W_init=w_init, act=tf.nn.relu)(ni)

    nn = Conv2d(ngf * 2, (4, 4), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)

    nn = Conv2d(ngf * 2, (4, 4), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)

    nn = DeConv2d(ngf // 2, (4, 4), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)

    nn = DeConv2d(ngf // 8, (1, 1), (1, 1), W_init=w_init, b_init=None)(nn)

    # nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(nn)
    #
    # nn = DeConv2d(ngf // 8, (4, 4), (2, 2), W_init=w_init, act=tf.nn.relu)(nn)
    return tl.models.Model(inputs=ni, outputs=nn)
Code Example #6
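# CIFAR-10-style CNN with batch normalization; returns the network, the L2-regularized cross-entropy cost, and the accuracy.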
def model_batch_norm(x_crop, y_, is_train, reuse):
    W_init = tf.truncated_normal_initializer(stddev=5e-2)
    W_init2 = tf.truncated_normal_initializer(stddev=0.04)
    b_init2 = tf.constant_initializer(value=0.1)
    with tf.variable_scope("model", reuse=reuse):
        net = InputLayer(x_crop, name='input')
        net = Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='cnn1')
        net = BatchNormLayer(net, decay=0.99, is_train=is_train, act=tf.nn.relu, name='batch1')
        net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')

        net = Conv2d(net, 64, (5, 5), (1, 1), padding='SAME', W_init=W_init, b_init=None, name='cnn2')
        net = BatchNormLayer(net, decay=0.99, is_train=is_train, act=tf.nn.relu, name='batch2')
        net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')

        net = FlattenLayer(net, name='flatten')
        net = DenseLayer(net, 384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d1relu')
        net = DenseLayer(net, 192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='d2relu')
        net = DenseLayer(net, n_units=10, act=None, W_init=W_init2, name='output')
        y = net.outputs

        ce = tl.cost.cross_entropy(y, y_, name='cost')
        # L2 regularization on the dense-layer weights; without it, accuracy drops by roughly 15%.
        L2 = 0
        for p in tl.layers.get_variables_with_name('relu/W', True, True):
            L2 += tf.contrib.layers.l2_regularizer(0.004)(p)
        cost = ce + L2

        correct_prediction = tf.equal(tf.cast(tf.argmax(y, 1), tf.int32), y_)
        acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        return net, cost, acc
Code Example #7
File: lw_openpose.py Project: TuskAW/hyperpose
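 # Initial-stage block of the lightweight OpenPose implementation: a 1x1 input convolution, three conv blocks, and a 3x3 output convolution.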
 def __init__(self,
              n_filter=128,
              in_channels=512,
              data_format="channels_first"):
     super().__init__()
     self.data_format = data_format
     self.init_layer = Conv2d(n_filter=n_filter,
                              in_channels=in_channels,
                              filter_size=(1, 1),
                              act=tf.nn.relu,
                              data_format=self.data_format)
     self.main_block = layers.LayerList([
         conv_block(n_filter=n_filter,
                    in_channels=n_filter,
                    data_format=self.data_format),
         conv_block(n_filter=n_filter,
                    in_channels=n_filter,
                    data_format=self.data_format),
         conv_block(n_filter=n_filter,
                    in_channels=n_filter,
                    data_format=self.data_format),
     ])
     self.end_layer = Conv2d(n_filter=n_filter,
                             in_channels=n_filter,
                             filter_size=(3, 3),
                             act=tf.nn.relu,
                             data_format=self.data_format)
Code Example #8
File: lw_openpose.py Project: TuskAW/hyperpose
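 # Refinement stage: five refinement blocks followed by separate 1x1 heads for confidence maps and part-affinity fields; `initializer` is assumed to be defined at module level in the original file.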
 def __init__(self,
              n_filter=128,
              in_channels=185,
              n_confmaps=19,
              n_pafmaps=38,
              data_format="channels_first"):
     super().__init__()
     self.data_format = data_format
     self.block_1 = self.Refinement_block(n_filter=n_filter,
                                          in_channels=in_channels,
                                          data_format=self.data_format)
     self.block_2 = self.Refinement_block(n_filter=n_filter,
                                          in_channels=n_filter,
                                          data_format=self.data_format)
     self.block_3 = self.Refinement_block(n_filter=n_filter,
                                          in_channels=n_filter,
                                          data_format=self.data_format)
     self.block_4 = self.Refinement_block(n_filter=n_filter,
                                          in_channels=n_filter,
                                          data_format=self.data_format)
     self.block_5 = self.Refinement_block(n_filter=n_filter,
                                          in_channels=n_filter,
                                          data_format=self.data_format)
     self.conf_block = layers.LayerList([
         Conv2d(n_filter=512, in_channels=n_filter, filter_size=(1, 1), strides=(1, 1), act=tf.nn.relu,
                W_init=initializer, b_init=initializer, data_format=self.data_format),
         Conv2d(n_filter=n_confmaps, in_channels=512, filter_size=(1, 1), strides=(1, 1),
                W_init=initializer, b_init=initializer, data_format=self.data_format)
     ])
     self.paf_block = layers.LayerList([
         Conv2d(n_filter=512, in_channels=n_filter, filter_size=(1, 1), strides=(1, 1), act=tf.nn.relu,
                W_init=initializer, b_init=initializer, data_format=self.data_format),
         Conv2d(n_filter=n_pafmaps, in_channels=512, filter_size=(1, 1), strides=(1, 1),
                W_init=initializer, b_init=initializer, data_format=self.data_format)
     ])
Code Example #9
File: backbones.py Project: TuskAW/hyperpose
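 # MobileNetV2-style inverted-residual block; `identity` marks blocks eligible for a skip connection (stride 1 and matching channel counts).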
 def __init__(self,
              n_filter=128,
              in_channels=128,
              strides=(1, 1),
              exp_ratio=6,
              data_format="channels_first"):
     super().__init__()
     self.n_filter = n_filter
     self.in_channels = in_channels
     self.strides = strides
     self.exp_ratio = exp_ratio
     self.data_format = data_format
     self.hidden_dim = self.exp_ratio * self.in_channels
     self.identity = False
     if (self.strides == (1, 1) and self.in_channels == self.n_filter):
         self.identity = True
     if (self.exp_ratio == 1):
         self.main_block = LayerList([
             DepthwiseConv2d(in_channels=self.hidden_dim, filter_size=(3, 3), strides=self.strides,
                             b_init=None, data_format=self.data_format),
             BatchNorm2d(num_features=self.hidden_dim, is_train=True, act=tf.nn.relu6, data_format=self.data_format),
             Conv2d(n_filter=self.n_filter, in_channels=self.hidden_dim, filter_size=(1, 1), strides=(1, 1),
                    b_init=None, data_format=self.data_format),
             BatchNorm2d(num_features=self.n_filter, is_train=True, act=None, data_format=self.data_format)
         ])
     else:
         self.main_block = LayerList([
             Conv2d(n_filter=self.hidden_dim, in_channels=self.in_channels, filter_size=(1, 1), strides=(1, 1),
                    b_init=None, data_format=self.data_format),
             BatchNorm2d(num_features=self.hidden_dim, is_train=True, act=tf.nn.relu6, data_format=self.data_format),
             DepthwiseConv2d(in_channels=self.hidden_dim, filter_size=(3, 3), strides=self.strides,
                             b_init=None, data_format=self.data_format),
             BatchNorm2d(num_features=self.hidden_dim, is_train=True, act=tf.nn.relu6, data_format=self.data_format),
             Conv2d(n_filter=self.n_filter, in_channels=self.hidden_dim, filter_size=(1, 1), strides=(1, 1),
                    b_init=None, data_format=self.data_format)
         ])
Code Example #10
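# Convolutional encoder: four stride-2 convolutions (batch norm on all but the first), flattened and projected to a `flags.z_dim`-dimensional latent vector.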
def get_encoder(shape=[None, flags.output_size, flags.output_size, flags.c_dim],
                df_dim=64, name=None):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)

    ni = Input(shape)
    nn = Conv2d(df_dim, (5, 5), (2, 2), act=lrelu, W_init=w_init)(ni)
    nn = Conv2d(df_dim * 2, (5, 5), (2, 2),
                act=None,
                W_init=w_init,
                b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)

    nn = Conv2d(df_dim * 4, (5, 5), (2, 2),
                act=None,
                W_init=w_init,
                b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)

    nn = Conv2d(df_dim * 8, (5, 5), (2, 2),
                act=None,
                W_init=w_init,
                b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)

    nn = Flatten()(nn)
    #print(nn.shape)
    nn = Dense(flags.z_dim, act=tf.identity, W_init=w_init)(nn)

    return tl.models.Model(inputs=ni, outputs=nn, name=name)
Code Example #11
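# Variant of the residual block in Example #1 that expresses the skip addition and final ReLU with Elementwise and Lambda layers inside a variable scope.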
def build_resnet_block_Att(inputres, dim, name="resnet", padding="REFLECT"):
    with tf.compat.v1.variable_scope(name):
        out_res = PadLayer([[0, 0], [1, 1], [1, 1], [0, 0]], padding)(inputres)

        out_res = Conv2d(n_filter=dim,
                         filter_size=(3, 3),
                         strides=(1, 1),
                         padding="VALID",
                         act=None,
                         W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                         b_init=tf.constant_initializer(0.0))(out_res)
        out_res = InstanceNorm2d(act=tf.nn.relu)(out_res)

        out_res = PadLayer([[0, 0], [1, 1], [1, 1], [0, 0]], padding)(out_res)

        out_res = Conv2d(n_filter=dim,
                         filter_size=(3, 3),
                         strides=(1, 1),
                         padding="VALID",
                         act=None,
                         W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                         b_init=tf.constant_initializer(0.0))(out_res)
        out_res = InstanceNorm2d(act=None)(out_res)

        tmp = Elementwise(combine_fn=tf.add)([out_res, inputres])
        return Lambda(tf.nn.relu)(tmp)
Code Example #12
File: model.py Project: zyw1218/OUCML
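# DCGAN discriminator (TensorLayer 1.x): strided convolutions with batch norm, flattened to a single logit; returns the network and the raw logits.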
def discriminator(inputs, is_train=True, reuse=False):
    df_dim = 64  # Dimension of discrim filters in first conv layer. [64]
    w_init = tf.glorot_normal_initializer()
    gamma_init = tf.random_normal_initializer(1., 0.02)

    with tf.variable_scope("discriminator", reuse=reuse):

        net_in = InputLayer(inputs, name='d/in')
        net_h0 = Conv2d(net_in,
                        df_dim, (5, 5), (2, 2),
                        act=tf.nn.leaky_relu,
                        padding='SAME',
                        W_init=w_init,
                        name='d/h0/conv2d')

        net_h1 = Conv2d(net_h0,
                        df_dim * 2, (5, 5), (2, 2),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        name='d/h1/conv2d')
        net_h1 = BatchNormLayer(net_h1,
                                act=tf.nn.leaky_relu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='d/h1/batch_norm')

        net_h2 = Conv2d(net_h1,
                        df_dim * 4, (5, 5), (2, 2),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        name='d/h2/conv2d')
        net_h2 = BatchNormLayer(net_h2,
                                act=tf.nn.leaky_relu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='d/h2/batch_norm')

        net_h3 = Conv2d(net_h2,
                        df_dim * 8, (5, 5), (2, 2),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        name='d/h3/conv2d')
        net_h3 = BatchNormLayer(net_h3,
                                act=tf.nn.leaky_relu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='d/h3/batch_norm')

        net_h4 = FlattenLayer(net_h3, name='d/h4/flatten')
        net_h4 = DenseLayer(net_h4,
                            n_units=1,
                            act=tf.identity,
                            W_init=w_init,
                            name='d/h4/lin_sigmoid')
        logits = net_h4.outputs
        net_h4.outputs = tf.nn.sigmoid(net_h4.outputs)
    return net_h4, logits
Code Example #13
File: models.py Project: AaronAnima/DisentanGAN
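# Content encoder (after 'Multimodal Unsupervised Image-to-Image Translation'): a 7x7 convolution, two downsampling convolutions with instance norm, four residual blocks, and Gaussian noise on the output.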
def get_Ec(x_shape=(None, flags.img_size_h, flags.img_size_w, flags.c_dim),
           name=None):
    # ref: Multimodal Unsupervised Image-to-Image Translation
    lrelu = lambda x: tl.act.lrelu(x, 0.01)
    w_init = tf.random_normal_initializer(stddev=0.02)
    g_init = tf.random_normal_initializer(1., 0.02)  # gamma initializer used by the InstanceNorm2d layers below
    channel = 64
    ni = Input(x_shape)
    n = Conv2d(channel, (7, 7), (1, 1), act=lrelu, W_init=w_init)(ni)
    for i in range(2):
        n = Conv2d(channel * 2, (3, 3), (2, 2), W_init=w_init)(n)
        n = InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(n)
        channel = channel * 2

    for i in range(1, 5):
        # res block
        nn = Conv2d(channel, (3, 3), (1, 1),
                    act=None,
                    W_init=w_init,
                    b_init=None)(n)
        nn = InstanceNorm2d(act=tf.nn.relu, gamma_init=g_init)(nn)
        nn = Conv2d(channel, (3, 3), (1, 1),
                    act=None,
                    W_init=w_init,
                    b_init=None)(nn)
        nn = InstanceNorm2d(act=None, gamma_init=g_init)(nn)
        n = Elementwise(tf.add)([n, nn])

    n = GaussianNoise(is_always=False)(n)

    M = Model(inputs=ni, outputs=n, name=name)
    return M
Code Example #14
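# Image discriminator: alternating stride-2 and stride-1 conv + batch-norm blocks, flattened to a single logit.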
def get_img_D(shape):
    df_dim = 8
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)
    ni = Input(shape)
    n = Conv2d(df_dim, (5, 5), (2, 2), act=None, W_init=w_init,
               b_init=None)(ni)
    n = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 2, (5, 5), (1, 1),
               act=None,
               W_init=w_init,
               b_init=None)(n)
    n = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 4, (5, 5), (2, 2),
               act=None,
               W_init=w_init,
               b_init=None)(n)
    n = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 8, (5, 5), (1, 1),
               act=None,
               W_init=w_init,
               b_init=None)(n)
    n = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 8, (5, 5), (2, 2),
               act=None,
               W_init=w_init,
               b_init=None)(n)
    n = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(n)
    nf = Flatten(name='flatten')(n)
    n = Dense(n_units=1, act=None, W_init=w_init)(nf)
    return tl.models.Model(inputs=ni, outputs=n, name='img_Discriminator')
Code Example #15
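# Batch-normalized CNN classifier; the variable scope uses tf.AUTO_REUSE so the same weights serve the training and evaluation graphs.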
def model(x, is_train):
    with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
        net = InputLayer(x, name='input')
        net = Conv2d(net,
                     64, (5, 5), (1, 1),
                     padding='SAME',
                     b_init=None,
                     name='cnn1')
        net = BatchNormLayer(net,
                             decay=0.99,
                             is_train=is_train,
                             act=tf.nn.relu,
                             name='batch1')
        net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')

        net = Conv2d(net,
                     64, (5, 5), (1, 1),
                     padding='SAME',
                     b_init=None,
                     name='cnn2')
        net = BatchNormLayer(net,
                             decay=0.99,
                             is_train=is_train,
                             act=tf.nn.relu,
                             name='batch2')
        net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')

        net = FlattenLayer(net, name='flatten')
        net = DenseLayer(net, 384, act=tf.nn.relu, name='d1relu')
        net = DenseLayer(net, 192, act=tf.nn.relu, name='d2relu')
        net = DenseLayer(net, 10, act=None, name='output')
    return net
Code Example #16
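# Image discriminator with spectral-normalized convolutions: optional extra 3x3 layers (flags.n_extra_layers), channel-doubling stride-2 layers until the feature map reaches 4x4, then a final 4x4 VALID convolution.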
def get_img_D(shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)
    ndf = 64
    isize = 64
    n_extra_layers = flags.n_extra_layers

    ni = Input(shape)
    n = Conv2d(ndf, (4, 4), (2, 2), act=None, W_init=w_init, b_init=None)(ni)
    csize, cndf = isize / 2, ndf

    for t in range(n_extra_layers):
        n = SpectralNormConv2d(cndf, (3, 3), (1, 1),
                               act=lrelu,
                               W_init=w_init,
                               b_init=None)(n)

    while csize > 4:
        cndf = cndf * 2
        n = SpectralNormConv2d(cndf, (4, 4), (2, 2),
                               act=lrelu,
                               W_init=w_init,
                               b_init=None)(n)
        csize = csize / 2

    n = Conv2d(1, (4, 4), (1, 1),
               act=None,
               W_init=w_init,
               b_init=None,
               padding='VALID')(n)

    return tl.models.Model(inputs=ni, outputs=n)
Code Example #17
File: model.py Project: se7enXF/TAM_Net
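    # U-Net-like TAM_Net: five conv + residual encoder stages, mirrored deconv decoder stages with skip concatenation and 1x1 fusion, and a sigmoid output layer.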
    def tam_net(self):
        inputs = Input(self.in_shape, name='inputs')

        e_in = inputs
        for i in range(0, 5):
            e_out = Conv2d(self.f_size * (2**i), (3, 3), (2, 2),
                           act=tf.nn.relu,
                           name=f'e{i+1}_con')(e_in)
            e_in = self.residual_block(i, e=True)(e_out)
            self.__setattr__(f'e{i+1}', e_in)

        d_in = e_in
        for i in range(4, 0, -1):
            d_out = DeConv2d(self.f_size * (2**(i - 1)), (3, 3), (2, 2),
                             name=f'd{i}_con')(d_in)
            encoder = self.__getattribute__(f'e{i}')
            d_out = Concat(concat_dim=3, name=f'concat{i}')([encoder, d_out])
            d_out = Conv2d(self.f_size * (2**(i - 1)), (1, 1), (1, 1),
                           name=f'fusion{i}')(d_out)
            d_in = self.residual_block(i - 1, e=False)(d_out)
            self.__setattr__(f'd{i + 1}', d_in)

        outs = DeConv2d(3, (3, 3), (2, 2), name='d_con_out')(d_in)
        outs = Conv2d(3, (1, 1), (1, 1), act=tf.nn.sigmoid, name='outs')(outs)
        return Model(inputs=inputs, outputs=outs, name="TAM_Net")
Code Example #18
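# Joint discriminator over (image, latent) pairs: a convolutional image branch and a dense latent branch are concatenated and mapped to a single logit.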
def get_discriminator(latent_shape, image_shape, df_dim=64):

    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)

    n1i = Input(image_shape)
    n1 = Conv2d(df_dim, (5, 5), (2, 2), act=lrelu, W_init=w_init)(n1i)
    n1 = Conv2d(df_dim * 2, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    n1 = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(n1)
    n1 = Dropout(keep=0.8)(n1)
    n1 = Conv2d(df_dim * 4, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    n1 = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(n1)
    n1 = Dropout(keep=0.8)(n1)
    n1 = Conv2d(df_dim * 8, (5, 5), (2, 2), W_init=w_init, b_init=None)(n1)
    n1 = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(n1)
    n1 = Dropout(keep=0.8)(n1)
    n1 = Flatten()(n1)  # [-1,4*4*df_dim*8]

    n2i = Input(latent_shape)
    n2 = Dense(n_units=4 * 4 * df_dim * 8, W_init=w_init, b_init=None)(n2i)
    n2 = Dropout(keep=0.8)(n2)
    nn = Concat()([n1, n2])

    nn = Dense(n_units=1, W_init=w_init, b_init=None)(nn)

    return tl.models.Model(inputs=[n1i, n2i], outputs=nn, name='discriminator')
Code Example #19
File: openpose.py Project: TuskAW/hyperpose
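 # OpenPose model constructor: a VGG19 backbone by default, a two-convolution CPM stage, an initial stage, and five refinement stages producing confidence maps and part-affinity fields.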
 def __init__(self, n_pos=19, n_limbs=19, num_channels=128, hin=368, win=368, hout=46, wout=46,
              backbone=None, pretrained_backbone=True, data_format="channels_first"):
     super().__init__()
     self.num_channels=num_channels
     self.n_pos=n_pos
     self.n_limbs=n_limbs
     self.n_confmaps=n_pos
     self.n_pafmaps=2*n_limbs
     self.hin=hin
     self.win=win
     self.hout=hout
     self.wout=wout
     self.data_format=data_format
     self.pretrained_backbone=pretrained_backbone
     self.concat_dim=1 if self.data_format=="channels_first" else -1
     # backbone configuration
     if(backbone==None):
         self.backbone=self.vgg19(in_channels=3,pretrained=self.pretrained_backbone,data_format=self.data_format)
     else:
         self.backbone=backbone(scale_size=8,data_format=self.data_format)
     self.cpm_stage=LayerList([
         Conv2d(n_filter=256,in_channels=self.backbone.out_channels,filter_size=(3,3),strides=(1,1),padding="SAME",act=tf.nn.relu,data_format=self.data_format),
         Conv2d(n_filter=128,in_channels=256,filter_size=(3,3),strides=(1,1),padding="SAME",act=tf.nn.relu,data_format=self.data_format)
     ])
     #init stage
     self.init_stage=self.Init_stage(n_confmaps=self.n_confmaps, n_pafmaps=self.n_pafmaps,in_channels=128,data_format=self.data_format)
     # refinement stages 1-5
     self.refinement_stage_1=self.Refinement_stage(n_confmaps=self.n_confmaps, n_pafmaps=self.n_pafmaps, in_channels=self.n_confmaps+self.n_pafmaps+128,data_format=self.data_format)
     self.refinement_stage_2=self.Refinement_stage(n_confmaps=self.n_confmaps, n_pafmaps=self.n_pafmaps, in_channels=self.n_confmaps+self.n_pafmaps+128,data_format=self.data_format)
     self.refinement_stage_3=self.Refinement_stage(n_confmaps=self.n_confmaps, n_pafmaps=self.n_pafmaps, in_channels=self.n_confmaps+self.n_pafmaps+128,data_format=self.data_format)
     self.refinement_stage_4=self.Refinement_stage(n_confmaps=self.n_confmaps, n_pafmaps=self.n_pafmaps, in_channels=self.n_confmaps+self.n_pafmaps+128,data_format=self.data_format)
     self.refinement_stage_5=self.Refinement_stage(n_confmaps=self.n_confmaps, n_pafmaps=self.n_pafmaps, in_channels=self.n_confmaps+self.n_pafmaps+128,data_format=self.data_format)
Code Example #20
File: squeezenetv1.py Project: schneicw/Chatbot
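# SqueezeNet fire block: a 1x1 'squeeze' convolution followed by parallel 1x1 and 3x3 'expand' convolutions whose outputs are concatenated, with optional max pooling.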
def fire_block(n, n_filter, max_pool=False, name='fire_block'):
    n = Conv2d(n_filter, (1, 1), (1, 1), tf.nn.relu, 'SAME', name=name + '.squeeze1x1')(n)
    n1 = Conv2d(n_filter * 4, (1, 1), (1, 1), tf.nn.relu, 'SAME', name=name + '.expand1x1')(n)
    n2 = Conv2d(n_filter * 4, (3, 3), (1, 1), tf.nn.relu, 'SAME', name=name + '.expand3x3')(n)
    n = Concat(-1, name=name + '.concat')([n1, n2])
    if max_pool:
        n = MaxPool2d((3, 3), (2, 2), 'VALID', name=name + '.max')(n)
    return n
Code Example #21
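# GAN discriminator built from 8x8 strided convolutions with batch normalization, ending in a single sigmoid logit.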
def discriminator(inputs, is_train=True):
    with tf.variable_scope("discriminator", reuse=tf.AUTO_REUSE):
        net_in = InputLayer(inputs, name='din')

        # Conv2d wraps tf.nn.conv2d; act=tf.nn.relu applies a ReLU after the convolution
        dnet_c0 = Conv2d(net_in,
                         64, (8, 8), (2, 2),
                         act=tf.nn.relu,
                         padding='SAME',
                         name='dnet_c0')

        # Conv2d wraps tf.nn.conv2d (no activation here)
        # BatchNormLayer wraps tf.nn.batch_normalization and applies the ReLU activation
        dnet_c1 = Conv2d(dnet_c0,
                         128, (8, 8), (2, 2),
                         act=None,
                         padding='SAME',
                         name='dnet_c1')
        dnet_b1 = BatchNormLayer(dnet_c1,
                                 decay=0.9,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 name='dnet_b1')

        # dnet_p1 = MaxPool2d(dnet_b1, (2, 2), name='pool2')   # pooling performed worse than strided convolutions here, but you can try it

        dnet_c2 = Conv2d(dnet_b1,
                         256, (8, 8), (2, 2),
                         act=None,
                         padding='SAME',
                         name='dnet_c2')
        dnet_b2 = BatchNormLayer(dnet_c2,
                                 decay=0.9,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 name='dnet_b2')

        dnet_c3 = Conv2d(dnet_b2,
                         512, (8, 8), (2, 2),
                         act=None,
                         padding='SAME',
                         name='dnet_c3')
        dnet_b3 = BatchNormLayer(dnet_c3,
                                 decay=0.9,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 name='dnet_b3')

        # FlattenLayer reshapes the feature map into a 2-D tensor (tf.reshape)
        dnet_f1 = FlattenLayer(dnet_b3, name='dnet_f1')
        # DenseLayer is a fully connected layer (tf.layers.dense)
        dnet_d1 = DenseLayer(dnet_f1,
                             n_units=1,
                             act=tf.identity,
                             name='dnet_h4')
        logits = dnet_d1.outputs
        dnet_d1.outputs = tf.nn.sigmoid(dnet_d1.outputs)
    return dnet_d1, logits
Code Example #22
File: model.py Project: TuskAW/hyperpose
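    # Pose-estimation model (hyperpose): a ResNet-18 backbone by default, two 3x3 conv + batch-norm blocks, and a 1x1 conv head with 6*K + hnei*wnei*L output channels.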
    def __init__(self, K_size=18, L_size=17, win=384, hin=384, wout=12, hout=12, wnei=9, hnei=9,
                 lmd_rsp=0.25, lmd_iou=1, lmd_coor=5, lmd_size=5, lmd_limb=0.5, backbone=None, data_format="channels_first"):
        super().__init__()
        #construct params
        self.K = K_size
        self.L = L_size
        self.win = win
        self.hin = hin
        self.wout = wout
        self.hout = hout
        self.hnei = hnei
        self.wnei = wnei
        self.n_pos = K_size
        self.lmd_rsp = lmd_rsp
        self.lmd_iou = lmd_iou
        self.lmd_coor = lmd_coor
        self.lmd_size = lmd_size
        self.lmd_limb = lmd_limb
        self.data_format = data_format

        self.output_dim = 6 * self.K + self.hnei * self.wnei * self.L
        #construct networks
        if (backbone == None):
            self.backbone = self.Resnet_18(n_filter=512,
                                           in_channels=3,
                                           data_format=data_format)
        else:
            self.backbone = backbone(scale_size=32,
                                     data_format=self.data_format)
        self.add_layer_1 = LayerList([
            Conv2d(n_filter=512,
                   in_channels=self.backbone.out_channels,
                   filter_size=(3, 3),
                   strides=(1, 1),
                   data_format=self.data_format),
            BatchNorm2d(decay=0.9,
                        act=lambda x: tl.act.leaky_relu(x, alpha=0.1),
                        is_train=True,
                        num_features=512,
                        data_format=self.data_format)
        ])
        self.add_layer_2 = LayerList([
            Conv2d(n_filter=512,
                   in_channels=512,
                   filter_size=(3, 3),
                   strides=(1, 1),
                   data_format=self.data_format),
            BatchNorm2d(decay=0.9,
                        act=lambda x: tl.act.leaky_relu(x, alpha=0.1),
                        is_train=True,
                        num_features=512,
                        data_format=self.data_format)
        ])
        self.add_layer_3 = Conv2d(n_filter=self.output_dim,
                                  in_channels=512,
                                  filter_size=(1, 1),
                                  strides=(1, 1),
                                  data_format=self.data_format)
Code Example #23
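# DCGAN discriminator (TensorLayer 1.x) with leaky-ReLU batch-norm blocks, flattened to a single logit with a sigmoid output.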
def discriminator(inputs, is_train=True, reuse=False):
    dfs = 64
    gamma_init = tf.random_normal_initializer(1., 0.02)
    W_init = tf.random_normal_initializer(stddev=0.02)

    with tf.variable_scope('discriminator', reuse=reuse):
        tl.layers.set_name_reuse(reuse)

        d = InputLayer(inputs, name='d/inputs')
        d = Conv2d(d,
                   dfs, (5, 5), (2, 2),
                   W_init=W_init,
                   act=lambda x: tl.act.lrelu(x, 0.2),
                   name='d/conv1')

        d = Conv2d(d,
                   dfs * 2, (5, 5), (2, 2),
                   W_init=W_init,
                   act=None,
                   name='d/conv2')
        d = BatchNormLayer(d,
                           act=lambda x: tl.act.lrelu(x, 0.2),
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='d/bn3')

        d = Conv2d(d,
                   dfs * 4, (5, 5), (2, 2),
                   W_init=W_init,
                   act=None,
                   name='d/conv4')
        d = BatchNormLayer(d,
                           act=lambda x: tl.act.lrelu(x, 0.2),
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='d/bn5')

        d = Conv2d(d,
                   dfs * 8, (5, 5), (2, 2),
                   W_init=W_init,
                   act=None,
                   name='d/conv6')
        d = BatchNormLayer(d,
                           act=lambda x: tl.act.lrelu(x, 0.2),
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='d/bn7')

        d = FlattenLayer(d, name='d/flt8')
        d = DenseLayer(d,
                       1,
                       act=tl.act.identity,
                       W_init=W_init,
                       name='d/output')

        logits = d.outputs
        d.outputs = tf.nn.sigmoid(d.outputs)
        return d, logits
Code Example #24
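# VGG-like CIFAR-10 classifier with dropout; returns the network, the cross-entropy loss, and the accuracy.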
def model(x, y_, reuse, is_train=False):
    W_init = tf.truncated_normal_initializer(stddev=5e-2)
    W_init2 = tf.truncated_normal_initializer(stddev=0.04)
    b_init2 = tf.constant_initializer(value=0.1)
    with tf.variable_scope("model", reuse=reuse):
        tl.layers.set_name_reuse(reuse)
        net = InputLayer(x, name='input')
        net = Conv2d(net,
                     32, (3, 3), (1, 1),
                     act=tf.nn.relu,
                     padding='SAME',
                     W_init=W_init,
                     name='cnn1')
        net = Conv2d(net,
                     32, (3, 3), (1, 1),
                     act=tf.nn.relu,
                     W_init=W_init,
                     name='cnn2',
                     padding="VALID")
        net = MaxPool2d(net, name='pool1', padding="VALID")
        net = DropoutLayer(net, keep=0.75, is_train=is_train, name='drop1')

        net = Conv2d(net,
                     64, (3, 3), (1, 1),
                     act=tf.nn.relu,
                     padding='SAME',
                     W_init=W_init,
                     name='cnn3')
        net = Conv2d(net,
                     64, (3, 3), (1, 1),
                     act=tf.nn.relu,
                     W_init=W_init,
                     name='cnn4',
                     padding="VALID")
        net = MaxPool2d(net, name='pool2', padding="VALID")
        net = DropoutLayer(net, keep=0.75, is_train=is_train, name='drop2')

        net = FlattenLayer(net, name='flatten')
        net = DenseLayer(net,
                         n_units=512,
                         act=tf.nn.relu,
                         W_init=W_init2,
                         b_init=b_init2,
                         name='d1relu')
        net = DenseLayer(net,
                         n_units=10,
                         act=tf.identity,
                         W_init=tf.truncated_normal_initializer(stddev=1 /
                                                                192.0),
                         name='output')  # output: (batch_size, 10)
        y = net.outputs

        loss = tl.cost.cross_entropy(y, y_, name='cost')

        correct_prediction = tf.equal(tf.argmax(y, 1), y_)
        acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        return net, loss, acc
Code Example #25
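 # Residual block as a subclassed module: conv + BN(ReLU) + conv + BN with an elementwise skip addition.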
 def __init__(self):
     super(ResBlock,self).__init__()
     w_init = tf.random_normal_initializer(stddev=0.02)
     g_init = tf.random_normal_initializer(1., 0.02)
     self.conv1 = Conv2d(n_filter=64, filter_size=(3, 3), strides=(1, 1), in_channels=64, padding='SAME', W_init=w_init, b_init=None)
     self.bn1 = BatchNorm2d(num_features=64, gamma_init=g_init, act=tf.nn.relu)
     self.conv2 = Conv2d(n_filter=64, filter_size=(3, 3), strides=(1, 1), in_channels=64, padding='SAME', W_init=w_init, b_init=None)
     self.bn2 = BatchNorm2d(num_features=64, gamma_init=g_init)
     self.add = Elementwise(tf.add)
Code Example #26
File: utils.py Project: paolomandica/AML
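 # SRGAN-style generator residual block (functional form): two 3x3 conv + batch-norm layers with an elementwise skip addition.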
 def get_G_res_block(n, w_init, g_init):
     nn = Conv2d(64, (3, 3), (1, 1), padding='SAME',
                 W_init=w_init, b_init=None)(n)
     nn = BatchNorm2d(act=tf.nn.relu, gamma_init=g_init)(nn)
     nn = Conv2d(64, (3, 3), (1, 1), padding='SAME',
                 W_init=w_init, b_init=None)(nn)
     nn = BatchNorm2d(gamma_init=g_init)(nn)
     nn = Elementwise(tf.add)([n, nn])
     return nn
Code Example #27
File: model.py Project: se7enXF/TAM_Net
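 # Residual block returned as a reusable layer via Model(...).as_layer(); the input channel count is f_size * 2**n_k.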
 def residual_block(self, n_k=1, e=True):
     k_size = self.f_size * (2**n_k)
     ni = Input([None, None, None, k_size])
     nn = Conv2d(k_size, (3, 3), (1, 1))(ni)
     nn = BatchNorm(act=tf.nn.relu)(nn)
     nn = Conv2d(k_size, (3, 3), (1, 1))(nn)
     nn = BatchNorm()(nn)
     nn = Elementwise(tf.add)([ni, nn])
     return Model(inputs=ni,
                  outputs=nn,
                  name=f'{"e" if e else "d"}{n_k+1}_res').as_layer()
Code Example #28
File: resnet.py Project: zuzi-rl/tensorlayer
def conv_block(input, kernel_size, n_filters, stage, block, strides=(2, 2)):
    """The conv block where there is a conv layer at shortcut.

    Parameters
    ----------
    input : tf tensor
        Input tensor from above layer.
    kernel_size : int
        The kernel size of middle conv layer at main path.
    n_filters : list of integers
        The numbers of filters for the three conv layers in the main path.
    stage : int
        Current stage label.
    block : str
        Current block label.
    strides : tuple
        Strides for the first conv layer in the block.

    Returns
    -------
        Output tensor of this block.

    """
    filters1, filters2, filters3 = n_filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2d(filters1, (1, 1),
               strides=strides,
               W_init=tf.initializers.he_normal(),
               name=conv_name_base + '2a')(input)
    x = BatchNorm(name=bn_name_base + '2a', act='relu')(x)

    ks = (kernel_size, kernel_size)
    x = Conv2d(filters2,
               ks,
               padding='SAME',
               W_init=tf.initializers.he_normal(),
               name=conv_name_base + '2b')(x)
    x = BatchNorm(name=bn_name_base + '2b', act='relu')(x)

    x = Conv2d(filters3, (1, 1),
               W_init=tf.initializers.he_normal(),
               name=conv_name_base + '2c')(x)
    x = BatchNorm(name=bn_name_base + '2c')(x)

    shortcut = Conv2d(filters3, (1, 1),
                      strides=strides,
                      W_init=tf.initializers.he_normal(),
                      name=conv_name_base + '1')(input)
    shortcut = BatchNorm(name=bn_name_base + '1')(shortcut)

    x = Elementwise(tf.add, act='relu')([x, shortcut])
    return x
Code Example #29
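 # SRResNet-style super-resolution generator layers defined in __init__; the forward pass is presumably defined elsewhere in the original class.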
 def __init__(self):
     super(SRresnet,self).__init__()
     w_init = tf.random_normal_initializer(stddev=0.02)
     g_init = tf.random_normal_initializer(1., 0.02)
     self.conv1 = Conv2d(n_filter=64, filter_size=(3, 3), strides=(1, 1), in_channels=3, act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=None)
     self.conv2 = Conv2d(n_filter=64, filter_size=(3, 3), strides=(1, 1), in_channels=64, padding='SAME', W_init=w_init, b_init=None)
     self.conv3 = Conv2d(n_filter=256, filter_size=(3, 3), strides=(1, 1), in_channels=64, padding='SAME', W_init=w_init, b_init=None)
     self.conv4 = Conv2d(n_filter=3, filter_size=(3, 3), strides=(1, 1), in_channels=64, act=tf.nn.tanh, padding='SAME', W_init=w_init, b_init=None)
     self.bn2 = BatchNorm2d(num_features=64, gamma_init=g_init)
     self.bn1 = BatchNorm2d(num_features=64, gamma_init=g_init, act=tf.nn.relu)
     self.subconv1 = SubpixelConv2d(scale=2, n_out_channels=256, in_channels=256, act=tf.nn.relu)
     self.add1 = Elementwise(tf.add)
Code Example #30
File: model_list.py Project: susantamandal/SISR
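# Phase-2 generator: upsamples LR features with a 4x pixel shuffle, fuses them with HR-image features, and adds a residual reconstruction to the HR input.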
def model_G2():  ##Phase2 Generator

    gamma_init = tf1.random_normal_initializer(1., 0.02)
    w_init = tf1.random_normal_initializer(stddev=0.02)
    fn = tf1.nn.relu

    ##	Input layers
    lr_image = Input(
        (None, 128, 128, 3))  ##	(batch_size, height, width, channel)
    hr_image = Input((None, 512, 512, 3))

    ## 	Feature extracting layers from LR image
    lr_feature_layer_1 = Conv2d(64, (3, 3), (1, 1),
                                act=fn,
                                padding='SAME',
                                W_init=w_init)(lr_image)  # Shape(1,128,128,64)
    lr_feature_layer_1 = BatchNorm2d(gamma_init=gamma_init)(lr_feature_layer_1)

    lr_feature_layer_2 = SubpixelConv2d(scale=4, act=fn)(
        lr_feature_layer_1)  # Shape(1,512,512,4): 4x pixel shuffle trades channels for resolution

    ##	Feature extracting layers from HR image

    hr_feature_layer_1 = Conv2d(64, (3, 3), (1, 1),
                                act=fn,
                                padding='SAME',
                                W_init=w_init)(hr_image)  # Shape(1,512,512,64)
    hr_feature_layer_1 = BatchNorm2d(gamma_init=gamma_init)(hr_feature_layer_1)

    ##	Features Merging layers

    merge_layer = Concat(concat_dim=-1)(
        [lr_feature_layer_2, hr_feature_layer_1])  # Shape(1,512,512,68)

    non_linearity_layer_1 = Conv2d(64, (5, 5), (1, 1),
                                   act=fn,
                                   padding='SAME',
                                   W_init=w_init)(
                                       merge_layer)  # Shape(1,512,512,64)
    non_linearity_layer_1 = BatchNorm2d(
        gamma_init=gamma_init)(non_linearity_layer_1)

    ## 	Reconstruction layers
    Recon_layer_1 = Conv2d(3, (5, 5), (1, 1),
                           act=fn,
                           padding='SAME',
                           W_init=w_init)(
                               non_linearity_layer_1)  # Shape(1,512,512,3)
    Recon_layer_2 = Elementwise(combine_fn=tf1.add)(
        [Recon_layer_1, hr_image])  # Shape(1,512,512,3): residual add with the HR input

    return Model(inputs=[lr_image, hr_image], outputs=Recon_layer_2)