Example #1
def build_resnet_block_Att(inputres, dim, name="resnet", padding="REFLECT"):
    # Residual block: two (pad -> 3x3 conv -> instance norm) stages, followed by
    # a skip connection (element-wise add with the input) and a final ReLU.
    with tf.compat.v1.variable_scope(name):
        out_res = PadLayer([[0, 0], [1, 1], [1, 1], [0, 0]], padding)(inputres)

        out_res = Conv2d(n_filter=dim,
                         filter_size=(3, 3),
                         strides=(1, 1),
                         padding="VALID",
                         act=None,
                         W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                         b_init=tf.constant_initializer(0.0))(out_res)
        out_res = InstanceNorm2d(act=tf.nn.relu)(out_res)

        out_res = PadLayer([[0, 0], [1, 1], [1, 1], [0, 0]], padding)(out_res)

        out_res = Conv2d(n_filter=dim,
                         filter_size=(3, 3),
                         strides=(1, 1),
                         padding="VALID",
                         act=None,
                         W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                         b_init=tf.constant_initializer(0.0))(out_res)
        out_res = InstanceNorm2d(act=None)(out_res)

        tmp = Elementwise(combine_fn=tf.add)([out_res, inputres])
        return Lambda(tf.nn.relu)(tmp)
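A minimal usage sketch (an assumption, not part of the original listing; the shape is chosen arbitrarily): because of the residual add, the input's channel count must already equal `dim`.

ni = Input([None, 64, 64, 256])
nn = build_resnet_block_Att(ni, dim=256, name="r1")
M = Model(inputs=ni, outputs=nn)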
Example #2
def get_Ea(x_shape=(None, flags.img_size_h, flags.img_size_w, flags.c_dim),
           name=None):  # input: (1, 256, 256, 3)
    # ref: DRIT source code (Pytorch Implementation)
    w_init = tf.random_normal_initializer(stddev=0.02)
    ndf = 64
    ni = Input(x_shape)
    nn = Conv2d(ndf, (7, 7), (1, 1), padding='VALID', W_init=w_init,
                act=None)(ni)

    ## Basic Blocks * 3
    for i in range(1, 4):
        ## Basic Block
        # conv part
        n = Lambda(lambda x: tf.nn.leaky_relu(x, 0.2))(nn)  # leaky relu (0.2)
        n = Conv2d(ndf * i, (3, 3), (1, 1),
                   padding='VALID',
                   W_init=w_init,
                   act=lrelu)(n)  # conv3x3
        n = Conv2d(ndf * (i + 1), (3, 3), (1, 1),
                   padding='VALID',
                   W_init=w_init,
                   act=None)(n)  # conv3x3 in convMeanpool
        n = MeanPool2d((2, 2), (2, 2))(n)  # meanPool2d in convMeanpool
        # shortcut part
        ns = MeanPool2d((2, 2), (2, 2))(nn)
        ns = Conv2d(ndf * (i + 1), (3, 3), (1, 1),
                    padding='VALID',
                    W_init=w_init)(ns)
        nn = Elementwise(tf.add)([n, ns])

    n = GlobalMeanPool2d()(nn)
    n_mu = Dense(n_units=flags.za_dim, W_init=w_init, name="mean_linear")(n)
    n_log_var = Dense(n_units=flags.za_dim, W_init=w_init,
                      name="var_linear")(n)

    # Sampling using the reparametrization trick: z = mu + sigma * epsilon
    def sample(inputs):
        n_mu, n_log_var = inputs
        # use the dynamic shape so a None batch dimension does not break sampling
        epsilon = tf.random.truncated_normal(tf.shape(n_mu))
        # n_log_var holds log(sigma^2), so sigma = exp(0.5 * log_var), as in DRIT
        stddev = tf.exp(0.5 * n_log_var)
        return n_mu + stddev * epsilon

    no = Lambda(sample)([n_mu, n_log_var])
    M = Model(inputs=ni, outputs=[no, n_mu, n_log_var], name=name)
    return M
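A minimal usage sketch (an assumption, not from the original; `batch_images` is a placeholder name): the encoder returns the sampled attribute code together with its mean and log-variance.

Ea = get_Ea(name="Ea")
z_a, z_mu, z_log_var = Ea(batch_images, is_train=False)  # batch_images: (batch, 256, 256, 3)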
Example #3
def VGG_static(layer_type, batch_norm=False, end_with='outputs', name=None):
    ni = Input([None, 224, 224, 3])
    n = Lambda(
        lambda x: x * 255 - np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape([1, 1, 1, 3]), name='scale'
    )(ni)

    config = cfg[mapped_cfg[layer_type]]
    layers = make_layers(config, batch_norm, end_with)

    nn = layers(n)

    M = Model(inputs=ni, outputs=nn, name=name)
    return M
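A minimal usage sketch (an assumption, not from the original; `img` is a placeholder name): the helper relies on the module-level `cfg`, `mapped_cfg` and `make_layers` tables from the TensorLayer VGG implementation, and expects inputs in the [0, 1] range.

vgg = VGG_static('vgg16', batch_norm=False, end_with='outputs', name='vgg16')
probs = tf.nn.softmax(vgg(img, is_train=False))  # img: [None, 224, 224, 3]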
Example #4
def get_img_D_cifar10(shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)
    df_dim = 64
    ni = Input(shape)
    n = Lambda(lambda x: tf.image.random_crop(
        x, [flags.batch_size, 24, 24, 3]))(ni)  # patchGAN-style random crop

    # need to be implemented
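    # Hypothetical conv stack (an assumption; the original leaves this part
    # unimplemented): a DCGAN-style discriminator over the 24x24 crop, flattened
    # before the final Dense score, reusing w_init / gamma_init / lrelu above.
    n = Conv2d(df_dim, (4, 4), (2, 2), act=lrelu, W_init=w_init)(n)
    n = Conv2d(df_dim * 2, (4, 4), (2, 2), W_init=w_init)(n)
    n = BatchNorm2d(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 4, (4, 4), (2, 2), W_init=w_init)(n)
    n = BatchNorm2d(act=lrelu, gamma_init=gamma_init)(n)
    n = Flatten()(n)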

    n = Dense(n_units=1, act=None, W_init=w_init)(n)
    return tl.models.Model(inputs=ni,
                           outputs=n,
                           name='img_Discriminator_CIFAR10')
Example #5
def get_D(name=None):
    # 70x70 PatchGAN-style discriminator: random 70x70 crop, four conv stages,
    # and a single-unit output per image.
    df_dim = 64
    w_init = tf.random_normal_initializer(stddev=0.02)
    lrelu = lambda x: tl.act.lrelu(x, 0.2)

    nx = Input((flags.batch_size, 256, 256, 3))

    n = Lambda(lambda x: tf.image.random_crop(x, [flags.batch_size, 70, 70, 3]))(nx) # patchGAN
    n = Conv2d(df_dim, (4, 4), (2, 2), act=lrelu, W_init=w_init)(n)
    n = Conv2d(df_dim * 2, (4, 4), (2, 2), W_init=w_init)(n)
    n = InstanceNorm2d(act=lrelu)(n)

    n = Conv2d(df_dim * 4, (4, 4), (2, 2), W_init=w_init)(n)
    n = InstanceNorm2d(act=lrelu)(n)

    n = Conv2d(df_dim * 8, (4, 4), (2, 2), W_init=w_init)(n)
    n = InstanceNorm2d(act=lrelu)(n)

    n = Conv2d(1, (4, 4), (4, 4), padding='VALID', W_init=w_init)(n)
    n = Flatten()(n)
    assert n.shape[-1] == 1
    M = Model(inputs=nx, outputs=n, name=name)
    return M
Example #6
def SqueezeNetV1(pretrained=False, end_with='out', name=None):
    """Pre-trained SqueezeNetV1 model (static mode). Input shape [?, 224, 224, 3], value range [0, 1].

    Parameters
    ------------
    pretrained : boolean
        Whether to load pretrained weights. Default False.
    end_with : str
        The end point of the model [conv1, maxpool1, fire2, fire3, fire4, ..., out]. Default ``out`` i.e. the whole model.
    name : None or str
        Name for this model.

    Examples
    ---------
    Classify ImageNet classes, see `tutorial_models_squeezenetv1.py <https://github.com/tensorlayer/tensorlayer/blob/master/example/tutorial_models_squeezenetv1.py>`__

    >>> # get the whole model
    >>> squeezenet = tl.models.SqueezeNetV1(pretrained=True)
    >>> # use for inferencing
    >>> output = squeezenet(img1, is_train=False)
    >>> prob = tf.nn.softmax(output)[0].numpy()

    Extract features and train a classifier with 100 classes

    >>> # get model without the last layer
    >>> cnn = tl.models.SqueezeNetV1(pretrained=True, end_with='drop1').as_layer()
    >>> # add one more layer and build new model
    >>> ni = Input([None, 224, 224, 3], name="inputs")
    >>> nn = cnn(ni)
    >>> nn = Conv2d(100, (1, 1), (1, 1), padding='VALID', name='conv10')(nn)
    >>> nn = GlobalMeanPool2d(name='globalmeanpool')(nn)
    >>> model = tl.models.Model(inputs=ni, outputs=nn)
    >>> # train your own classifier (only update the last layer)
    >>> train_params = model.get_layer('conv10').trainable_weights

    Returns
    -------
        static SqueezeNetV1.

    """
    ni = Input([None, 224, 224, 3], name="input")
    n = Lambda(lambda x: x * 255, name='scale')(ni)

    for i in range(len(layer_names)):
        if layer_names[i] == 'conv1':
            n = Conv2d(64, (3, 3), (2, 2), tf.nn.relu, 'SAME', name='conv1')(n)
        elif layer_names[i] == 'maxpool1':
            n = MaxPool2d((3, 3), (2, 2), 'VALID', name='maxpool1')(n)
        elif layer_names[i] == 'drop1':
            n = Dropout(keep=0.5, name='drop1')(n)
        elif layer_names[i] == 'out':
            n = Conv2d(1000, (1, 1), (1, 1), padding='VALID', name='conv10')(n)  # 13, 13, 1000
            n = GlobalMeanPool2d(name='globalmeanpool')(n)
        elif layer_names[i] in ['fire3', 'fire5']:
            n = fire_block(n, n_filters[i - 2], max_pool=True, name=layer_names[i])
        else:
            n = fire_block(n, n_filters[i - 2], max_pool=False, name=layer_names[i])

        if layer_names[i] == end_with:
            break

    network = Model(inputs=ni, outputs=n, name=name)

    if pretrained:
        restore_params(network)

    return network
Example #7
# keras layers
layers = [
    tf.keras.layers.Dropout(0.8),
    tf.keras.layers.Dense(800, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(800, activation='relu'),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(10, activation='linear')
]
keras_block = tf.keras.Sequential(layers)
# in order to compile keras model and get trainable_variables of the keras model
_ = keras_block(np.random.random([batch_size, 784]).astype(np.float32))

# build tl model using keras layers
ni = Input([None, 784], dtype=tf.float32)
nn = Lambda(fn=keras_block, fn_weights=keras_block.trainable_variables)(ni)
network = tl.models.Model(inputs=ni, outputs=nn)
print(network)

n_epoch = 200
learning_rate = 0.0001

train_params = network.trainable_weights
optimizer = tf.optimizers.Adam(learning_rate)

for epoch in range(n_epoch):
    start_time = time.time()
    ## Training
    for X_train_a, y_train_a in tl.iterate.minibatches(X_train, y_train,
                                                       batch_size, shuffle=True):
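        # Hypothetical training step (an assumption; the original listing stops
        # mid-loop): forward pass, cross-entropy loss, and an Adam update.
        with tf.GradientTape() as tape:
            _logits = network(X_train_a, is_train=True)
            _loss = tl.cost.cross_entropy(_logits, y_train_a, name='train_loss')
        grad = tape.gradient(_loss, train_params)
        optimizer.apply_gradients(zip(grad, train_params))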
Example #8
def discriminator(name="discriminator"):
    # PatchGAN-style discriminator whose input is gated by a thresholded attention
    # mask (mask_in >= transition_rate); `donorm` selects, via `my_cond`, whether
    # instance normalization is applied after each conv.
    with tf.compat.v1.variable_scope(name):
        inputdisc_in = Input(shape=[None, IMG_WIDTH, IMG_HEIGHT, IMG_CHANNELS],
                             dtype=tf.float32)
        mask_in = Input(shape=[None, IMG_WIDTH, IMG_HEIGHT, IMG_CHANNELS],
                        dtype=tf.float32)
        transition_rate = Input(shape=[1], dtype=tf.float32)
        donorm = Input(shape=[1], dtype=tf.float32)

        tmp = Elementwise(combine_fn=tf.greater_equal)(
            [mask_in, transition_rate])
        mask = Lambda(fn=my_cast)(tmp)
        inputdisc = Elementwise(combine_fn=tf.multiply)([inputdisc_in, mask])

        f = 4
        padw = 2
        lrelu = lambda x: tl.act.lrelu(x, 0.2)

        pad_input = PadLayer([[0, 0], [padw, padw], [padw, padw], [0, 0]],
                             "CONSTANT")(inputdisc)

        o_c1 = Conv2d(n_filter=ndf,
                      filter_size=(f, f),
                      strides=(2, 2),
                      padding="VALID",
                      act=None,
                      W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                      b_init=tf.constant_initializer(0.0))(pad_input)
        #pdb.set_trace()
        o_c1 = Lambda(fn=my_cond)(
            [donorm, InstanceNorm2d(act=None)(o_c1), o_c1])
        o_c1 = Lambda(fn=lrelu)(o_c1)

        pad_o_c1 = PadLayer([[0, 0], [padw, padw], [padw, padw], [0, 0]],
                            "CONSTANT")(o_c1)

        o_c2 = Conv2d(n_filter=ndf * 2,
                      filter_size=(f, f),
                      strides=(2, 2),
                      padding="VALID",
                      act=None,
                      W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                      b_init=tf.constant_initializer(0.0))(pad_o_c1)
        o_c2 = Lambda(fn=my_cond)(
            [donorm, InstanceNorm2d(act=None)(o_c2), o_c2])
        o_c2 = Lambda(fn=lrelu)(o_c2)

        pad_o_c2 = PadLayer([[0, 0], [padw, padw], [padw, padw], [0, 0]],
                            "CONSTANT")(o_c2)

        o_c3 = Conv2d(n_filter=ndf * 4,
                      filter_size=(f, f),
                      strides=(2, 2),
                      padding="VALID",
                      act=None,
                      W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                      b_init=tf.constant_initializer(0.0))(pad_o_c2)
        o_c3 = Lambda(fn=my_cond)(
            [donorm, InstanceNorm2d(act=None)(o_c3), o_c3])
        o_c3 = Lambda(fn=lrelu)(o_c3)

        pad_o_c3 = PadLayer([[0, 0], [padw, padw], [padw, padw], [0, 0]],
                            "CONSTANT")(o_c3)

        o_c4 = Conv2d(n_filter=ndf * 8,
                      filter_size=(f, f),
                      strides=(1, 1),
                      padding="VALID",
                      act=None,
                      W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                      b_init=tf.constant_initializer(0.0))(pad_o_c3)
        o_c4 = Lambda(fn=my_cond)(
            [donorm, InstanceNorm2d(act=None)(o_c4), o_c4])
        o_c4 = Lambda(fn=lrelu)(o_c4)

        pad_o_c4 = PadLayer([[0, 0], [padw, padw], [padw, padw], [0, 0]],
                            "CONSTANT")(o_c4)

        o_c5 = Conv2d(n_filter=1,
                      filter_size=(f, f),
                      strides=(1, 1),
                      padding="VALID",
                      act=None,
                      W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                      b_init=tf.constant_initializer(0.0))(pad_o_c4)

        return Model(inputs=[inputdisc_in, mask_in, transition_rate, donorm],
                     outputs=o_c5)
Example #9
def build_generator_9blocks(name="generator", skip=False):
    # CycleGAN-style generator: a 7x7 conv, two stride-2 downsampling convs,
    # nine resnet blocks, two stride-2 deconvs, and a final 7x7 conv with a tanh
    # output (added to the input first when `skip` is True).
    with tf.compat.v1.variable_scope(name):
        #pdb.set_trace()
        inputgen = Input(shape=[None, IMG_WIDTH, IMG_HEIGHT, IMG_CHANNELS],
                         dtype=tf.float32)
        f = 7
        ks = 3
        padding = "CONSTANT"
        padgen = PadLayer([[0, 0], [ks, ks], [ks, ks], [0, 0]],
                          padding)(inputgen)

        o_c1 = Conv2d(n_filter=ngf,
                      filter_size=(f, f),
                      strides=(1, 1),
                      padding="VALID",
                      act=None,
                      W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                      b_init=tf.constant_initializer(0.0))(padgen)
        o_c1 = InstanceNorm2d(act=tf.nn.relu)(o_c1)

        o_c2 = Conv2d(n_filter=ngf * 2,
                      filter_size=(ks, ks),
                      strides=(2, 2),
                      padding="SAME",
                      act=None,
                      W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                      b_init=tf.constant_initializer(0.0))(o_c1)
        o_c2 = InstanceNorm2d(act=tf.nn.relu)(o_c2)

        o_c3 = Conv2d(n_filter=ngf * 4,
                      filter_size=(ks, ks),
                      strides=(2, 2),
                      padding="SAME",
                      act=None,
                      W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                      b_init=tf.constant_initializer(0.0))(o_c2)
        o_c3 = InstanceNorm2d(act=tf.nn.relu)(o_c3)

        o_r1 = build_resnet_block(o_c3, ngf * 4, "r1", padding)
        o_r2 = build_resnet_block(o_r1, ngf * 4, "r2", padding)
        o_r3 = build_resnet_block(o_r2, ngf * 4, "r3", padding)
        o_r4 = build_resnet_block(o_r3, ngf * 4, "r4", padding)
        o_r5 = build_resnet_block(o_r4, ngf * 4, "r5", padding)
        o_r6 = build_resnet_block(o_r5, ngf * 4, "r6", padding)
        o_r7 = build_resnet_block(o_r6, ngf * 4, "r7", padding)
        o_r8 = build_resnet_block(o_r7, ngf * 4, "r8", padding)
        o_r9 = build_resnet_block(o_r8, ngf * 4, "r9", padding)

        o_c4 = DeConv2d(n_filter=ngf * 2,
                        filter_size=(ks, ks),
                        strides=(2, 2),
                        padding="SAME",
                        act=None,
                        W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                        b_init=tf.constant_initializer(0.0))(o_r9)
        o_c4 = InstanceNorm2d(act=tf.nn.relu)(o_c4)

        o_c5 = DeConv2d(n_filter=ngf,
                        filter_size=(ks, ks),
                        strides=(2, 2),
                        padding="SAME",
                        act=None,
                        W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                        b_init=tf.constant_initializer(0.0))(o_c4)
        o_c5 = InstanceNorm2d(act=tf.nn.relu)(o_c5)

        o_c6 = Conv2d(n_filter=IMG_CHANNELS,
                      filter_size=(f, f),
                      strides=(1, 1),
                      padding="SAME",
                      act=None,
                      W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                      b_init=tf.constant_initializer(0.0))(o_c5)

        if skip is True:
            #out_gen = Lambda(tf.nn.tanh, name="t1")(Elementwise(combine_fn=tf.add)([inputgen, o_c6]))
            tmp = Elementwise(combine_fn=tf.add)([inputgen, o_c6])
            out_gen = Lambda(tf.nn.tanh)(tmp)
        else:
            #out_gen = Lambda(tf.nn.tanh, name="t1")(o_c6)
            out_gen = Lambda(tf.nn.tanh)(o_c6)

        return Model(inputs=inputgen, outputs=out_gen)
Example #10
def autoenc_upsample(name):
    # Attention autoencoder: two downsampling convs, one residual block, two
    # upsampling (`upsamplingDeconv`) + conv stages, and a 1-channel sigmoid output.
    with tf.compat.v1.variable_scope(name):
        inputae = Input(shape=[None, IMG_WIDTH, IMG_HEIGHT, IMG_CHANNELS],
                        dtype=tf.float32)
        f = 7
        ks = 3
        padding = "REFLECT"

        pad_input = PadLayer([[0, 0], [ks, ks], [ks, ks], [0, 0]],
                             padding)(inputae)

        o_c1 = Conv2d(n_filter=ngf,
                      filter_size=(f, f),
                      strides=(2, 2),
                      act=None,
                      padding="VALID",
                      W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                      b_init=tf.constant_initializer(0.0))(pad_input)

        o_c1 = InstanceNorm2d(act=tf.nn.relu)(o_c1)
        o_c2 = Conv2d(n_filter=ngf * 2,
                      filter_size=(ks, ks),
                      strides=(2, 2),
                      padding="SAME",
                      act=None,
                      W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                      b_init=tf.constant_initializer(0.0))(o_c1)
        o_c2 = InstanceNorm2d(act=tf.nn.relu)(o_c2)

        o_r1 = build_resnet_block_Att(o_c2, ngf * 2, "r1", padding)

        size_d1 = o_r1.get_shape().as_list()
        o_c4 = upsamplingDeconv(o_r1,
                                size=[size_d1[1] * 2, size_d1[2] * 2],
                                name="up1")
        o_c4 = PadLayer([[0, 0], [1, 1], [1, 1], [0, 0]], padding)(o_c4)
        o_c4_end = Conv2d(n_filter=ngf * 2,
                          filter_size=(3, 3),
                          strides=(1, 1),
                          padding="VALID",
                          act=None,
                          W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                          b_init=tf.constant_initializer(0.0))(o_c4)

        o_c4_end = InstanceNorm2d(act=tf.nn.relu)(o_c4_end)

        size_d2 = o_c4_end.get_shape().as_list()

        o_c5 = upsamplingDeconv(o_c4_end,
                                size=[size_d2[1] * 2, size_d2[2] * 2],
                                name="up2")
        o_c5 = PadLayer([[0, 0], [1, 1], [1, 1], [0, 0]], padding)(o_c5)
        o_c5_end = Conv2d(n_filter=ngf,
                          filter_size=(3, 3),
                          strides=(1, 1),
                          padding="VALID",
                          act=None,
                          W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                          b_init=tf.constant_initializer(0.0))(o_c5)

        o_c5_end = InstanceNorm2d(act=tf.nn.relu)(o_c5_end)
        o_c5_end = PadLayer([[0, 0], [3, 3], [3, 3], [0, 0]],
                            padding)(o_c5_end)
        o_c6_end = Conv2d(n_filter=1,
                          filter_size=(f, f),
                          strides=(1, 1),
                          padding="VALID",
                          act=None,
                          W_init=tf.initializers.TruncatedNormal(stddev=0.02),
                          b_init=tf.constant_initializer(0.0))(o_c5_end)

        output = Lambda(tf.nn.sigmoid)(o_c6_end)
        return Model(inputs=inputae, outputs=output)
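A minimal usage sketch (an assumption, not from the original; `img` is a placeholder name): the 1-channel sigmoid output has the same spatial size as the input, so it can serve as a soft attention mask over the image.

mask_net = autoenc_upsample("mask_A")
mask = mask_net(img, is_train=False)  # (batch, 256, 256, 1), values in [0, 1]
masked_img = img * mask               # broadcasts over the channel axis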