Example #1
def get_G(input_shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    g_init = tf.random_normal_initializer(1., 0.02)

    nin = Input(input_shape)
    n = Conv2d(64, (3, 3), (1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init)(nin)
    temp = n

    # B residual blocks
    for i in range(16):
        nn = Conv2d(64, (3, 3), (1, 1), padding='SAME', W_init=w_init, b_init=None)(n)
        nn = BatchNorm(act=tf.nn.relu, gamma_init=g_init)(nn)
        nn = Conv2d(64, (3, 3), (1, 1), padding='SAME', W_init=w_init, b_init=None)(nn)
        nn = BatchNorm(gamma_init=g_init)(nn)
        nn = Elementwise(tf.add)([n, nn])
        n = nn

    n = Conv2d(64, (3, 3), (1, 1), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm(gamma_init=g_init)(n)
    n = Elementwise(tf.add)([n, temp])
    # B residual blocks end

    n = Conv2d(256, (3, 3), (1, 1), padding='SAME', W_init=w_init)(n)
    n = SubpixelConv2d(scale=2, n_out_channels=None, act=tf.nn.relu)(n)

    n = Conv2d(256, (3, 3), (1, 1), act=None, padding='SAME', W_init=w_init)(n)
    n = SubpixelConv2d(scale=2, n_out_channels=None, act=tf.nn.relu)(n)

    nn = Conv2d(3, (1, 1), (1, 1), act=tf.nn.tanh, padding='SAME', W_init=w_init)(n)
    G = Model(inputs=nin, outputs=nn, name="generator")
    return G
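A minimal usage sketch for this SRGAN-style generator, assuming TensorLayer 2.x; the 96x96 low-resolution input shape and the batch size are assumptions, not values from the source:

import numpy as np
import tensorflow as tf

G = get_G([None, 96, 96, 3])  # hypothetical LR input shape
G.eval()  # inference mode: BatchNorm uses its moving statistics
lr = tf.convert_to_tensor(np.random.rand(1, 96, 96, 3).astype(np.float32))
sr = G(lr)  # two SubpixelConv2d(scale=2) stages give 4x upscaling
print(sr.shape)  # (1, 384, 384, 3)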
Example #2
def depthwise_conv_block(n, n_filter, strides=(1, 1), name="depth_block"):
    n = DepthwiseConv2d((3, 3), strides, b_init=None,
                        name=name + '.depthwise')(n)
    n = BatchNorm(decay=0.99, act=tf.nn.relu6, name=name + '.batchnorm1')(n)
    n = Conv2d(n_filter, (1, 1), (1, 1), b_init=None, name=name + '.conv')(n)
    n = BatchNorm(decay=0.99, act=tf.nn.relu6, name=name + '.batchnorm2')(n)
    return n
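A short composition sketch, assuming `Input` from `tensorlayer.layers` is in scope; the 112x112x32 stem output and the block names are illustrative:

ni = Input([None, 112, 112, 32])  # hypothetical output of a preceding stem conv
n = depthwise_conv_block(ni, 64, name='dw1')
n = depthwise_conv_block(n, 128, strides=(2, 2), name='dw2')  # halves the spatial size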
Example #3
def get_G(shape_z):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    ni = Input(shape_z)
    # input size is [None, 8, 8, 128]
    nn = DeConv2d(128, (1, 1), (1, 1), W_init=w_init, b_init=None,
                  act=None)(ni)

    nn = DeConv2d(64, (4, 4), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)

    nn = DeConv2d(32, (4, 4), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)

    nn = DeConv2d(32, (3, 3), (1, 1), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)

    nn = DeConv2d(32, (3, 3), (1, 1), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)

    nn = DeConv2d(3, (4, 4), (2, 2),
                  act=tf.nn.tanh,
                  W_init=w_init,
                  b_init=None)(nn)

    return tl.models.Model(inputs=ni, outputs=nn)
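A forward-pass sketch that matches the commented input size (`tf` imported as usual; the batch size of 4 is arbitrary):

G = get_G([None, 8, 8, 128])
G.train()  # training mode so BatchNorm updates its statistics
z = tf.random.normal([4, 8, 8, 128])
fake = G(z)  # three stride-2 DeConv2d stages: 8 -> 16 -> 32 -> 64
print(fake.shape)  # (4, 64, 64, 3), tanh output in [-1, 1]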
Example #4
def model(inputs_shape, n_class=10):
    # In BNN, all the layers inputs are binary, with the exception of the first layer.
    # ref: https://github.com/itayhubara/BinaryNet.tf/blob/master/models/BNN_cifar10.py
    net_in = Input(inputs_shape, name='input')
    net = BinaryConv2d(32, (5, 5), (1, 1),
                       padding='SAME',
                       b_init=None,
                       name='bcnn1')(net_in)
    net = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool1')(net)
    net = BatchNorm(act=tl.act.htanh, name='bn1')(net)

    net = Sign("sign1")(net)
    net = BinaryConv2d(64, (5, 5), (1, 1),
                       padding='SAME',
                       b_init=None,
                       name='bcnn2')(net)
    net = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool2')(net)
    net = BatchNorm(act=tl.act.htanh, name='bn2')(net)

    net = Flatten('ft')(net)
    net = Sign("sign2")(net)
    net = BinaryDense(256, b_init=None, name='dense')(net)
    net = BatchNorm(act=tl.act.htanh, name='bn3')(net)

    net = Sign("sign3")(net)
    net = BinaryDense(n_class, b_init=None, name='bout')(net)
    net = BatchNorm(name='bno')(net)
    net = Model(inputs=net_in, outputs=net, name='binarynet')
    return net
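A usage sketch for the binary network; the MNIST-like 28x28x1 input shape is an assumption:

net = model([None, 28, 28, 1], n_class=10)
net.train()
x = tf.random.uniform([32, 28, 28, 1])
logits = net(x)  # (32, 10); only the first conv layer sees real-valued input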
Example #5
def get_Ek(shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    ngf = 64
    isize = 64
    n_extra_layers = flags.n_extra_layers

    ni = Input(shape)
    nn = Conv2d(ngf, (4, 4), (2, 2), W_init=w_init, act=tf.nn.relu)(ni)

    nn = Conv2d(ngf * 2, (4, 4), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)

    nn = Conv2d(ngf * 2, (4, 4), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)

    nn = DeConv2d(ngf // 2, (4, 4), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)

    nn = DeConv2d(ngf // 8, (1, 1), (1, 1), W_init=w_init, b_init=None)(nn)

    # nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(nn)
    #
    # nn = DeConv2d(ngf // 8, (4, 4), (2, 2), W_init=w_init, act=tf.nn.relu)(nn)
    return tl.models.Model(inputs=ni, outputs=nn)
Example #6
def model(inputs_shape, n_class=10):
    in_net = Input(inputs_shape, name='input')
    net = DorefaConv2d(1,
                       3,
                       32, (5, 5), (1, 1),
                       padding='SAME',
                       b_init=None,
                       name='bcnn1')(in_net)
    net = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool1')(net)
    net = BatchNorm(act=tl.act.htanh, name='bn1')(net)

    net = DorefaConv2d(1,
                       3,
                       64, (5, 5), (1, 1),
                       padding='SAME',
                       b_init=None,
                       name='bcnn2')(net)
    net = MaxPool2d((2, 2), (2, 2), padding='SAME', name='pool2')(net)
    net = BatchNorm(act=tl.act.htanh, name='bn2')(net)

    net = Flatten('flatten')(net)
    net = DorefaDense(1, 3, 256, b_init=None, name='dense')(net)
    net = BatchNorm(act=tl.act.htanh, name='bn3')(net)

    net = Dense(n_class, b_init=None, name='bout')(net)
    net = BatchNorm(name='bno')(net)
    net = Model(inputs=in_net, outputs=net, name='dorefanet')
    return net
Example #7
def get_model(input_shape):
    ni = Input(shape=input_shape)
    nn = DeConv2d(1024, (1, 1), (1, 1), in_channels=64)(ni)
    nn = BatchNorm(decay=0.99, act=tf.nn.relu)(nn)
    nn = DeConv2d(128, (7, 7), (7, 7), in_channels=1024)(nn)
    nn = BatchNorm(decay=0.99, act=tf.nn.relu)(nn)
    nn = DeConv2d(64, (4, 4), (2, 2), in_channels=128)(nn)
    nn = BatchNorm(decay=0.99, act=tf.nn.relu)(nn)
    nn = DeConv2d(1, (4, 4), (2, 2), in_channels=64, act=tf.nn.sigmoid)(nn)
    return tl.models.Model(inputs=ni, outputs=nn, name='cnn')
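This decoder maps a 1x1x64 feature map to a 28x28 single-channel image; a quick shape check (the batch size of 8 is arbitrary):

net = get_model([None, 1, 1, 64])
net.eval()
z = tf.random.normal([8, 1, 1, 64])
img = net(z)  # 1 -> 7 -> 14 -> 28; sigmoid keeps values in (0, 1)
print(img.shape)  # (8, 28, 28, 1)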
Example #8
    def residual_block(self, n_k=1, e=True):
        k_size = self.f_size * (2**n_k)
        ni = Input([None, None, None, k_size])
        nn = Conv2d(k_size, (3, 3), (1, 1))(ni)
        nn = BatchNorm(act=tf.nn.relu)(nn)
        nn = Conv2d(k_size, (3, 3), (1, 1))(nn)
        nn = BatchNorm()(nn)
        nn = Elementwise(tf.add)([ni, nn])
        return Model(inputs=ni,
                     outputs=nn,
                     name=f'{"e" if e else "d"}{n_k+1}_res').as_layer()
Example #9
def conv_block(input, kernel_size, n_filters, stage, block, strides=(2, 2)):
    """The conv block where there is a conv layer at shortcut.

    Parameters
    ----------
    input : tf tensor
        Input tensor from above layer.
    kernel_size : int
        The kernel size of middle conv layer at main path.
    n_filters : list of integers
        The numbers of filters for 3 conv layer at main path.
    stage : int
        Current stage label.
    block : str
        Current block label.
    strides : tuple
        Strides for the first conv layer in the block.

    Returns
    -------
        Output tensor of this block.

    """
    filters1, filters2, filters3 = n_filters
    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Conv2d(filters1, (1, 1),
               strides=strides,
               W_init=tf.initializers.he_normal(),
               name=conv_name_base + '2a')(input)
    x = BatchNorm(name=bn_name_base + '2a', act='relu')(x)

    ks = (kernel_size, kernel_size)
    x = Conv2d(filters2,
               ks,
               padding='SAME',
               W_init=tf.initializers.he_normal(),
               name=conv_name_base + '2b')(x)
    x = BatchNorm(name=bn_name_base + '2b', act='relu')(x)

    x = Conv2d(filters3, (1, 1),
               W_init=tf.initializers.he_normal(),
               name=conv_name_base + '2c')(x)
    x = BatchNorm(name=bn_name_base + '2c')(x)

    shortcut = Conv2d(filters3, (1, 1),
                      strides=strides,
                      W_init=tf.initializers.he_normal(),
                      name=conv_name_base + '1')(input)
    shortcut = BatchNorm(name=bn_name_base + '1')(shortcut)

    x = Elementwise(tf.add, act='relu')([x, shortcut])
    return x
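A sketch of how such a block is wired into a ResNet-50 style stage; the 56x56x64 input shape follows the usual stage-2 convention and is an assumption here:

ni = Input([None, 56, 56, 64])
x = conv_block(ni, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
print(x.shape)  # (None, 56, 56, 256): both branches project to 256 channels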
Example #10
def get_z_D(shape_z):
    gamma_init = tf.random_normal_initializer(1., 0.02)
    w_init = tf.random_normal_initializer(stddev=0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)
    nz = Input(shape_z)
    n = Dense(n_units=750, act=None, W_init=w_init, b_init=None)(nz)
    n = BatchNorm(decay=0.9, act=lrelu, gamma_init=gamma_init)(n)
    n = Dense(n_units=750, act=None, W_init=w_init, b_init=None)(n)
    n = BatchNorm(decay=0.9, act=lrelu, gamma_init=gamma_init)(n)
    n = Dense(n_units=750, act=None, W_init=w_init, b_init=None)(n)
    n = BatchNorm(decay=0.9, act=lrelu, gamma_init=gamma_init)(n)
    n = Dense(n_units=1, act=None, W_init=w_init)(n)
    return tl.models.Model(inputs=nz, outputs=n, name='c_Discriminator')
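A forward-pass sketch for this latent-code discriminator; the 128-d latent size is an assumption:

zD = get_z_D([None, 128])
zD.train()
z = tf.random.normal([32, 128])
score = zD(z)  # (32, 1) unbounded logit per latent sample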
Example #11
def get_G(shape_z,
          ngf=64):  # Dimension of gen filters in first conv layer. [64]
    # w_init = tf.glorot_normal_initializer()
    print("for G")
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    n_extra_layers = flags.n_extra_layers
    isize = 64
    cngf, tisize = ngf // 2, 4
    while tisize != isize:
        cngf = cngf * 2
        tisize = tisize * 2
    ni = Input(shape_z)
    nn = Reshape(shape=[-1, 1, 1, 128])(ni)
    nn = DeConv2d(cngf, (4, 4), (1, 1),
                  W_init=w_init,
                  b_init=None,
                  padding='VALID')(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)
    print(nn.shape)

    csize, cndf = 4, cngf
    while csize < isize // 2:
        cngf = cngf // 2
        nn = DeConv2d(cngf, (4, 4), (2, 2), W_init=w_init, b_init=None)(nn)
        nn = BatchNorm(decay=0.9,
                       act=tf.nn.relu,
                       gamma_init=gamma_init,
                       name=None)(nn)
        print(nn.shape)
        csize = csize * 2

    for t in range(n_extra_layers):
        nn = DeConv2d(cngf, (3, 3), (1, 1), W_init=w_init, b_init=None)(nn)
        nn = BatchNorm(decay=0.9,
                       act=tf.nn.relu,
                       gamma_init=gamma_init,
                       name=None)(nn)
        print(nn.shape)

    nn = DeConv2d(3, (4, 4), (2, 2),
                  act=tf.nn.tanh,
                  W_init=w_init,
                  b_init=None)(nn)
    print(nn.shape)

    return tl.models.Model(inputs=ni, outputs=nn)
Example #12
def get_generator(shape=[None, flags.z_dim], gf_dim=64, name=None):
    image_size = 64
    s16 = image_size // 16
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    ni = Input(shape)
    nn = Dense(n_units=(gf_dim * 8 * s16 * s16), W_init=w_init,
               b_init=None)(ni)
    nn = Reshape(shape=[-1, s16, s16, gf_dim * 8])(nn)

    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)
    nn = DeConv2d(gf_dim * 4, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)

    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim * 2, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)

    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)

    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(3, (5, 5), (2, 2), act=tf.nn.tanh, W_init=w_init)(nn)

    return tl.models.Model(inputs=ni, outputs=nn, name=name)
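A usage sketch, assuming `flags.z_dim` is 100 (the default shape references it, so an explicit shape is passed instead); the name is illustrative:

G = get_generator([None, 100], name='dcgan_g')
G.train()
z = tf.random.normal([16, 100])
fake = G(z)  # 4 -> 8 -> 16 -> 32 -> 64
print(fake.shape)  # (16, 64, 64, 3), tanh range [-1, 1]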
Example #13
def get_dwG(shape_z=(None, 100),
            shape_h=(None, 16)):
    s16 = flags.img_size_h // 16
    gf_dim = 64  # Dimension of gen filters in first conv layer. [64]
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    n_z = Input(shape_z)
    n_h = Input(shape_h)
    n = Concat(-1)([n_z, n_h])

    n = Dense(n_units=(gf_dim * 8 * s16 * s16),
              W_init=w_init,
              act=tf.identity,
              b_init=None)(n)

    n = Reshape(shape=[-1, s16, s16, gf_dim * 8])(n)

    n = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)

    n = DeConv2d(gf_dim * 4, (5, 5),
                 strides=(2, 2),
                 act=None,
                 W_init=w_init,
                 b_init=None)(n)
    n = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)

    n = DeConv2d(gf_dim * 2, (5, 5),
                 strides=(2, 2),
                 act=None,
                 W_init=w_init,
                 b_init=None)(n)
    n = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)

    n = DeConv2d(gf_dim, (5, 5),
                 strides=(2, 2),
                 act=None,
                 W_init=w_init,
                 b_init=None)(n)
    n = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)

    n = DeConv2d(flags.c_dim, (5, 5),
                 strides=(2, 2),
                 act=tf.nn.tanh,
                 W_init=w_init)(n)
    return tl.models.Model(inputs=[n_z, n_h], outputs=n, name='generator')
Example #14
def get_model(inputs_shape):
    # build network
    ni = Input(inputs_shape)
    nn = Conv2d(32, (3, 3), (1, 1), padding='SAME', name='conv1')(ni)
    nn = BatchNorm(decay=0.99, act=tf.nn.relu, name='batch1')(nn)
    nn = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(nn)

    nn = Conv2d(64, (3, 3), (1, 1), padding='SAME', name='conv2')(nn)
    nn = BatchNorm(decay=0.99, act=tf.nn.relu, name='batch2')(nn)
    nn = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(nn)

    nn = Flatten(name='flatten')(nn)
    nn = Dense(16, act=tf.nn.relu, name='denserelu')(nn)
    nn = Dense(10, act=None, name='output')(nn)

    M = Model(inputs=ni, outputs=nn, name='cnn')
    return M
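A single training-step sketch for this classifier, assuming `import tensorlayer as tl`, 24x24x3 CIFAR-style crops, and integer labels; all names below are illustrative:

M = get_model([None, 24, 24, 3])
M.train()
opt = tf.optimizers.Adam(learning_rate=1e-3)
x = tf.random.uniform([16, 24, 24, 3])
y = tf.random.uniform([16], maxval=10, dtype=tf.int64)
with tf.GradientTape() as tape:
    logits = M(x)
    loss = tl.cost.cross_entropy(logits, y, name='train_loss')
grads = tape.gradient(loss, M.trainable_weights)
opt.apply_gradients(zip(grads, M.trainable_weights))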
Example #15
def get_G(input_tensor):
    with tf.device('/gpu:0'):

        # exp1: conv encoder -> deconv decoder
        w_init = tf.random_normal_initializer(stddev=0.02)
        g_init = tf.random_normal_initializer(1., 0.02)

        nin = Input(input_tensor)
        c1 = Conv2d(16, (3, 3), (1, 1),
                    act=tf.nn.relu,
                    padding='SAME',
                    W_init=w_init)(nin)
        c2 = Conv2d(32, (3, 3), (1, 1),
                    act=tf.nn.relu,
                    padding='SAME',
                    W_init=w_init)(c1)
        c3 = Conv2d(64, (3, 3), (1, 1),
                    act=tf.nn.relu,
                    padding='SAME',
                    W_init=w_init)(c2)
        c4 = Conv2d(128, (3, 3), (1, 1),
                    act=tf.nn.relu,
                    padding='SAME',
                    W_init=w_init)(c3)
        c5 = Conv2d(256, (3, 3), (1, 1),
                    act=tf.nn.relu,
                    padding='SAME',
                    W_init=w_init)(c4)

        d4 = DeConv2d(128, (3, 3), (1, 1), padding='SAME', W_init=w_init)(c5)
        d4 = BatchNorm(act=tf.nn.relu, gamma_init=g_init)(d4)
        d4 = Elementwise(tf.add)([c4, d4])
        d3 = DeConv2d(64, (3, 3), (1, 1), padding='SAME', W_init=w_init)(d4)
        d3 = BatchNorm(act=tf.nn.relu, gamma_init=g_init)(d3)
        d3 = Elementwise(tf.add)([c3, d3])
        d2 = DeConv2d(32, (3, 3), (1, 1), padding='SAME', W_init=w_init)(d3)
        d2 = BatchNorm(act=tf.nn.relu, gamma_init=g_init)(d2)
        d2 = Elementwise(tf.add)([c2, d2])
        d1 = DeConv2d(16, (3, 3), (1, 1), padding='SAME', W_init=w_init)(d2)
        d1 = BatchNorm(act=tf.nn.relu, gamma_init=g_init)(d1)
        d1 = Elementwise(tf.add)([c1, d1])
        out = DeConv2d(3, (3, 3), (1, 1), padding='SAME', W_init=w_init)(d1)
        out = BatchNorm(act=tf.nn.relu, gamma_init=g_init)(out)
        out = Elementwise(tf.add)([out, nin])
        G = Model(inputs=nin, outputs=out, name="generator")
        return G
Example #16
def make_layers(config, batch_norm=False, end_with='outputs'):
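    # NOTE: `layer_names` is assumed to be defined at module scope, mirroring
    # the nested structure of `config` (one name per entry).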
    layer_list = []
    is_end = False
    for layer_group_idx, layer_group in enumerate(config):
        if isinstance(layer_group, list):
            for idx, layer in enumerate(layer_group):
                layer_name = layer_names[layer_group_idx][idx]
                n_filter = layer
                if idx == 0:
                    if layer_group_idx > 0:
                        in_channels = config[layer_group_idx - 2][-1]
                    else:
                        in_channels = 3
                else:
                    in_channels = layer
                layer_list.append(
                    Conv2d(n_filter=n_filter,
                           filter_size=(3, 3),
                           strides=(1, 1),
                           act=tf.nn.relu,
                           padding='SAME',
                           in_channels=in_channels,
                           name=layer_name))
                if batch_norm:
                    layer_list.append(BatchNorm())
                if layer_name == end_with:
                    is_end = True
                    break
        else:
            layer_name = layer_names[layer_group_idx]
            if layer_group == 'M':
                layer_list.append(
                    MaxPool2d(filter_size=(2, 2),
                              strides=(2, 2),
                              padding='SAME',
                              name=layer_name))
            elif layer_group == 'O':
                layer_list.append(
                    Dense(n_units=1000, in_channels=4096, name=layer_name))
            elif layer_group == 'F':
                layer_list.append(Flatten(name='flatten'))
            elif layer_group == 'fc1':
                layer_list.append(
                    Dense(n_units=4096,
                          act=tf.nn.relu,
                          in_channels=512 * 7 * 7,
                          name=layer_name))
            elif layer_group == 'fc2':
                layer_list.append(
                    Dense(n_units=4096,
                          act=tf.nn.relu,
                          in_channels=4096,
                          name=layer_name))
            if layer_name == end_with:
                is_end = True
        if is_end:
            break
    return LayerList(layer_list)
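A hypothetical VGG-16 style invocation; both `layer_names` and `config` below are assumptions chosen to satisfy the indexing in `make_layers`, not values from the source:

layer_names = [['conv1_1', 'conv1_2'], 'pool1', ['conv2_1', 'conv2_2'], 'pool2',
               ['conv3_1', 'conv3_2', 'conv3_3'], 'pool3',
               ['conv4_1', 'conv4_2', 'conv4_3'], 'pool4',
               ['conv5_1', 'conv5_2', 'conv5_3'], 'pool5',
               'flatten', 'fc1', 'fc2', 'outputs']
config = [[64, 64], 'M', [128, 128], 'M', [256, 256, 256], 'M',
          [512, 512, 512], 'M', [512, 512, 512], 'M', 'F', 'fc1', 'fc2', 'O']
vgg = make_layers(config, batch_norm=False, end_with='outputs')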
Example #17
def get_G(shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    ni = Input(shape)
    nn = Dense(n_units=(1024), W_init=w_init, b_init=None)(ni)
    nn = Reshape(shape=[-1, 1024])(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)
    nn = Dense(n_units=(8 * 8 * 256), W_init=w_init, b_init=None)(nn)
    nn = Reshape(shape=[-1, 8, 8, 256])(nn)  # reshape the dense output to an 8x8x256 feature map
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)
    nn = DeConv2d(n_filter=256,
                  filter_size=(4, 4),
                  strides=(1, 1),
                  W_init=w_init,
                  b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(n_filter=256,
                  filter_size=(4, 4),
                  strides=(2, 2),
                  W_init=w_init,
                  b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(n_filter=128,
                  filter_size=(4, 4),
                  strides=(2, 2),
                  W_init=w_init,
                  b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(n_filter=64,
                  filter_size=(4, 4),
                  strides=(2, 2),
                  W_init=w_init,
                  b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(n_filter=1,
                  filter_size=(4, 4),
                  strides=(1, 1),
                  act=tf.nn.sigmoid,
                  W_init=w_init)(nn)

    return tl.models.Model(inputs=ni, outputs=nn, name='G')
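A forward-pass sketch; the 100-d latent input is an assumption:

G = get_G([None, 100])
G.eval()
z = tf.random.normal([4, 100])
img = G(z)  # 8 -> 8 -> 16 -> 32 -> 64; single sigmoid channel
print(img.shape)  # (4, 64, 64, 1)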
Example #18
def depthwise_conv_block(n,
                         n_filter,
                         alpha,
                         strides=(1, 1),
                         name="depth_block"):
    if strides != (1, 1):
        n = ZeroPad2d(padding=((1, 1), (1, 1)), name=name + '.pad')(n)
        padding_type = 'VALID'
    else:
        padding_type = 'SAME'
    n_filter = int(n_filter * alpha)
    n = DepthwiseConv2d((3, 3),
                        strides,
                        padding=padding_type,
                        b_init=None,
                        name=name + '.depthwise')(n)
    n = BatchNorm(decay=0.99, act=tf.nn.relu6, name=name + '.batchnorm1')(n)
    n = Conv2d(n_filter, (1, 1), (1, 1), b_init=None, name=name + '.conv')(n)
    n = BatchNorm(decay=0.99, act=tf.nn.relu6, name=name + '.batchnorm2')(n)
    return n
Example #19
def get_Q(shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, flags.leaky_rate)

    ni = Input(shape)
    q = Dense(n_units=128, W_init=w_init, b_init=None)(ni)
    q = BatchNorm(decay=0.9, act=lrelu, gamma_init=gamma_init)(q)
    q = Dense(n_units=flags.n_categorical * flags.dim_categorical,
              W_init=w_init)(q)
    return tl.models.Model(inputs=ni, outputs=q, name='Q_tail')
Example #20
def get_dwD(shape):  # Dimension of discrim filters in first conv layer. [64]

    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)
    df_dim = 64  # Dimension of discrim filters in first conv layer. [64]
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    ni = Input(shape)

    n = Conv2d(df_dim, (5, 5), (2, 2), act=lrelu, W_init=w_init)(ni)

    n = Conv2d(df_dim * 2, (5, 5), (2, 2),
               act=None,
               W_init=w_init,
               b_init=None)(n)
    n = BatchNorm(decay=0.9, act=lrelu, gamma_init=gamma_init)(n)

    n = Conv2d(df_dim * 4, (5, 5), (2, 2),
               act=None,
               W_init=w_init,
               b_init=None)(n)
    n = BatchNorm(decay=0.9, act=lrelu, gamma_init=gamma_init)(n)

    n = Conv2d(df_dim * 8, (5, 5), (2, 2),
               act=None,
               W_init=w_init,
               b_init=None)(n)
    n = BatchNorm(decay=0.9, act=lrelu, gamma_init=gamma_init)(n)

    nf = Flatten(name='flatten')(n)

    n1 = Dense(n_units=1, act=tf.identity, W_init=w_init)(nf)

    n2 = Dense(n_units=flags.h_dim,
               act=tf.sigmoid,
               W_init=w_init,
               name='hash_layer')(nf)

    return tl.models.Model(inputs=ni,
                           outputs=[nf, n1, n2],
                           name='discriminator')
Example #21
def get_E(shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    ngf = 64
    isize = 64
    n_extra_layers = flags.n_extra_layers
    print(" for E")
    ni = Input(shape)
    nn = Conv2d(ngf, (4, 4), (2, 2), act=None, W_init=w_init, b_init=None)(ni)
    print(nn.shape)
    isize = isize // 2

    for t in range(n_extra_layers):
        nn = Conv2d(ngf, (3, 3), (1, 1), W_init=w_init, b_init=None)(nn)
        nn = BatchNorm(decay=0.9,
                       act=tf.nn.relu,
                       gamma_init=gamma_init,
                       name=None)(nn)
        print(nn.shape)

    while isize > 4:
        ngf = ngf * 2
        nn = Conv2d(ngf, (4, 4), (2, 2), W_init=w_init, b_init=None)(nn)
        nn = BatchNorm(decay=0.9,
                       act=tf.nn.relu,
                       gamma_init=gamma_init,
                       name=None)(nn)
        print(nn.shape)
        isize = isize // 2

    nn = Conv2d(flags.z_dim, (4, 4), (1, 1),
                act=None,
                W_init=w_init,
                b_init=None,
                padding='VALID')(nn)
    print(nn.shape)
    nz = Reshape(shape=[-1, 128])(nn)

    return tl.models.Model(inputs=ni, outputs=nz)
Example #22
def conv_block(n,
               n_filter,
               filter_size=(3, 3),
               strides=(1, 1),
               name='conv_block'):
    # ref: https://github.com/keras-team/keras/blob/master/keras/applications/mobilenet.py
    n = Conv2d(n_filter,
               filter_size,
               strides,
               b_init=None,
               name=name + '.conv')(n)
    n = BatchNorm(decay=0.99, act=tf.nn.relu6, name=name + '.batchnorm')(n)
    return n
Example #23
def get_model_batchnorm(inputs_shape):
    # self-defined initialization
    W_init = tl.initializers.truncated_normal(stddev=5e-2)
    W_init2 = tl.initializers.truncated_normal(stddev=0.04)
    b_init2 = tl.initializers.constant(value=0.1)

    # build network
    ni = Input(inputs_shape)
    nn = Conv2d(64, (5, 5), (1, 1),
                padding='SAME',
                W_init=W_init,
                b_init=None,
                name='conv1')(ni)
    nn = BatchNorm(decay=0.99, act=tf.nn.relu, name='batch1')(nn)
    nn = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(nn)

    nn = Conv2d(64, (5, 5), (1, 1),
                padding='SAME',
                W_init=W_init,
                b_init=None,
                name='conv2')(nn)
    nn = BatchNorm(decay=0.99, act=tf.nn.relu, name='batch2')(nn)
    nn = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(nn)

    nn = Flatten(name='flatten')(nn)
    nn = Dense(384,
               act=tf.nn.relu,
               W_init=W_init2,
               b_init=b_init2,
               name='dense1relu')(nn)
    nn = Dense(192,
               act=tf.nn.relu,
               W_init=W_init2,
               b_init=b_init2,
               name='dense2relu')(nn)
    nn = Dense(10, act=None, W_init=W_init2, name='output')(nn)

    M = Model(inputs=ni, outputs=nn, name='cnn')
    return M
Example #24
def get_D(input_shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)
    df_dim = 64
    lrelu = lambda x: tl.act.lrelu(x, 0.2)

    nin = Input(input_shape)
    n = Conv2d(df_dim, (4, 4), (2, 2), act=lrelu, padding='SAME', W_init=w_init)(nin)

    n = Conv2d(df_dim * 2, (4, 4), (2, 2), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 4, (4, 4), (2, 2), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 8, (4, 4), (2, 2), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 16, (4, 4), (2, 2), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 32, (4, 4), (2, 2), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 16, (1, 1), (1, 1), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 8, (1, 1), (1, 1), padding='SAME', W_init=w_init, b_init=None)(n)
    nn = BatchNorm(gamma_init=gamma_init)(n)

    n = Conv2d(df_dim * 2, (1, 1), (1, 1), padding='SAME', W_init=w_init, b_init=None)(nn)
    n = BatchNorm(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 2, (3, 3), (1, 1), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm(act=lrelu, gamma_init=gamma_init)(n)
    n = Conv2d(df_dim * 8, (3, 3), (1, 1), padding='SAME', W_init=w_init, b_init=None)(n)
    n = BatchNorm(gamma_init=gamma_init)(n)
    n = Elementwise(combine_fn=tf.add, act=lrelu)([n, nn])

    n = Flatten()(n)
    no = Dense(n_units=1, W_init=w_init)(n)
    D = Model(inputs=nin, outputs=no, name="discriminator")
    return D
Example #25
def get_D(shape):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    def lrelu(x):
        return tf.nn.leaky_relu(x, flags.leaky_rate)

    ni = Input(shape)
    nn = Conv2d(n_filter=64,
                filter_size=(4, 4),
                strides=(2, 2),
                act=lrelu,
                W_init=w_init)(ni)
    nn = Conv2d(n_filter=128,
                filter_size=(4, 4),
                strides=(2, 2),
                W_init=w_init,
                b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    nn = Conv2d(n_filter=256,
                filter_size=(4, 4),
                strides=(2, 2),
                W_init=w_init,
                b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    nn = Conv2d(n_filter=256,
                filter_size=(4, 4),
                strides=(1, 1),
                W_init=w_init,
                b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    nn = Conv2d(n_filter=256,
                filter_size=(4, 4),
                strides=(1, 1),
                W_init=w_init,
                b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    nn = Flatten()(nn)
    nn = Dense(n_units=1024, W_init=w_init)(nn)
    nn = BatchNorm(decay=0.9, act=lrelu, gamma_init=gamma_init)(nn)
    mid = nn
    d = Dense(n_units=1, W_init=w_init)(nn)
    return tl.models.Model(inputs=ni, outputs=[d, mid], name='D')
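A forward-pass sketch; the 64x64 RGB input shape is an assumption, and `flags.leaky_rate` must be defined before the model is built:

D = get_D([None, 64, 64, 3])
D.train()
imgs = tf.random.normal([8, 64, 64, 3])
d_logit, feat = D(imgs)  # the model returns [score, 1024-d feature]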
Example #26
def get_generator(shape, gf_dim=64): # Dimension of gen filters in first conv layer. [64]
    image_size = 64
    s16 = image_size // 16
    # w_init = tf.glorot_normal_initializer()
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    ni = Input(shape)
    nn = Dense(n_units=(gf_dim * 8 * s16 * s16), W_init=w_init, b_init=None)(ni)
    nn = Reshape(shape=[-1, s16, s16, gf_dim * 8])(nn)
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init, name=None)(nn)
    nn = DeConv2d(gf_dim * 4, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim * 2, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim, (5, 5), (2, 2), W_init=w_init, b_init=None)(nn)
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(3, (5, 5), (2, 2), act=tf.nn.tanh, W_init=w_init)(nn)

    return tl.models.Model(inputs=ni, outputs=nn, name='generator')
Example #27
def conv_block(n,
               n_filter,
               alpha,
               filter_size=(3, 3),
               strides=(1, 1),
               name='conv_block'):
    # ref: https://github.com/keras-team/keras/blob/master/keras/applications/mobilenet.py
    if strides != (1, 1):
        n = ZeroPad2d(padding=((1, 1), (1, 1)), name=name + '.pad')(n)
        padding_type = 'VALID'
    else:
        padding_type = 'SAME'
    n_filter = int(n_filter * alpha)
    n = Conv2d(n_filter,
               filter_size,
               strides,
               padding=padding_type,
               b_init=None,
               name=name + '.conv')(n)
    n = BatchNorm(decay=0.99, act=tf.nn.relu6, name=name + '.batchnorm')(n)
    return n
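Together with `depthwise_conv_block` from Example #18, this block is enough to sketch a MobileNet-v1 stem; the shapes and names below are assumptions:

ni = Input([None, 224, 224, 3])
n = conv_block(ni, 32, alpha=1.0, strides=(2, 2), name='conv1')  # 224 -> 112
n = depthwise_conv_block(n, 64, alpha=1.0, name='dw1')  # keeps 112
n = depthwise_conv_block(n, 128, alpha=1.0, strides=(2, 2), name='dw2')  # 112 -> 56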
Example #28
def get_G(shape_z,
          gf_dim=64):  # Dimension of gen filters in first conv layer. [64]
    # # input: (100,)
    # w_init = tf.random_normal_initializer(stddev=0.02)
    # gamma_init = tf.random_normal_initializer(1., 0.02)
    # nz = Input(shape_z)
    # n = Dense(n_units=3136, act=tf.nn.relu, W_init=w_init)(nz)
    # n = Reshape(shape=[-1, 14, 14, 16])(n)
    # n = DeConv2d(64, (5, 5), strides=(2, 2), W_init=w_init, b_init=None)(n) # (1, 28, 28, 64)
    # n = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(n)
    # n = DeConv2d(flags.c_dim, (5, 5), strides=(1, 1), padding="VALID", W_init=w_init, b_init=None)(n) # (1, 32, 32, 3)
    # return tl.models.Model(inputs=nz, outputs=n, name='generator')

    image_size = 32
    s16 = image_size // 16
    # w_init = tf.glorot_normal_initializer()
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    ni = Input(shape_z)
    nn = Dense(n_units=(gf_dim * 8 * s16 * s16), W_init=w_init,
               b_init=None)(ni)
    nn = Reshape(shape=[-1, s16, s16, gf_dim * 8])(nn)  # [-1, 2, 2, gf_dim * 8]
    nn = BatchNorm(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init,
                   name=None)(nn)
    nn = DeConv2d(gf_dim * 4, (5, 5), (2, 2), W_init=w_init,
                  b_init=None)(nn)  # [-1, 4, 4, gf_dim * 4]
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim * 2, (5, 5), (2, 2), W_init=w_init,
                  b_init=None)(nn)  # [-1, 8, 8, gf_dim * 2]
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(gf_dim, (5, 5), (2, 2), W_init=w_init,
                  b_init=None)(nn)  # [-1, 16, 16, gf_dim *]
    nn = BatchNorm2d(decay=0.9, act=tf.nn.relu, gamma_init=gamma_init)(nn)
    nn = DeConv2d(3, (5, 5), (2, 2), act=tf.nn.tanh,
                  W_init=w_init)(nn)  # [-1, 32, 32, 3]

    return tl.models.Model(inputs=ni, outputs=nn, name='generator')
Example #29
import logging

import tensorflow as tf
from tensorlayer.files import assign_weights
from tensorlayer.layers import (BatchNorm, Conv2d, Dense, Flatten, Input, MaxPool2d)
from tensorlayer.models import Model

def restore_weight(net, M, layername):
    all_weights = net.all_weights
    for i in range(len(all_weights)-1):
        weights = all_weights[i]
        weights1 = all_weights[i+1]
        logging.debug(weights.name)
        if (layername in weights.name) and (layername not in weights1.name):
            break
    logging.debug(i)
    assign_weights(all_weights[0:i+1], M)

ni = Input([None, 32, 32, 3])
nn = Conv2d(32, (3, 3), (1, 1), padding='SAME', name='conv1')(ni)
nn = BatchNorm(decay=0.99, act=tf.nn.relu, name='batch1')(nn)
nn = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(nn)

nn = Conv2d(64, (3, 3), (1, 1), padding='SAME', name='conv2')(nn)
nn = BatchNorm(decay=0.99, act=tf.nn.relu, name='batch2')(nn)
nn = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(nn)

nn = Flatten(name='flatten')(nn)
nn = Dense(16, act=tf.nn.relu, name='denserelu')(nn)
nn = Dense(10, act=None, name='output')(nn)

M = Model(inputs=ni, outputs=nn, name='cnn1')

net = Model.load('./cifar10.h5', load_weights=True)
restore_weight(net, M, 'output')
Example #30
    def __init__(self,
                 state_space,
                 action_space,
                 hidden_dim_list,
                 w_init=tf.keras.initializers.glorot_normal(),
                 activation=tf.nn.tanh,
                 trainable=True,
                 name=None):
        """ NAF Q-value network with multiple fully-connected layers

        :param state_space: (gym.spaces) space of the state from gym environments
        :param action_space: (gym.spaces) space of the action from gym environments
        :param hidden_dim_list: (list[int]) a list of dimensions of hidden layers
        :param w_init: (callable) weights initialization
        :param activation: (callable) activation function
        :param trainable: (bool) set training and evaluation mode
        :param name: (str) name the model
        """
        assert isinstance(action_space, spaces.Box)
        self._state_space, self._action_space = state_space, action_space
        self._action_shape = self._action_space.shape
        assert len(self._action_shape) == 1
        act_inputs = Input((None, ) + self._action_shape,
                           name='Act_Input_Layer')

        # create state input layer
        obs_inputs, current_layer, self._state_shape = CreateInputLayer(
            state_space)

        # concat multi-head state
        if isinstance(state_space, spaces.Dict):
            assert isinstance(obs_inputs, dict)
            assert isinstance(current_layer, dict)
            self.input_dict = obs_inputs
            obs_inputs = list(obs_inputs.values())
            current_layer = tl.layers.Concat(-1)(list(current_layer.values()))

        # calculate value
        current_layer = BatchNorm()(current_layer)
        with tf.name_scope('NAF_VALUE_MLP'):
            for i, dim in enumerate(hidden_dim_list):
                current_layer = Dense(n_units=dim,
                                      act=activation,
                                      W_init=w_init,
                                      name='mlp_hidden_layer%d' %
                                      (i + 1))(current_layer)
            value = Dense(n_units=1,
                          W_init=w_init,
                          name='naf_value_mlp_output')(current_layer)

        # calculate advantage and Q-value
        dim = self._action_shape[0]
        with tf.name_scope('NAF_ADVANTAGE'):
            mu = Dense(n_units=dim, act=activation, W_init=w_init,
                       name='mu')(current_layer)
            L = Dense(n_units=int((dim * (dim + 1)) / 2),
                      W_init=w_init,
                      name='L')(current_layer)
            qvalue = NAFLayer(dim)([L, act_inputs, mu, value])

        super().__init__(inputs=[obs_inputs, act_inputs],
                         outputs=qvalue,
                         name=name)
        if trainable:
            self.train()
        else:
            self.eval()