Example #1
import tensorflow as tf
from tensorlayer.layers import InputLayer, GlobalMaxPool1d, GlobalMaxPool2d, MaxPool3d, MeanPool3d

x = tf.placeholder("float32", [None, 100, 30])
n = InputLayer(x, name='in1')
n = GlobalMaxPool1d(n)
print(n)

x = tf.placeholder("float32", [None, 100, 100, 30])
n = InputLayer(x, name='in2')
n = GlobalMaxPool2d(n)
print(n)

x = tf.placeholder("float32", [None, 100, 100, 100, 30])
n = InputLayer(x, name='in3')
n = MaxPool3d(n)
n.print_layers()
print(n)

x = tf.placeholder("float32", [None, 100, 100, 100, 30])
n = InputLayer(x, name='in4')
n = MeanPool3d(n)
n.print_layers()
print(n)
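
A quick sanity check of the shapes these layers produce (a sketch, assuming TensorLayer 1.x defaults: the global pooling layers remove the spatial axes, and MaxPool3d/MeanPool3d default to a (3, 3, 3) window with (2, 2, 2) strides and 'SAME' padding):

x1 = tf.placeholder("float32", [None, 100, 30])
n1 = GlobalMaxPool1d(InputLayer(x1, name='chk1'), name='chk1_pool')
assert n1.outputs.get_shape().as_list() == [None, 30]              # length axis reduced away

x3 = tf.placeholder("float32", [None, 100, 100, 100, 30])
n3 = MaxPool3d(InputLayer(x3, name='chk3'), name='chk3_pool')      # default (3, 3, 3) window, (2, 2, 2) strides
assert n3.outputs.get_shape().as_list() == [None, 50, 50, 50, 30]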
Example #2
def discriminator2(inputs, is_train=True, reuse=False):
    df_dim = 32  # Dimension of discrim filters in first conv layer. [64]
    w_init = tf.glorot_normal_initializer()
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)

    with tf.name_scope("DISCRIMINATOR2"):
        with tf.variable_scope("discriminator2", reuse=reuse):

            with tf.name_scope("net_in"):
                net_in = InputLayer(inputs, name='d2/in')

            with tf.name_scope("layer0"):
                net_h0 = Conv2d(net_in,
                                df_dim, (3, 3), (3, 3),
                                act=lrelu,
                                padding='SAME',
                                W_init=w_init,
                                name='d2/h0/conv2d')

            with tf.name_scope("layer1"):
                net_h1 = Conv2d(net_h0,
                                df_dim * 2, (3, 3), (3, 3),
                                act=None,
                                padding='SAME',
                                W_init=w_init,
                                name='d2/h1/conv2d')
                net_h1 = BatchNormLayer(net_h1,
                                        decay=0.9,
                                        act=lrelu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='d2/h1/batch_norm')

            with tf.name_scope("layer2"):
                net_h2 = Conv2d(net_h1,
                                df_dim * 4, (3, 3), (3, 3),
                                act=None,
                                padding='SAME',
                                W_init=w_init,
                                name='d2/h2/conv2d')
                net_h2 = BatchNormLayer(net_h2,
                                        decay=0.9,
                                        act=lrelu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='d2/h2/batch_norm')

            with tf.name_scope("layer3"):
                net_h3 = Conv2d(net_h2,
                                df_dim * 8, (3, 3), (3, 3),
                                act=None,
                                padding='SAME',
                                W_init=w_init,
                                name='d2/h3/conv2d')
                net_h3 = BatchNormLayer(net_h3,
                                        decay=0.9,
                                        act=lrelu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='d2/h3/batch_norm')

            with tf.name_scope("layer4"):
                net_h4 = FlattenLayer(net_h3, name='d2/h4/flatten')
                net_h4 = DenseLayer(net_h4,
                                    n_units=df_dim * 8,
                                    act=tf.identity,
                                    W_init=w_init,
                                    name='d2/h4/lin_sigmoid')

            with tf.name_scope("layer5"):
                net_h5 = FlattenLayer(net_h4, name='d2/h5/flatten')
                net_h5 = DenseLayer(net_h5,
                                    n_units=df_dim * 8,
                                    act=tf.identity,
                                    W_init=w_init,
                                    name='d2/h5/lin_sigmoid')

            # net_h6 = FlattenLayer(net_h5, name='d/h6/flatten')
            with tf.name_scope("layer6"):
                net_h6 = DenseLayer(net_h5,
                                    n_units=2,
                                    act=tf.identity,
                                    W_init=w_init,
                                    name='d2/h6/lin_sigmoid')
                logits2 = net_h6.outputs
                net_h6.outputs = tf.nn.softplus(net_h6.outputs)
        return net_h6, logits2
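
A minimal usage sketch for the block above (assumptions: TF 1.x with the TensorLayer 1.x layers imported, and 64x64 RGB inputs -- the actual input size depends on the surrounding project):

t_image = tf.placeholder(tf.float32, [None, 64, 64, 3], name='real_images')
net_d2, d2_logits = discriminator2(t_image, is_train=True, reuse=False)
# second pass over other images (e.g. generated ones), reusing the same variables
net_d2_fake, d2_logits_fake = discriminator2(t_image, is_train=False, reuse=True)
print(net_d2.outputs.get_shape())   # (batch, 2): softplus outputs; d2_logits holds the raw logits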
Example #3
def DBPN(input,
         conv_type='3d',
         feat=64,
         base_filter=32,
         upscale=False,
         factor=2,
         reuse=False,
         name='dbpn'):
    '''
    Dense-deep Back-projection Net.
    Params:
        -conv_type : one of ['3d', '2d'], the convolutional layer type
        -upscale   : if False, the output keeps the same spatial size as the LR input;
                     otherwise the spatial dimensions are up-sampled by `factor`
    '''
    conv, _ = _get_conv_fn(conv_type)
    act = prelu
    kernel = 3
    stride = factor if upscale else 2
    additional_up_down_pair = 1 if upscale else 2

    with tf.variable_scope(name, reuse=reuse):
        n_channels = input.shape[-1]
        x = InputLayer(input, name='input')

        # initial feature extraction
        x = conv(x, out_channels=feat, filter_size=3, act=act, name='feat0')
        x = conv(x,
                 out_channels=base_filter,
                 filter_size=1,
                 act=act,
                 name='feat1')

        # back-projection
        h1 = _up_block(x,
                       n_filters=base_filter,
                       k_size=kernel,
                       stride=stride,
                       act=act,
                       conv_type=conv_type,
                       name='up1')
        l1 = _down_block(h1,
                         n_filters=base_filter,
                         k_size=kernel,
                         stride=stride,
                         act=act,
                         conv_type=conv_type,
                         name='down1')
        h2 = _up_block(l1,
                       n_filters=base_filter,
                       k_size=kernel,
                       stride=stride,
                       act=act,
                       conv_type=conv_type,
                       name='up2')

        concat_h = concat([h2, h1])
        l = _d_down_block(concat_h,
                          n_filters=base_filter,
                          k_size=kernel,
                          stride=stride,
                          act=act,
                          conv_type=conv_type,
                          name='down2')

        concat_l = concat([l, l1])
        h = _d_up_block(concat_l,
                        n_filters=base_filter,
                        k_size=kernel,
                        stride=stride,
                        act=act,
                        conv_type=conv_type,
                        name='up3')

        for i in range(0, additional_up_down_pair):
            concat_h = concat([h, concat_h])
            l = _d_down_block(concat_h,
                              n_filters=base_filter,
                              k_size=kernel,
                              stride=stride,
                              act=act,
                              conv_type=conv_type,
                              name='down%d' % (i + 3))

            concat_l = concat([l, concat_l])
            h = _d_up_block(concat_l,
                            n_filters=base_filter,
                            k_size=kernel,
                            stride=stride,
                            act=act,
                            conv_type=conv_type,
                            name='up%d' % (i + 4))

        concat_h = concat([h, concat_h])

        if upscale:
            x = conv(concat_h,
                     out_channels=n_channels,
                     filter_size=3,
                     act=tf.tanh,
                     name='output_conv')
        else:
            x = _down_block(concat_h,
                            n_filters=1,
                            k_size=3,
                            stride=stride,
                            conv_type=conv_type,
                            name='out')
        return x
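
A usage sketch (it assumes the helpers referenced above -- _get_conv_fn, _up_block, _down_block, _d_up_block, _d_down_block, concat and prelu -- are defined in the same module; the input shape below is only illustrative):

lr = tf.placeholder(tf.float32, [None, 16, 32, 32, 1], name='lr_volume')   # hypothetical 3-D LR input
sr = DBPN(lr, conv_type='3d', upscale=True, factor=2, name='dbpn')
print(sr.outputs.get_shape())   # spatial dims up-sampled by `factor` when upscale=True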
Example #4
def model(x, is_train=True, reuse=False):
    with tf.variable_scope("model", reuse=reuse):
        net = InputLayer(x, name='input')
        net = TimeDistributedLayer(net, layer_class=DenseLayer, args={'n_units': 50, 'name': 'dense'}, name='time_dense')
    return net
Example #5
    def mobilenetv1(self, x, end_with='out', is_train=False, reuse=None):
        with tf.variable_scope("mobilenetv1", reuse=reuse):
            n = InputLayer(x)
            n = self.conv_block(n, 32, strides=(2, 2), is_train=is_train, name="conv")
            if end_with in n.outputs.name:
                return n
            n = self.depthwise_conv_block(n, 64, is_train=is_train, name="depth1")
            if end_with in n.outputs.name:
                return n

            n = self.depthwise_conv_block(n, 128, strides=(2, 2), is_train=is_train, name="depth2")
            if end_with in n.outputs.name:
                return n
            n = self.depthwise_conv_block(n, 128, is_train=is_train, name="depth3")
            if end_with in n.outputs.name:
                return n

            n = self.depthwise_conv_block(n, 256, strides=(2, 2), is_train=is_train, name="depth4")
            if end_with in n.outputs.name:
                return n
            n = self.depthwise_conv_block(n, 256, is_train=is_train, name="depth5")
            if end_with in n.outputs.name:
                return n

            n = self.depthwise_conv_block(n, 512, strides=(2, 2), is_train=is_train, name="depth6")
            if end_with in n.outputs.name:
                return n
            n = self.depthwise_conv_block(n, 512, is_train=is_train, name="depth7")
            if end_with in n.outputs.name:
                return n
            n = self.depthwise_conv_block(n, 512, is_train=is_train, name="depth8")
            if end_with in n.outputs.name:
                return n
            n = self.depthwise_conv_block(n, 512, is_train=is_train, name="depth9")
            if end_with in n.outputs.name:
                return n
            n = self.depthwise_conv_block(n, 512, is_train=is_train, name="depth10")
            if end_with in n.outputs.name:
                return n
            n = self.depthwise_conv_block(n, 512, is_train=is_train, name="depth11")
            if end_with in n.outputs.name:
                return n

            n = self.depthwise_conv_block(n, 1024, strides=(2, 2), is_train=is_train, name="depth12")
            if end_with in n.outputs.name:
                return n
            n = self.depthwise_conv_block(n, 1024, is_train=is_train, name="depth13")
            if end_with in n.outputs.name:
                return n

            n = GlobalMeanPool2d(n, name='globalmeanpool')
            if end_with in n.outputs.name:
                return n
            # n = DropoutLayer(n, 1-1e-3, True, is_train, name='drop')
            # n = DenseLayer(n, 1000, name='output')   # equal
            n = ReshapeLayer(n, [-1, 1, 1, 1024], name='reshape')
            if end_with in n.outputs.name:
                return n
            n = Conv2d(n, 1000, (1, 1), (1, 1), name='out')
            n = FlattenLayer(n, name='flatten')
            if end_with == 'out':
                return n

            raise Exception("end_with : conv, depth1, depth2 ... depth13, globalmeanpool, out")
Example #6
import tensorflow as tf
from tensorlayer.layers import InputLayer, TimeDistributedLayer, DenseLayer

sess = tf.InteractiveSession()
batch_size = 32
timestep = 20
input_dim = 100

## no reuse
x = tf.placeholder(dtype=tf.float32, shape=[batch_size, timestep, input_dim], name="encode_seqs")
net = InputLayer(x, name='input')
net = TimeDistributedLayer(net, layer_class=DenseLayer, args={'n_units': 50, 'name': 'dense'}, name='time_dense')

if net.outputs.get_shape().as_list() != [32, 20, 50]:
    raise Exception("shape dont match")
# ... (32, 20, 50)
net.print_params(False)
if net.count_params() != 5050:
    raise Exception("params dont match")


## reuse
def model(x, is_train=True, reuse=False):
    with tf.variable_scope("model", reuse=reuse):
        net = InputLayer(x, name='input')
        net = TimeDistributedLayer(net, layer_class=DenseLayer, args={'n_units': 50, 'name': 'dense'}, name='time_dense')
    return net


net_train = model(x, is_train=True, reuse=False)
net_test = model(x, is_train=False, reuse=True)
Example #7
def squeezenet(x, is_train=True, reuse=False):
    # model from: https://github.com/wohlert/keras-squeezenet
    #             https://github.com/DT42/squeezenet_demo/blob/master/model.py
    with tf.variable_scope("squeezenet", reuse=reuse):
        with tf.variable_scope("input"):
            n = InputLayer(x)
            # n = Conv2d(n, 96, (7,7),(2,2),tf.nn.relu,'SAME',name='conv1')
            n = Conv2d(n, 64, (3, 3), (2, 2), tf.nn.relu, 'SAME', name='conv1')
            n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')

        with tf.variable_scope("fire2"):
            n = Conv2d(n,
                       16, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        64, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        64, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire3"):
            n = Conv2d(n,
                       16, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        64, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        64, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')
            n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')

        with tf.variable_scope("fire4"):
            n = Conv2d(n,
                       32, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        128, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        128, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire5"):
            n = Conv2d(n,
                       32, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        128, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        128, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')
            n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')

        with tf.variable_scope("fire6"):
            n = Conv2d(n,
                       48, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        192, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        192, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire7"):
            n = Conv2d(n,
                       48, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        192, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        192, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire8"):
            n = Conv2d(n,
                       64, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        256, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        256, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire9"):
            n = Conv2d(n,
                       64, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        256, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        256, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("output"):
            n = DropoutLayer(n,
                             keep=0.5,
                             is_fix=True,
                             is_train=is_train,
                             name='drop1')
            n = Conv2d(n, 1000, (1, 1), (1, 1), padding='VALID',
                       name='conv10')  # 13, 13, 1000
            n = GlobalMeanPool2d(n)
        return n
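
A usage sketch for the SqueezeNet graph above (assuming TF 1.x and the TensorLayer 1.x layers it uses are imported):

x = tf.placeholder(tf.float32, [None, 224, 224, 3], name='image')
net_train = squeezenet(x, is_train=True, reuse=False)
net_test = squeezenet(x, is_train=False, reuse=True)   # shares weights with net_train
print(net_test.outputs.get_shape())                    # (batch, 1000) after GlobalMeanPool2d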
Example #8
def classifier(t_image, is_train=False, reuse=False):
    """
    The classifier network
    :param t_image:
    :param is_train:
    :param reuse:
    :return:
    """
    w_init = tf.random_normal_initializer(stddev=0.02)
    b_init = tf.random_normal_initializer(stddev=0.02)
    g_init = tf.random_normal_initializer(1., 0.02)
    with tf.variable_scope('classifier', reuse=reuse):
        n = InputLayer(t_image, name='in')
        n = Conv2d(n,
                   64, (3, 3), (2, 2),
                   act=parametric_relu,
                   padding='SAME',
                   W_init=w_init,
                   name='n64s1/c')
        temp = n
        # residual blocks
        for i in range(8):
            nn = Conv2d(n,
                        64, (3, 3), (1, 1),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        b_init=b_init,
                        name='n64s1/c1/%s' % i)
            nn = BatchNormLayer(nn,
                                act=parametric_relu,
                                is_train=is_train,
                                gamma_init=g_init,
                                name='n64s1/b1/%s' % i)
            nn = Conv2d(nn,
                        64, (3, 3), (1, 1),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        b_init=b_init,
                        name='n64s1/c2/%s' % i)
            nn = BatchNormLayer(nn,
                                is_train=is_train,
                                gamma_init=g_init,
                                name='n64s1/b2/%s' % i)
            nn = ElementwiseLayer([n, nn],
                                  tf.add,
                                  name='b_residual_add/%s' % i)
            n = nn
        n = Conv2d(n,
                   64, (3, 3), (1, 1),
                   act=None,
                   padding='SAME',
                   W_init=w_init,
                   b_init=b_init,
                   name='n64s1/c/m')
        n = BatchNormLayer(n,
                           is_train=is_train,
                           gamma_init=g_init,
                           name='n64s1/b/m')
        n = ElementwiseLayer([n, temp], tf.add, name='add3')
        # residual blocks end
        n = Conv2d(n,
                   128, (3, 3), (2, 2),
                   act=None,
                   padding='SAME',
                   W_init=w_init,
                   name='n256s1/1')
        n = FlattenLayer(n)
        feat = DenseLayer(n, n_units=64, name='dense64')
        n = InputLayer(parametric_relu(feat.outputs), name='feat_act')
        n = DenseLayer(n, n_units=3, name='dense3')
    return n, feat
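
A usage sketch (it assumes `parametric_relu` and the TensorLayer 1.x layers above are in scope; the input size is only illustrative):

t_image = tf.placeholder(tf.float32, [None, 64, 64, 3], name='patches')
net_c, feat = classifier(t_image, is_train=True, reuse=False)
class_scores = net_c.outputs   # (batch, 3) raw scores from the final DenseLayer
features = feat.outputs        # (batch, 64) intermediate features, e.g. for a feature-matching loss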
Example #9
def generator(inputs, is_train=True, reuse=False):
    image_size = 64
    s16 = image_size // 16
    gf_dim = 64  # Dimension of gen filters in first conv layer. [64]
    c_dim = FLAGS.c_dim  # n_color 3
    w_init = tf.glorot_normal_initializer()
    gamma_init = tf.random_normal_initializer(1., 0.02)

    with tf.variable_scope("generator", reuse=reuse):

        net_in = InputLayer(inputs, name='g/in')
        net_h0 = DenseLayer(net_in,
                            n_units=(gf_dim * 8 * s16 * s16),
                            W_init=w_init,
                            act=tf.identity,
                            name='g/h0/lin')
        net_h0 = ReshapeLayer(net_h0,
                              shape=[-1, s16, s16, gf_dim * 8],
                              name='g/h0/reshape')
        net_h0 = BatchNormLayer(net_h0,
                                act=tf.nn.relu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='g/h0/batch_norm')

        net_h1 = DeConv2d(net_h0,
                          gf_dim * 4, (5, 5),
                          strides=(2, 2),
                          padding='SAME',
                          act=None,
                          W_init=w_init,
                          name='g/h1/decon2d')
        net_h1 = BatchNormLayer(net_h1,
                                act=tf.nn.relu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='g/h1/batch_norm')

        net_h2 = DeConv2d(net_h1,
                          gf_dim * 2, (5, 5),
                          strides=(2, 2),
                          padding='SAME',
                          act=None,
                          W_init=w_init,
                          name='g/h2/decon2d')
        net_h2 = BatchNormLayer(net_h2,
                                act=tf.nn.relu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='g/h2/batch_norm')

        net_h3 = DeConv2d(net_h2,
                          gf_dim, (5, 5),
                          strides=(2, 2),
                          padding='SAME',
                          act=None,
                          W_init=w_init,
                          name='g/h3/decon2d')
        net_h3 = BatchNormLayer(net_h3,
                                act=tf.nn.relu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='g/h3/batch_norm')

        net_h4 = DeConv2d(net_h3,
                          c_dim, (5, 5),
                          strides=(2, 2),
                          padding='SAME',
                          act=None,
                          W_init=w_init,
                          name='g/h4/decon2d')
        net_h4.outputs = tf.nn.tanh(net_h4.outputs)
    return net_h4
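
A usage sketch for this DCGAN-style generator (assuming FLAGS.c_dim is defined and a 100-dimensional latent vector, as in the usual DCGAN setup):

z = tf.placeholder(tf.float32, [None, 100], name='z_noise')
net_g = generator(z, is_train=True, reuse=False)
print(net_g.outputs.get_shape())   # (batch, 64, 64, c_dim), tanh-activated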
Example #10
File: ATDA.py  Project: xmhGit/ATDA
    def create_model(self):

        if self.name == 'mnist-mnistm':
            drop_prob = {'Ft': 0.2, 'F1': 0.5, 'F2': 0.5}
            self.x = tf.placeholder(tf.float32, shape=[None, 28, 28, 3])
            self.y_ = tf.placeholder(tf.float32, shape=[None, 10])
            self.istrain = tf.placeholder(tf.bool, shape=[])

            _input = InputLayer(self.x, name='input_layer')
            _shared_net = _input
            _shared_net = Conv2d(_shared_net,
                                 n_filter=32,
                                 filter_size=(5, 5),
                                 strides=(1, 1),
                                 act=tf.nn.relu,
                                 padding='SAME',
                                 name='cnn1')
            _shared_net = MaxPool2d(_shared_net,
                                    filter_size=(2, 2),
                                    strides=(2, 2),
                                    padding='SAME',
                                    name='pool_layer1')

            _shared_net = Conv2d(_shared_net,
                                 n_filter=48,
                                 filter_size=(5, 5),
                                 strides=(1, 1),
                                 act=tf.identity,
                                 padding='SAME',
                                 name='cnn2')
            _shared_net = BatchNormLayer(_shared_net,
                                         is_train=True,
                                         act=tf.nn.relu)
            _shared_net = MaxPool2d(_shared_net,
                                    filter_size=(2, 2),
                                    strides=(2, 2),
                                    padding='SAME',
                                    name='pool_layer2')
            _shared_net = FlattenLayer(_shared_net)

            feature = _shared_net.outputs

            _F1_net = _shared_net
            _F2_net = _shared_net
            _Ft_net = _shared_net

            with tf.variable_scope("F1") as scope:
                _F1_net = DropoutLayer(_F1_net,
                                       keep=drop_prob['F1'],
                                       name='drop1',
                                       is_fix=True,
                                       is_train=self.istrain)
                _F1_net = DenseLayer(_F1_net,
                                     n_units=100,
                                     act=tf.identity,
                                     name='relu1')
                _F1_net = BatchNormLayer(_F1_net,
                                         is_train=True,
                                         act=tf.nn.relu,
                                         name='bn1')
                _F1_net = DropoutLayer(_F1_net,
                                       keep=drop_prob['F1'],
                                       name='drop2',
                                       is_fix=True,
                                       is_train=self.istrain)
                _F1_net = DenseLayer(_F1_net,
                                     n_units=100,
                                     act=tf.identity,
                                     name='relu2')
                _F1_net = BatchNormLayer(_F1_net,
                                         is_train=True,
                                         act=tf.nn.relu,
                                         name='bn2')
                _F1_net = DenseLayer(_F1_net,
                                     n_units=10,
                                     act=tf.nn.softmax,
                                     name='output')
                self.F1_out = _F1_net.outputs

            with tf.variable_scope("F2") as scope:

                _F2_net = DropoutLayer(_F2_net,
                                       keep=drop_prob['F2'],
                                       name='drop1',
                                       is_fix=True,
                                       is_train=self.istrain)
                _F2_net = DenseLayer(_F2_net,
                                     n_units=100,
                                     act=tf.identity,
                                     name='relu1')
                _F2_net = BatchNormLayer(_F2_net,
                                         is_train=True,
                                         act=tf.nn.relu,
                                         name='bn1')
                _F2_net = DropoutLayer(_F2_net,
                                       keep=drop_prob['F2'],
                                       name='drop2',
                                       is_fix=True,
                                       is_train=self.istrain)
                _F2_net = DenseLayer(_F2_net,
                                     n_units=100,
                                     act=tf.identity,
                                     name='relu2')
                _F2_net = BatchNormLayer(_F2_net,
                                         is_train=True,
                                         act=tf.nn.relu,
                                         name='bn2')
                _F2_net = DenseLayer(_F2_net,
                                     n_units=10,
                                     act=tf.nn.softmax,
                                     name='output')
                self.F2_out = _F2_net.outputs

            with tf.variable_scope("Ft") as scope:
                _Ft_net = DropoutLayer(_Ft_net,
                                       keep=drop_prob['Ft'],
                                       name='drop1',
                                       is_fix=True,
                                       is_train=self.istrain)
                _Ft_net = DenseLayer(_Ft_net,
                                     n_units=100,
                                     act=tf.identity,
                                     name='relu1')
                _Ft_net = BatchNormLayer(_Ft_net,
                                         is_train=True,
                                         act=tf.nn.relu,
                                         name='bn1')
                _Ft_net = DropoutLayer(_Ft_net,
                                       keep=drop_prob['Ft'],
                                       name='drop2',
                                       is_fix=True,
                                       is_train=self.istrain)
                _Ft_net = DenseLayer(_Ft_net,
                                     n_units=100,
                                     act=tf.identity,
                                     name='relu2')
                _Ft_net = BatchNormLayer(_Ft_net,
                                         is_train=True,
                                         act=tf.nn.relu,
                                         name='bn2')
                _Ft_net = DenseLayer(_Ft_net,
                                     n_units=10,
                                     act=tf.nn.softmax,
                                     name='output')
                self.Ft_out = _Ft_net.outputs

            #self.cost = cross_entropy(F1_out,self.y_,name='F1_loss')#+cross_entropy(F2_out,self.y_,name='F2_loss')+cross_entropy(Ft_out,self.y_,name='Ft_loss')
            self.F1_loss = -tf.reduce_mean(self.y_ * tf.log(self.F1_out))
            self.F2_loss = -tf.reduce_mean(self.y_ * tf.log(self.F2_out))
            self.Ft_loss = -tf.reduce_mean(self.y_ * tf.log(self.Ft_out))
            self.F1_acc = tf.reduce_mean(
                tf.cast(
                    tf.equal(tf.argmax(self.F1_out, 1), tf.argmax(self.y_, 1)),
                    tf.float32))
            self.F2_acc = tf.reduce_mean(
                tf.cast(
                    tf.equal(tf.argmax(self.F2_out, 1), tf.argmax(self.y_, 1)),
                    tf.float32))
            self.Ft_acc = tf.reduce_mean(
                tf.cast(
                    tf.equal(tf.argmax(self.Ft_out, 1), tf.argmax(self.y_, 1)),
                    tf.float32))

            self.cost = self.F1_loss + self.F2_loss + self.Ft_loss + tf.reduce_sum(
                tf.abs(
                    tf.multiply(tf.transpose(_F1_net.all_params[14]),
                                _F2_net.all_params[14])))
            self.labeling_cost = self.F1_loss + self.F2_loss + tf.reduce_sum(
                tf.abs(
                    tf.multiply(tf.transpose(_F1_net.all_params[14]),
                                _F2_net.all_params[14])))
            self.targetspecific_cost = self.Ft_loss

            self.F1F2Ft_op = tf.train.AdamOptimizer(
                learning_rate=0.01).minimize(self.cost)
            self.F1F2_op = tf.train.AdamOptimizer(learning_rate=0.01).minimize(
                self.labeling_cost)
            self.Ft_op = tf.train.AdamOptimizer(learning_rate=0.01).minimize(
                self.targetspecific_cost)

            tl.layers.initialize_global_variables(self.sess)
            print('*' * 80)
            _shared_net.print_params()
            _shared_net.print_layers()
            print('*' * 80)
            _F1_net.print_params()
            _F1_net.print_layers()
            print('*' * 80)
            _F2_net.print_params()
            _F2_net.print_layers()
            print('*' * 80)
            _Ft_net.print_params()
            _Ft_net.print_layers()
Example #11
import tensorflow as tf
from tensorlayer.layers import ZeroPad1d, ZeroPad2d, ZeroPad3d, InputLayer

## 1D
x = tf.placeholder(tf.float32, (None, 100, 1))
n = InputLayer(x)
n1 = ZeroPad1d(n, padding=1)
n1.print_layers()
shape = n1.outputs.get_shape().as_list()
if shape[1:] != [102, 1]:
    raise Exception("shape dont match")

n2 = ZeroPad1d(n, padding=(2, 3))
n2.print_layers()
shape = n2.outputs.get_shape().as_list()
if shape[1:] != [105, 1]:
    raise Exception("shape dont match")

## 2D
x = tf.placeholder(tf.float32, (None, 100, 100, 3))
n = InputLayer(x)
n1 = ZeroPad2d(n, padding=2)
n1.print_layers()
shape = n1.outputs.get_shape().as_list()
if shape[1:] != [104, 104, 3]:
    raise Exception("shape dont match")

n2 = ZeroPad2d(n, padding=(2, 3))
n2.print_layers()
shape = n2.outputs.get_shape().as_list()
if shape[1:] != [104, 106, 3]:
    raise Exception("shape dont match")
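
The snippet is cut off at this point. By analogy with the 1-D and 2-D checks above, the 3-D case presumably continues along these lines (a sketch, not the original code):

## 3D
x = tf.placeholder(tf.float32, (None, 100, 100, 100, 3))
n = InputLayer(x)
n3 = ZeroPad3d(n, padding=2)
n3.print_layers()
shape = n3.outputs.get_shape().as_list()
if shape[1:] != [104, 104, 104, 3]:
    raise Exception("shape dont match")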
Example #12
def discriminator(inputs, is_train=True, reuse=False):
    df_dim = 64  # Dimension of discrim filters in first conv layer. [64]
    w_init = tf.glorot_normal_initializer()
    gamma_init = tf.random_normal_initializer(1., 0.02)
    lrelu = lambda x: tf.nn.leaky_relu(x, 0.2)
    with tf.variable_scope("discriminator", reuse=reuse):

        net_in = InputLayer(inputs, name='d/in')
        net_h0 = Conv2d(net_in,
                        df_dim, (5, 5), (2, 2),
                        act=lrelu,
                        padding='SAME',
                        W_init=w_init,
                        name='d/h0/conv2d')

        net_h1 = Conv2d(net_h0,
                        df_dim * 2, (5, 5), (2, 2),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        name='d/h1/conv2d')
        net_h1 = BatchNormLayer(net_h1,
                                decay=0.9,
                                act=lrelu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='d/h1/batch_norm')

        net_h2 = Conv2d(net_h1,
                        df_dim * 4, (5, 5), (2, 2),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        name='d/h2/conv2d')
        net_h2 = BatchNormLayer(net_h2,
                                decay=0.9,
                                act=lrelu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='d/h2/batch_norm')

        net_h3 = Conv2d(net_h2,
                        df_dim * 8, (5, 5), (2, 2),
                        act=None,
                        padding='SAME',
                        W_init=w_init,
                        name='d/h3/conv2d')
        net_h3 = BatchNormLayer(net_h3,
                                decay=0.9,
                                act=lrelu,
                                is_train=is_train,
                                gamma_init=gamma_init,
                                name='d/h3/batch_norm')

        net_h4 = FlattenLayer(net_h3, name='d/h4/flatten')
        net_h4 = DenseLayer(net_h4,
                            n_units=1,
                            act=tf.identity,
                            W_init=w_init,
                            name='d/h4/lin_sigmoid')
        logits = net_h4.outputs
        net_h4.outputs = tf.nn.sigmoid(net_h4.outputs)
    return net_h4, logits
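
The raw logits are returned alongside the network so the GAN losses can be built on the numerically stable sigmoid cross-entropy. A sketch of the usual wiring (it assumes a paired generator such as the one in Example #9, a `real_images` placeholder, and `tl` imported as tensorlayer):

net_d, d_logits_real = discriminator(real_images, is_train=True, reuse=False)
_, d_logits_fake = discriminator(net_g.outputs, is_train=True, reuse=True)
d_loss_real = tl.cost.sigmoid_cross_entropy(d_logits_real, tf.ones_like(d_logits_real), name='d_real')
d_loss_fake = tl.cost.sigmoid_cross_entropy(d_logits_fake, tf.zeros_like(d_logits_fake), name='d_fake')
d_loss = d_loss_real + d_loss_fake
g_loss = tl.cost.sigmoid_cross_entropy(d_logits_fake, tf.ones_like(d_logits_fake), name='g')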
Example #13
def model_batch_norm(x_crop, y_, is_train, reuse):
    W_init = tf.truncated_normal_initializer(stddev=5e-2)
    W_init2 = tf.truncated_normal_initializer(stddev=0.04)
    b_init2 = tf.constant_initializer(value=0.1)
    with tf.variable_scope("model", reuse=reuse):
        net = InputLayer(x_crop, name='input')
        net = Conv2d(net,
                     64, (5, 5), (1, 1),
                     padding='SAME',
                     W_init=W_init,
                     b_init=None,
                     name='cnn1')
        net = BatchNormLayer(net,
                             decay=0.99,
                             is_train=is_train,
                             act=tf.nn.relu,
                             name='batch1')
        net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool1')

        net = Conv2d(net,
                     64, (5, 5), (1, 1),
                     padding='SAME',
                     W_init=W_init,
                     b_init=None,
                     name='cnn2')
        net = BatchNormLayer(net,
                             decay=0.99,
                             is_train=is_train,
                             act=tf.nn.relu,
                             name='batch2')
        net = MaxPool2d(net, (3, 3), (2, 2), padding='SAME', name='pool2')

        net = FlattenLayer(net, name='flatten')
        net = DenseLayer(net,
                         384,
                         act=tf.nn.relu,
                         W_init=W_init2,
                         b_init=b_init2,
                         name='d1relu')
        net = DenseLayer(net,
                         192,
                         act=tf.nn.relu,
                         W_init=W_init2,
                         b_init=b_init2,
                         name='d2relu')
        net = DenseLayer(net,
                         n_units=10,
                         act=None,
                         W_init=W_init2,
                         name='output')
        y = net.outputs

        ce = tl.cost.cross_entropy(y, y_, name='cost')
        # L2 for the MLP, without this, the accuracy will be reduced by 15%.
        L2 = 0
        for p in tl.layers.get_variables_with_name('relu/W', True, True):
            L2 += tf.contrib.layers.l2_regularizer(0.004)(p)
        cost = ce + L2

        correct_prediction = tf.equal(tf.cast(tf.argmax(y, 1), tf.int32), y_)
        acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        return net, cost, acc
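
A usage sketch in the style of the TensorLayer CIFAR-10 tutorial (it assumes `tl` is imported as tensorlayer, 24x24 cropped inputs, and int32 labels):

x_crop = tf.placeholder(tf.float32, [None, 24, 24, 3], name='x_crop')
y_ = tf.placeholder(tf.int32, [None], name='y_')
net_train, cost, acc = model_batch_norm(x_crop, y_, is_train=True, reuse=False)
net_test, cost_test, acc_test = model_batch_norm(x_crop, y_, is_train=False, reuse=True)
train_op = tf.train.AdamOptimizer(1e-4).minimize(cost, var_list=net_train.all_params)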
Example #14
def _phase_shift(x):
    n = InputLayer(x, name='input_subpixel')
    n = SubpixelConv2d(n, scale=scale, n_out_channel=None, act=tf.nn.relu)
    return n.outputs
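
Note that `scale` is captured from the enclosing scope. SubpixelConv2d rearranges channels into space, so an (H, W, C * scale**2) tensor becomes (H * scale, W * scale, C). A small illustrative check (a sketch, with `scale` fixed to 2 here):

scale = 2
x = tf.placeholder(tf.float32, [None, 16, 16, 4 * scale ** 2])
y = _phase_shift(x)
print(y.get_shape())   # expected: (?, 32, 32, 4)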
Example #15
def a2net(x, is_train=True, reuse=False):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    with tf.variable_scope('a2net', reuse=reuse):
        net_in = InputLayer(x, name='input')
        inputY = InputLayer(x[:, :, :, :1], name='inputY')
        inputUV = InputLayer(x[:, :, :, 1:], name='inputUV')

        # Encoder

        conv1 = Conv2d(net_in,
                       32, (3, 3), (1, 1),
                       act=None,
                       W_init=w_init,
                       b_init=None,
                       name='encoder/conv1')
        conv1 = BatchNormLayer(conv1,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='encoder/bn1')
        conv2 = Conv2d(conv1,
                       32, (3, 3), (1, 1),
                       act=None,
                       W_init=w_init,
                       b_init=None,
                       name='encoder/conv2')
        conv2 = BatchNormLayer(conv2,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='encoder/bn2')

        concat1 = ConcatLayer([conv1, conv2],
                              concat_dim=-1,
                              name='encoder/concat1')
        aggregation1 = Conv2d(concat1,
                              32, (4, 4), (2, 2),
                              act=None,
                              W_init=w_init,
                              b_init=None,
                              name='encoder/aggregation1')
        aggregation1 = BatchNormLayer(aggregation1,
                                      act=tf.nn.relu,
                                      is_train=is_train,
                                      gamma_init=gamma_init,
                                      name='encoder/bn3')

        conv3 = Conv2d(aggregation1,
                       32, (3, 3), (1, 1),
                       act=None,
                       W_init=w_init,
                       b_init=None,
                       name='encoder/conv3')
        conv3 = BatchNormLayer(conv3,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='encoder/bn4')

        concat2 = ConcatLayer([aggregation1, conv3],
                              concat_dim=-1,
                              name='encoder/concat2')
        aggregation2 = Conv2d(concat2,
                              32, (4, 4), (2, 2),
                              act=None,
                              W_init=w_init,
                              b_init=None,
                              name='encoder/aggregation2')
        aggregation2 = BatchNormLayer(aggregation2,
                                      act=tf.nn.relu,
                                      is_train=is_train,
                                      gamma_init=gamma_init,
                                      name='encoder/bn5')

        conv4 = Conv2d(aggregation2,
                       32, (3, 3), (1, 1),
                       act=None,
                       W_init=w_init,
                       b_init=None,
                       name='encoder/conv4')
        conv4 = BatchNormLayer(conv4,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='encoder/bn6')

        concat3 = ConcatLayer([aggregation2, conv4],
                              concat_dim=-1,
                              name='encoder/concat3')
        aggregation3 = Conv2d(concat3,
                              32, (4, 4), (2, 2),
                              act=None,
                              W_init=w_init,
                              b_init=None,
                              name='encoder/aggregation3')
        aggregation3 = BatchNormLayer(aggregation3,
                                      act=tf.nn.relu,
                                      is_train=is_train,
                                      gamma_init=gamma_init,
                                      name='encoder/bn7')

        # DecoderY

        convY_1 = Conv2d(aggregation3,
                         32, (3, 3), (1, 1),
                         act=None,
                         W_init=w_init,
                         b_init=None,
                         name='decoderY/conv1')
        convY_1 = BatchNormLayer(convY_1,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 gamma_init=gamma_init,
                                 name='decoderY/bn1')

        concatY_1 = ConcatLayer([aggregation3, convY_1],
                                concat_dim=-1,
                                name='decoderY/concat1')
        aggregationY_1 = DeConv2d(concatY_1,
                                  32, (2, 2), (2, 2),
                                  act=None,
                                  W_init=w_init,
                                  b_init=None,
                                  name='decoderY/aggregation1')
        aggregationY_1 = BatchNormLayer(aggregationY_1,
                                        act=tf.nn.relu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='decoderY/bn2')

        copyY_1 = ConcatLayer([conv4, aggregationY_1],
                              concat_dim=-1,
                              name='decoderY/copy1')
        convY_2 = Conv2d(copyY_1,
                         32, (3, 3), (1, 1),
                         act=None,
                         W_init=w_init,
                         b_init=None,
                         name='decoderY/conv2')
        convY_2 = BatchNormLayer(convY_2,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 gamma_init=gamma_init,
                                 name='decoderY/bn3')

        concatY_2 = ConcatLayer([copyY_1, convY_2],
                                concat_dim=-1,
                                name='decoderY/concat2')
        aggregationY_2 = DeConv2d(concatY_2,
                                  32, (2, 2), (2, 2),
                                  act=None,
                                  W_init=w_init,
                                  b_init=None,
                                  name='decoderY/aggregation2')
        aggregationY_2 = BatchNormLayer(aggregationY_2,
                                        act=tf.nn.relu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='decoderY/bn4')

        copyY_2 = ConcatLayer([conv3, aggregationY_2],
                              concat_dim=-1,
                              name='decoderY/copy2')
        convY_3 = Conv2d(copyY_2,
                         32, (3, 3), (1, 1),
                         act=None,
                         W_init=w_init,
                         b_init=None,
                         name='decoderY/conv3')
        convY_3 = BatchNormLayer(convY_3,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 gamma_init=gamma_init,
                                 name='decoderY/bn5')

        concatY_3 = ConcatLayer([copyY_2, convY_3],
                                concat_dim=-1,
                                name='decoderY/concat3')
        aggregationY_3 = DeConv2d(concatY_3,
                                  32, (2, 2), (2, 2),
                                  act=None,
                                  W_init=w_init,
                                  b_init=None,
                                  name='decoderY/aggregation3')
        aggregationY_3 = BatchNormLayer(aggregationY_3,
                                        act=tf.nn.relu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='decoderY/bn6')

        copyY_3 = ConcatLayer([conv2, aggregationY_3],
                              concat_dim=-1,
                              name='decoderY/copy3')

        outputY = Conv2d(copyY_3,
                         1, (3, 3), (1, 1),
                         act=tf.nn.tanh,
                         name='decoderY/output')

        # DecoderUV

        convUV_1 = Conv2d(aggregation3,
                          UV_SIZE, (3, 3), (1, 1),
                          act=None,
                          W_init=w_init,
                          b_init=None,
                          name='decoderUV/conv1')
        convUV_1 = BatchNormLayer(convUV_1,
                                  act=tf.nn.relu,
                                  is_train=is_train,
                                  gamma_init=gamma_init,
                                  name='decoderUV/bn1')

        concatUV_1 = ConcatLayer([aggregation3, convUV_1],
                                 concat_dim=-1,
                                 name='decoderUV/concat1')
        aggregationUV_1 = DeConv2d(concatUV_1,
                                   UV_SIZE, (2, 2), (2, 2),
                                   act=None,
                                   W_init=w_init,
                                   b_init=None,
                                   name='decoderUV/aggregation1')
        aggregationUV_1 = BatchNormLayer(aggregationUV_1,
                                         act=tf.nn.relu,
                                         is_train=is_train,
                                         gamma_init=gamma_init,
                                         name='decoderUV/bn2')

        copyUV_1 = ConcatLayer([conv4, aggregationUV_1],
                               concat_dim=-1,
                               name='decoderUV/copy1')
        convUV_2 = Conv2d(copyUV_1,
                          UV_SIZE, (3, 3), (1, 1),
                          act=None,
                          W_init=w_init,
                          b_init=None,
                          name='decoderUV/conv2')
        convUV_2 = BatchNormLayer(convUV_2,
                                  act=tf.nn.relu,
                                  is_train=is_train,
                                  gamma_init=gamma_init,
                                  name='decoderUV/bn3')

        concatUV_2 = ConcatLayer([copyUV_1, convUV_2],
                                 concat_dim=-1,
                                 name='decoderUV/concat2')
        aggregationUV_2 = DeConv2d(concatUV_2,
                                   UV_SIZE, (2, 2), (2, 2),
                                   act=None,
                                   W_init=w_init,
                                   b_init=None,
                                   name='decoderUV/aggregation2')
        aggregationUV_2 = BatchNormLayer(aggregationUV_2,
                                         act=tf.nn.relu,
                                         is_train=is_train,
                                         gamma_init=gamma_init,
                                         name='decoderUV/bn4')

        copyUV_2 = ConcatLayer([conv3, aggregationUV_2],
                               concat_dim=-1,
                               name='decoderUV/copy2')
        convUV_3 = Conv2d(copyUV_2,
                          UV_SIZE, (3, 3), (1, 1),
                          act=None,
                          W_init=w_init,
                          b_init=None,
                          name='decoderUV/conv3')
        convUV_3 = BatchNormLayer(convUV_3,
                                  act=tf.nn.relu,
                                  is_train=is_train,
                                  gamma_init=gamma_init,
                                  name='decoderUV/bn5')

        concatUV_3 = ConcatLayer([copyUV_2, convUV_3],
                                 concat_dim=-1,
                                 name='decoderUV/concat3')
        aggregationUV_3 = DeConv2d(concatUV_3,
                                   UV_SIZE, (2, 2), (2, 2),
                                   act=None,
                                   W_init=w_init,
                                   b_init=None,
                                   name='decoderUV/aggregation3')
        aggregationUV_3 = BatchNormLayer(aggregationUV_3,
                                         act=tf.nn.relu,
                                         is_train=is_train,
                                         gamma_init=gamma_init,
                                         name='decoderUV/bn6')

        copyUV_3 = ConcatLayer([conv2, aggregationUV_3],
                               concat_dim=-1,
                               name='decoderUV/copy3')

        outputUV = Conv2d(copyUV_3,
                          2, (3, 3), (1, 1),
                          act=tf.nn.tanh,
                          name='decoderUV/output')

        outY_plus_Y = ElementwiseLambdaLayer([outputY, inputY],
                                             fn=lambda x, y: BETA * x +
                                             (1 - BETA) * y,
                                             name='outY_plus_Y')

        outUV_plus_UV = ElementwiseLambdaLayer([outputUV, inputUV],
                                               fn=lambda x, y: BETA * x +
                                               (1 - BETA) * y,
                                               name='outUV_plus_UV')

        net_out = ConcatLayer([outY_plus_Y, outUV_plus_UV],
                              concat_dim=-1,
                              name='net_out')

        return outY_plus_Y, outUV_plus_UV, net_out
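
A usage sketch (it assumes the module-level constants used above exist, e.g. BETA = 0.5 and UV_SIZE = 32, plus the ElementwiseLambdaLayer import, and a YUV batch whose first channel is Y and the remaining two are UV; the 128x128 size is only illustrative and must be divisible by 8):

x = tf.placeholder(tf.float32, [None, 128, 128, 3], name='yuv_in')
y_out, uv_out, net_out = a2net(x, is_train=True, reuse=False)
print(net_out.outputs.get_shape())   # same spatial size as the input, 3 channels (Y + UV)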
Example #16
def model(x,
          n_pos,
          mask_miss1,
          mask_miss2,
          is_train=False,
          reuse=None,
          data_format='channels_last'):  # hao25
    if data_format != 'channels_last':
        # TODO: support NCHW
        print('data_format=%s is ignored' % data_format)

    b1_list = []
    b2_list = []
    with tf.variable_scope('model', reuse):
        x = x - 0.5
        n = InputLayer(x, name='in')
        n = Conv2d(n,
                   32, (3, 3), (1, 1),
                   None,
                   'SAME',
                   W_init=W_init,
                   b_init=b_init,
                   name='conv1_1')
        n = BatchNormLayer(n,
                           decay=decay,
                           is_train=is_train,
                           act=tf.nn.relu,
                           name='bn1')
        n = depthwise_conv_block(n, 64, is_train=is_train, name="conv1_depth1")

        n = depthwise_conv_block(n,
                                 128,
                                 strides=(2, 2),
                                 is_train=is_train,
                                 name="conv2_depth1")
        n = depthwise_conv_block(n,
                                 128,
                                 is_train=is_train,
                                 name="conv2_depth2")
        n1 = n

        n = depthwise_conv_block(n,
                                 256,
                                 strides=(2, 2),
                                 is_train=is_train,
                                 name="conv3_depth1")
        n = depthwise_conv_block(n,
                                 256,
                                 is_train=is_train,
                                 name="conv3_depth2")
        n2 = n

        n = depthwise_conv_block(n,
                                 512,
                                 strides=(2, 2),
                                 is_train=is_train,
                                 name="conv4_depth1")
        n = depthwise_conv_block(n,
                                 512,
                                 is_train=is_train,
                                 name="conv4_depth2")
        n = depthwise_conv_block(n,
                                 512,
                                 is_train=is_train,
                                 name="conv4_depth3")
        n = depthwise_conv_block(n,
                                 512,
                                 is_train=is_train,
                                 name="conv4_depth4")
        cnn = depthwise_conv_block(n,
                                   512,
                                   is_train=is_train,
                                   name="conv4_depth5")

        ## low-level features
        # n1 = MaxPool2d(n1, (2, 2), (2, 2), 'same', name='maxpool2d')
        n1 = depthwise_conv_block(n1,
                                  128,
                                  strides=(2, 2),
                                  is_train=is_train,
                                  name="n1_down1")
        n1 = depthwise_conv_block(n1,
                                  128,
                                  strides=(2, 2),
                                  is_train=is_train,
                                  name="n1_down2")
        ## mid-level features
        n2 = depthwise_conv_block(n2,
                                  256,
                                  strides=(2, 2),
                                  is_train=is_train,
                                  name="n2_down1")
        ## combine features
        cnn = ConcatLayer([cnn, n1, n2], -1, name='concat')

        ## stage1
        with tf.variable_scope("stage1/branch1"):
            b1 = depthwise_conv_block(cnn,
                                      128,
                                      filter_size=(7, 7),
                                      is_train=is_train,
                                      name="c1")
            b1 = depthwise_conv_block(b1,
                                      128,
                                      filter_size=(7, 7),
                                      is_train=is_train,
                                      name="c2")
            b1 = depthwise_conv_block(b1,
                                      128,
                                      filter_size=(7, 7),
                                      is_train=is_train,
                                      name="c3")
            b1 = depthwise_conv_block(b1,
                                      512,
                                      filter_size=(1, 1),
                                      is_train=is_train,
                                      name="c4")
            b1 = Conv2d(b1,
                        n_pos, (1, 1), (1, 1),
                        None,
                        'VALID',
                        W_init=W_init,
                        b_init=b_init,
                        name='confs')

            if is_train:
                b1.outputs = b1.outputs * mask_miss1
        with tf.variable_scope("stage1/branch2"):
            b2 = depthwise_conv_block(cnn,
                                      128,
                                      filter_size=(7, 7),
                                      is_train=is_train,
                                      name="c1")
            b2 = depthwise_conv_block(b2,
                                      128,
                                      filter_size=(7, 7),
                                      is_train=is_train,
                                      name="c2")
            b2 = depthwise_conv_block(b2,
                                      128,
                                      filter_size=(7, 7),
                                      is_train=is_train,
                                      name="c3")
            b2 = depthwise_conv_block(b2,
                                      512,
                                      filter_size=(1, 1),
                                      is_train=is_train,
                                      name="c4")
            b2 = Conv2d(b2,
                        38, (1, 1), (1, 1),
                        None,
                        'VALID',
                        W_init=W_init,
                        b_init=b_init2,
                        name='pafs')
            if is_train:
                b2.outputs = b2.outputs * mask_miss2
            b1_list.append(b1)
            b2_list.append(b2)

        ## other stages
        # for i in range(2, 7): # [2, 3, 4, 5, 6]
        # for i in [5, 6]:
        for i in [3, 4, 5, 6]:
            b1, b2 = stage(cnn,
                           b1_list[-1],
                           b2_list[-1],
                           n_pos,
                           mask_miss1,
                           mask_miss2,
                           is_train,
                           name='stage%d' % i)
            b1_list.append(b1)
            b2_list.append(b2)
        net = tl.layers.merge_networks([b1_list[-1], b2_list[-1]])
    return cnn, b1_list, b2_list, net
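
A rough usage sketch for the model above. It assumes the module-level helpers used in this file (depthwise_conv_block, stage, W_init, b_init, b_init2, decay) are defined as in the original source; the 368x368 input size and the 46x46 mask shapes are illustrative choices (the output stride is 8), not values taken from the original training script.

x = tf.placeholder(tf.float32, [None, 368, 368, 3], name='image')
# confidence-map and PAF masks at the network's output stride (368 / 8 = 46)
mask1 = tf.placeholder(tf.float32, [None, 46, 46, 19], name='mask_conf')
mask2 = tf.placeholder(tf.float32, [None, 46, 46, 38], name='mask_paf')

cnn, b1_list, b2_list, net = model(x, n_pos=19, mask_miss1=mask1,
                                   mask_miss2=mask2, is_train=True)
print(b1_list[-1].outputs)   # final confidence maps
print(b2_list[-1].outputs)   # final part-affinity fields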
Exemplo n.º 17
0
    def squeezenetv1(cls, x, end_with='output', is_train=False, reuse=None):
        with tf.variable_scope("squeezenetv1", reuse=reuse):
            with tf.variable_scope("input"):
                n = InputLayer(x)
                # n = Conv2d(n, 96, (7,7),(2,2),tf.nn.relu,'SAME',name='conv1')
                n = Conv2d(n, 64, (3, 3), (2, 2), tf.nn.relu, 'SAME', name='conv1')
                n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')
            if end_with in n.outputs.name:
                return n

            with tf.variable_scope("fire2"):
                n = Conv2d(n, 16, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='squeeze1x1')
                n1 = Conv2d(n, 64, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='expand1x1')
                n2 = Conv2d(n, 64, (3, 3), (1, 1), tf.nn.relu, 'SAME', name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.variable_scope("fire3"):
                n = Conv2d(n, 16, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='squeeze1x1')
                n1 = Conv2d(n, 64, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='expand1x1')
                n2 = Conv2d(n, 64, (3, 3), (1, 1), tf.nn.relu, 'SAME', name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
                n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')
            if end_with in n.outputs.name:
                return n

            with tf.variable_scope("fire4"):
                n = Conv2d(n, 32, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='squeeze1x1')
                n1 = Conv2d(n, 128, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='expand1x1')
                n2 = Conv2d(n, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.variable_scope("fire5"):
                n = Conv2d(n, 32, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='squeeze1x1')
                n1 = Conv2d(n, 128, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='expand1x1')
                n2 = Conv2d(n, 128, (3, 3), (1, 1), tf.nn.relu, 'SAME', name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
                n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')
            if end_with in n.outputs.name:
                return n

            with tf.variable_scope("fire6"):
                n = Conv2d(n, 48, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='squeeze1x1')
                n1 = Conv2d(n, 192, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='expand1x1')
                n2 = Conv2d(n, 192, (3, 3), (1, 1), tf.nn.relu, 'SAME', name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.variable_scope("fire7"):
                n = Conv2d(n, 48, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='squeeze1x1')
                n1 = Conv2d(n, 192, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='expand1x1')
                n2 = Conv2d(n, 192, (3, 3), (1, 1), tf.nn.relu, 'SAME', name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.variable_scope("fire8"):
                n = Conv2d(n, 64, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='squeeze1x1')
                n1 = Conv2d(n, 256, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='expand1x1')
                n2 = Conv2d(n, 256, (3, 3), (1, 1), tf.nn.relu, 'SAME', name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.variable_scope("fire9"):
                n = Conv2d(n, 64, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='squeeze1x1')
                n1 = Conv2d(n, 256, (1, 1), (1, 1), tf.nn.relu, 'SAME', name='expand1x1')
                n2 = Conv2d(n, 256, (3, 3), (1, 1), tf.nn.relu, 'SAME', name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.variable_scope("output"):
                n = DropoutLayer(n, keep=0.5, is_fix=True, is_train=is_train, name='drop1')
                n = Conv2d(n, 1000, (1, 1), (1, 1), padding='VALID', name='conv10')  # 13, 13, 1000
                n = GlobalMeanPool2d(n)
            if end_with in n.outputs.name:
                return n

            raise Exception("end_with : input, fire2, fire3 ... fire9, output")
Exemplo n.º 18
0
def generator(inputs, is_train=True, reuse=False):
    img_size = CFG.img_size
    s2, s4, s8, s16 = [int(img_size / i) for i in [2, 4, 8, 16]]
    gfs = 64
    channels = CFG.channels
    batch_size = CFG.batch_size

    W_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    with tf.variable_scope('generator', reuse=reuse):
        tl.layers.set_name_reuse(reuse)

        g = InputLayer(inputs, name='g/inputs')
        g = DenseLayer(g,
                       gfs * 8 * s16 * s16,
                       W_init=W_init,
                       act=tl.act.identity,
                       name='g/fc1')
        g = ReshapeLayer(g, shape=(-1, s16, s16, gfs * 8), name='g/reshape2')
        g = BatchNormLayer(g,
                           act=tf.nn.relu,
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='g/bn3')

        g = DeConv2d(g,
                     gfs * 4, (5, 5),
                     out_size=(s8, s8),
                     strides=(2, 2),
                     batch_size=batch_size,
                     act=None,
                     W_init=W_init,
                     name='g/dconv4')
        g = BatchNormLayer(g,
                           act=tf.nn.relu,
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='g/bn5')

        g = DeConv2d(g,
                     gfs * 2, (5, 5),
                     out_size=(s4, s4),
                     strides=(2, 2),
                     batch_size=batch_size,
                     act=None,
                     W_init=W_init,
                     name='g/dconv6')
        g = BatchNormLayer(g,
                           act=tf.nn.relu,
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='g/bn7')

        g = DeConv2d(g,
                     gfs, (5, 5),
                     out_size=(s2, s2),
                     strides=(2, 2),
                     batch_size=batch_size,
                     act=None,
                     W_init=W_init,
                     name='g/dconv8')
        g = BatchNormLayer(g,
                           act=tf.nn.relu,
                           is_train=is_train,
                           gamma_init=gamma_init,
                           name='g/bn9')

        g = DeConv2d(g,
                     channels, (5, 5),
                     out_size=(img_size, img_size),
                     strides=(2, 2),
                     batch_size=batch_size,
                     act=None,
                     W_init=W_init,
                     name='g/dconv10')

        logits = g.outputs
        g.outputs = tf.nn.tanh(g.outputs)
    return g, logits
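
A minimal usage sketch for the DCGAN-style generator above. The latent dimension of 100 is an assumption; CFG must provide img_size, channels and batch_size as in the original config (not shown here).

z = tf.placeholder(tf.float32, [CFG.batch_size, 100], name='z')
net_g, g_logits = generator(z, is_train=True, reuse=False)
fake_images = net_g.outputs          # tanh output in [-1, 1]
print(fake_images)                   # (batch_size, img_size, img_size, channels)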
Exemplo n.º 19
0
def res_dense_net(lr,
                  factor=4,
                  conv_kernel=3,
                  bn=False,
                  is_train=True,
                  format_out=True,
                  reuse=False,
                  name='RDN'):
    '''Residual Dense net
    Params:
      - factor: super-resolution enhancement factor
      - reuse: whether to reuse the variables (in tf.variable_scope(name))
      - bn: whether to apply batch norm after the conv layers
      - is_train: parameter with the identical name in tl.layers.BatchNormLayer (only used when bn == True)
      - format_out: if False, keep the extra pixels in the channels dimension;
        otherwise re-arrange them into the spatial dimensions (which is exactly what SubvoxelConv does)
    '''
    assert factor in [1, 2, 3, 4]

    G0 = 64
    with tf.variable_scope(name, reuse=reuse):

        n = InputLayer(lr, 'lr') if not isinstance(lr, Layer) else lr

        # shallow feature extraction layers
        n1 = conv3d(n,
                    out_channels=G0,
                    filter_size=conv_kernel,
                    name='shallow1')
        if bn: n1 = batch_norm(n1, is_train=is_train, name='bn1')
        n2 = conv3d(n1,
                    out_channels=G0,
                    filter_size=conv_kernel,
                    name='shallow2')
        if bn: n2 = batch_norm(n2, is_train=is_train, name='bn2')

        n3 = res_dense_block(n2, conv_kernel=conv_kernel, bn=bn, name='rdb1')
        n4 = res_dense_block(n3, conv_kernel=conv_kernel, bn=bn, name='rdb2')
        n5 = res_dense_block(n4, conv_kernel=conv_kernel, bn=bn, name='rdb3')

        # global feature fusion (GFF)
        n6 = concat([n3, n4, n5], name='gff')
        n6 = conv3d(n6, out_channels=G0, filter_size=1, name='gff/conv1')
        if bn: n6 = batch_norm(n6, is_train=is_train, name='bn3')
        n6 = conv3d(n6,
                    out_channels=G0,
                    filter_size=conv_kernel,
                    name='gff/conv2')
        if bn: n6 = batch_norm(n6, is_train=is_train, name='bn4')

        # global residual learning
        n7 = ElementwiseLayer([n6, n1], combine_fn=tf.add, name='grl')

        if format_out:
            if factor == 4:
                n8 = upscale(n7, scale=2, name='upscale1')
                n8 = upscale(n8, scale=2, name='upscale2')
            elif factor == 3:
                n8 = conv3d(n7, out_channels=27, filter_size=3, name='conv3')
                n8 = upscale(n8, scale=3, name='upscale1')
            elif factor == 2:
                #n8 = conv3d(n7, out_channels=8, filter_size=3, name='conv3')
                n8 = upscale(n7, scale=2, name='upscale1')
            else:
                n8 = n7
            out = conv3d(n8,
                         out_channels=1,
                         filter_size=conv_kernel,
                         act=tf.tanh,
                         name='out')
            # out = conv3d(n8, out_channels=1, filter_size=conv_kernel, name='out')

        else:
            out = n7

        return out
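
A minimal usage sketch, under the assumption that the input is a 5-D volume [batch, depth, height, width, channels] and that conv3d, upscale, res_dense_block, concat and batch_norm are the project-specific wrappers defined elsewhere in this file.

lr = tf.placeholder(tf.float32, [1, 16, 16, 16, 1], name='lr')
sr = res_dense_net(lr, factor=4, bn=False, is_train=False, format_out=True, name='RDN')
print(sr.outputs)   # roughly (1, 64, 64, 64, 1): two 2x upscales when factor == 4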
Exemplo n.º 20
0
    def vgg_network(x):
        """VGG19 network for default model."""

        # input x: 0~1
        # bgr: -0.5~0.5 for InputLayer
        # red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=x)
        # bgr = tf.concat(axis=3, values=[blue, green, red])
        # bgr = bgr - 0.5

        # input layer
        net_in = InputLayer(x, name='input')

        # conv1
        net = _conv2d(net_in,
                      64, (3, 3), (1, 1),
                      act=tf.nn.relu,
                      padding='SAME',
                      name='conv1_1')
        net = _conv2d(net,
                      64, (3, 3), (1, 1),
                      act=tf.nn.relu,
                      padding='SAME',
                      name='conv1_2')
        net = _maxpool2d(net, 'pool1')
        # conv2
        net = _conv2d(net,
                      128, (3, 3), (1, 1),
                      act=tf.nn.relu,
                      padding='SAME',
                      name='conv2_1')
        net = _conv2d(net,
                      128, (3, 3), (1, 1),
                      act=tf.nn.relu,
                      padding='SAME',
                      name='conv2_2')
        net = _maxpool2d(net, 'pool2')
        # conv3
        net = _conv2d(net,
                      256, (3, 3), (1, 1),
                      act=tf.nn.relu,
                      padding='SAME',
                      name='conv3_1')
        net = _conv2d(net,
                      256, (3, 3), (1, 1),
                      act=tf.nn.relu,
                      padding='SAME',
                      name='conv3_2')
        net = _conv2d(net,
                      256, (3, 3), (1, 1),
                      act=tf.nn.relu,
                      padding='SAME',
                      name='conv3_3')
        net = _conv2d(net,
                      256, (3, 3), (1, 1),
                      act=tf.nn.relu,
                      padding='SAME',
                      name='conv3_4')
        net = _maxpool2d(net, 'pool3')
        # conv4
        net = _conv2d(net,
                      512, (3, 3), (1, 1),
                      act=tf.nn.relu,
                      padding='SAME',
                      name='conv4_1')
        net = _conv2d(net,
                      512, (3, 3), (1, 1),
                      act=tf.nn.relu,
                      padding='SAME',
                      name='conv4_2')
        net = _conv2d(net,
                      256, (3, 3), (1, 1),
                      act=tf.nn.relu,
                      padding='SAME',
                      name='conv4_3')
        net = _conv2d(net,
                      128, (3, 3), (1, 1),
                      act=tf.nn.relu,
                      padding='SAME',
                      name='conv4_4')

        return net
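
A minimal usage sketch, assuming _conv2d and _maxpool2d are the thin TensorLayer wrappers used throughout this file; per the comment at the top of the function, the input is expected in the 0-1 range.

x = tf.placeholder(tf.float32, [None, 224, 224, 3], name='image')   # values in [0, 1]
feat = vgg_network(x)
print(feat.outputs)   # feature map after conv4_4, at 1/8 of the input resolution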
Exemplo n.º 21
0
import tensorflow as tf
from tensorlayer.layers import InputLayer, TimeDistributedLayer, DenseLayer

sess = tf.InteractiveSession()
batch_size = 32
timestep = 20
input_dim = 100

## no reuse
x = tf.placeholder(dtype=tf.float32, shape=[batch_size, timestep, input_dim], name="encode_seqs")
net = InputLayer(x, name='input')
net = TimeDistributedLayer(net, layer_class=DenseLayer, args={'n_units': 50, 'name': 'dense'}, name='time_dense')

if net.outputs.get_shape().as_list() != [32, 20, 50]:
    raise Exception("shape do not match")
# ... (32, 20, 50)
net.print_params(False)
if net.count_params() != 5050:
    raise Exception("params do not match")


## reuse
def model(x, is_train=True, reuse=False):
    with tf.variable_scope("model", reuse=reuse):
        net = InputLayer(x, name='input')
        net = TimeDistributedLayer(net, layer_class=DenseLayer, args={'n_units': 50, 'name': 'dense'}, name='time_dense')
    return net


net_train = model(x, is_train=True, reuse=False)
net_test = model(x, is_train=False, reuse=True)
Exemplo n.º 22
0
    def __get_network__(self,
                        encode_seq,
                        neighbour_seq,
                        decode_seq,
                        is_train=True,
                        reuse=False):
        w_init = tf.random_normal_initializer(stddev=0.02)
        g_init = tf.random_normal_initializer(1., 0.02)

        with tf.variable_scope(self.model_name, reuse=reuse) as vs:
            tl.layers.set_name_reuse(reuse)
            inputs_x_root = InputLayer(encode_seq, name='in_root')
            inputs_x_nbor = InputLayer(neighbour_seq, name="in_neighbour")

            # encoding neighbour graph information
            n = ReshapeLayer(inputs_x_nbor,
                             (config.batch_size * config.in_seq_length,
                              config.num_neighbour), "reshape1")
            n.outputs = tf.expand_dims(n.outputs, axis=-1)
            n = Conv1d(n,
                       4,
                       4,
                       1,
                       act=tf.identity,
                       padding='SAME',
                       W_init=w_init,
                       name='conv1')
            n = BatchNormLayer(n,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=g_init,
                               name='bn1')
            n = MaxPool1d(n, 2, 2, padding='valid', name='maxpool1')
            n = FlattenLayer(n, name="flatten1")
            n = ReshapeLayer(n, (config.batch_size, config.in_seq_length, -1),
                             name="reshape1_back")

            net_encode = ConcatLayer([inputs_x_root, n],
                                     concat_dim=-1,
                                     name="encode")
            net_decode = InputLayer(decode_seq, name="decode")

            net_rnn = Seq2Seq(
                net_encode,
                net_decode,
                cell_fn=tf.contrib.rnn.BasicLSTMCell,
                n_hidden=config.dim_hidden,
                initializer=tf.random_uniform_initializer(-0.1, 0.1),
                encode_sequence_length=tl.layers.retrieve_seq_length_op(
                    net_encode.outputs),
                decode_sequence_length=tl.layers.retrieve_seq_length_op(
                    net_decode.outputs),
                initial_state_encode=None,
                # dropout=(0.8 if is_train else None),
                dropout=None,
                n_layer=1,
                return_seq_2d=True,
                name='seq2seq')
            # net_out = DenseLayer(net_rnn, n_units=64, act=tf.identity, name='dense1')
            net_out = DenseLayer(net_rnn,
                                 n_units=1,
                                 act=tf.identity,
                                 name='dense2')
            if is_train:
                net_out = ReshapeLayer(
                    net_out, (config.batch_size, config.out_seq_length + 1, 1),
                    name="reshape_out")
            else:
                net_out = ReshapeLayer(net_out, (config.batch_size, 1, 1),
                                       name="reshape_out")

            self.net_rnn = net_rnn

            return net_out
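
With return_seq_2d=True the Seq2Seq layer flattens batch and time into a single axis, which is why a one-unit DenseLayer follows and the prediction is reshaped back afterwards. A small shape-bookkeeping sketch with illustrative sizes (the real values live in config):

batch_size, out_seq_length, dim_hidden = 4, 6, 64
# Seq2Seq(return_seq_2d=True) yields (batch_size * (out_seq_length + 1), dim_hidden)
rnn_out = tf.zeros([batch_size * (out_seq_length + 1), dim_hidden])
dense_out = tf.layers.dense(rnn_out, 1)                  # one value per (sample, step)
pred = tf.reshape(dense_out, [batch_size, out_seq_length + 1, 1])
print(pred)                                              # (4, 7, 1)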
Exemplo n.º 23
0
    def partnetwork(net_in):
        # conv1/2/3 keep the original VGG16 structure unchanged. The framework mainly takes the output
        # of the last conv layer in conv2 and fuses it with the end of conv3, then continues the VGG16
        # convolutions with that result as input; later, in the same way, the final conv output is fused
        # with the mid-level features to produce the final output.
        with tf.name_scope('preprocess') as scope:
            # subtract the global mean for normalization
            net_in=tf.cast(net_in, tf.float32)
            net_in = net_in * 255.0
            mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')
            net_in = net_in - mean
        alpha1 = tf.constant(0.01, shape=[1, 1, 1, 3], dtype=tf.float32, name='alpha1')
        alpha2 = tf.constant(0.0001, dtype=tf.float32, name='alpha2')
        net_in = InputLayer(net_in, name='input')
        """conv1"""
        network = Conv2d(net_in, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',
                         name='conv1_1')
        network = Conv2d(network, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',
                         name='conv1_2')
        network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool1')
        """conv2"""
        network = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',
                         name='conv2_1')
        network_1 = Conv2d(network, n_filter=128, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',
                           name='conv2_2')
        network = MaxPool2d(network_1, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool2')
        """conv3"""
        network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',
                         name='conv3_1')
        network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',
                         name='conv3_2')
        network = Conv2d(network, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',
                         name='conv3_3')
        """低层特征融合"""
        # network_1 = tf.nn.dilation2d(network_1, filter=256, strides=[1, 1, 1, 1], rates=[1, 3, 3, 1], padding='SAME',
        #                              name='dilation1')
        network_1 = Conv2d(network_1, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',
                           name='conv6_1')

        network_1 = MaxPool2d(network_1, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='Pool6_1')
        # replaces the Scale layer of the Caffe framework
        # network_1 = My_ScaleLayer.myscale(trainable=True, name='scale_1')
        network_1 = LayerNormLayer(network_1, scale=True, trainable=True, name="scale_1")
        network_1 = tl.layers.LambdaLayer(Model_base.fun(network_1, alpha1), name='lambda_1')  # tf.multiply(network_1, alpha1)
        # the processed low-level feature maps are added to the main-branch features as input to the next layer
        network = tl.layers.ElementwiseLayer([network, network_1], combine_fn=tf.add, name="Eltwise1")
        network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool3')

        if net_in.outputs.get_shape().as_list() == [None, 224, 224, 3]:
            """conv4"""
            network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',
                             name='conv4_1')
            network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',
                             name='conv4_2')
            network_1 = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu,padding='SAME',
                             name='conv4_3')
            network = MaxPool2d(network_1, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool4')
            """conv5"""
            network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',
                             name='conv5_1')
            network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',
                             name='conv5_2')
            network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',
                             name='conv5_3')
            """高层特征谱图融合"""
            network_1= MaxPool2d(network_1, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool7')
            # replaces the Scale layer of the Caffe framework
            # network_1 = My_ScaleLayer.myscale(trainable=True, name='scale_2')
            network_1 = LayerNormLayer(network_1, scale=True, trainable=True, name="scale_2")
            network_1 = tl.layers.LambdaLayer(Model_base.fun(network_1, alpha2), name='lambda_2')  # network_1 = tf.multiply(network_1, alpha2)
            # the processed lower-level feature maps are added to the main-branch features as input to the next layer
            network = tl.layers.ElementwiseLayer([network, network_1], combine_fn=tf.add, name="Eltwise2")
            network = MaxPool2d(network, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool5')

        elif net_in.outputs.get_shape().as_list() == [None, 448, 448, 3]:
            """conv4"""
            network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',
                             name='conv4_1')
            network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',
                             name='conv4_2')
            #network_1=tf.nn.atrous_conv2d(network,filters=256,rate=1,padding='SAME',name='dilation1')
            network_1 = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',
                             dilation_rate=1,name='conv4_3')
            network = MaxPool2d(network_1, filter_size=(2, 2), strides=(2, 2), padding='SAME', name='pool4')
            """conv5"""
            network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',
                             dilation_rate=1,name='conv5_1')
            network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',
                             dilation_rate=1,name='conv5_2')
            network = Conv2d(network, n_filter=512, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME',
                             dilation_rate=1,name='conv5_3')
            """高层特征谱图融合"""
            pad2d = tl.layers.ZeroPad2d(padding=( 4 , 4 ))(network_1)
            network_1=Conv2d(pad2d,n_filter=512,filter_size=(3,3),strides=(1,1),padding='VALID')
            network_1= MaxPool2d(network_1, filter_size=(3, 3), strides=(3, 3), padding='SAME', name='pool7')
            # 代替caffe框架中的scale层
            #network_1 = My_ScaleLayer.myscale(trainable=True, name='scale_2')
            network_1 = LayerNormLayer(network_1, scale=True, trainable=True, name="scale_2")
            network_1 = tl.layers.LambdaLayer(Model_base.fun(network_1, alpha2), name='lambda_1')#network_1 = tf.multiply(network_1, alpha2)
            # 主分支的特征加低层特征处理后的特征谱图作为下一层输入
            network = tf.add(network, network_1, name="Eltwise2")
            network = MaxPool2d(network, filter_size=(3, 3), strides=(3, 3), padding='SAME', name='pool5')
        else:
            raise Exception("input error: expected an input of shape "
                            "[None, 224, 224, 3] or [None, 448, 448, 3]")
        return network
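
The two fusion points in the function above combine TensorLayer Layer objects element-wise; this has to go through a layer op such as ElementwiseLayer, since calling tf.add directly on Layer objects fails (layers are not tensors). A minimal sketch of the fusion pattern with dummy inputs:

a = InputLayer(tf.zeros([1, 8, 8, 4]), name='demo_a')
b = InputLayer(tf.zeros([1, 8, 8, 4]), name='demo_b')
fused = tl.layers.ElementwiseLayer([a, b], combine_fn=tf.add, name='demo_fuse')
print(fused.outputs)   # (1, 8, 8, 4)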
Exemplo n.º 24
0
    def __get_network__(self,
                        encode_seq,
                        decode_seq,
                        features,
                        is_train=True,
                        reuse=False):
        w_init = tf.random_normal_initializer(stddev=0.02)
        g_init = tf.random_normal_initializer(1., 0.02)

        with tf.variable_scope(self.model_name, reuse=reuse) as vs:
            tl.layers.set_name_reuse(reuse)

            net_features = InputLayer(features, name="in_features")
            if is_train:
                net_features = ReshapeLayer(
                    net_features,
                    (config.batch_size *
                     (config.out_seq_length + 1), config.dim_features),
                    name="reshape_feature_1")
            else:
                net_features = ReshapeLayer(net_features,
                                            (config.batch_size *
                                             (1), config.dim_features),
                                            name="reshape_feature_1")

            net_features = DenseLayer(net_features,
                                      n_units=32,
                                      act=tf.nn.relu,
                                      name='dense_features')

            net_encode = InputLayer(encode_seq, name='in_root')
            net_decode = InputLayer(decode_seq, name="decode")

            net_rnn = Seq2Seq(
                net_encode,
                net_decode,
                cell_fn=tf.contrib.rnn.BasicLSTMCell,
                n_hidden=config.dim_hidden,
                initializer=tf.random_uniform_initializer(-0.1, 0.1),
                encode_sequence_length=tl.layers.retrieve_seq_length_op(
                    net_encode.outputs),
                decode_sequence_length=tl.layers.retrieve_seq_length_op(
                    net_decode.outputs),
                initial_state_encode=None,
                # dropout=(0.8 if is_train else None),
                dropout=None,
                n_layer=1,
                return_seq_2d=True,
                name='seq2seq')

            # net_out = DenseLayer(net_rnn, n_units=64, act=tf.identity, name='dense1')
            net_out = ConcatLayer([net_rnn, net_features],
                                  concat_dim=-1,
                                  name="concat")
            net_out = DenseLayer(net_out,
                                 n_units=1,
                                 act=tf.identity,
                                 name='dense2')
            if is_train:
                net_out = ReshapeLayer(
                    net_out, (config.batch_size, config.out_seq_length + 1, 1),
                    name="reshape_out")
            else:
                net_out = ReshapeLayer(net_out, (config.batch_size, 1, 1),
                                       name="reshape_out")

            self.net_rnn = net_rnn

            return net_out
Exemplo n.º 25
0
    # (only the tail of a frame-preprocessing helper survives here; its "def" line
    #  and the preceding cropping / down-sampling steps are missing from the source)
    I[I != 0] = 1
    return I.astype(np.float).ravel()


env = gym.make("Pong-v0")
observation = env.reset()
prev_x = None
running_reward = None
reward_sum = 0
episode_number = 0

xs, ys, rs = [], [], []
# observation for training and inference
t_states = tf.placeholder(tf.float32, shape=[None, D])
# policy network
network = InputLayer(t_states, name='input')
network = DenseLayer(network, n_units=H, act=tf.nn.relu, name='hidden')
network = DenseLayer(network, n_units=3, name='output')
probs = network.outputs
sampling_prob = tf.nn.softmax(probs)

t_actions = tf.placeholder(tf.int32, shape=[None])
t_discount_rewards = tf.placeholder(tf.float32, shape=[None])
loss = tl.rein.cross_entropy_reward_loss(probs, t_actions, t_discount_rewards)
train_op = tf.train.RMSPropOptimizer(learning_rate, decay_rate).minimize(loss)

with tf.Session() as sess:
    tl.layers.initialize_global_variables(sess)
    # if resume:
    #     load_params = tl.files.load_npz(name=model_file_name+'.npz')
    #     tl.files.assign_params(sess, load_params, network)
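
The snippet above only builds the policy network and its REINFORCE-style loss; a short sketch of the action-sampling step such a setup typically feeds inside the episode loop (the helper name and the [1, 2, 3] action ids mirror the classic Pong policy-gradient example and are assumptions here):

import numpy as np

def choose_action(sess, state_vector):
    # sample one of the three actions from the softmax policy
    prob = sess.run(sampling_prob, feed_dict={t_states: state_vector.reshape(1, -1)})
    return np.random.choice([1, 2, 3], p=prob.flatten())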
Exemplo n.º 26
0
    def __get_network__(self,
                        encode_seq,
                        decode_seq,
                        is_train=True,
                        reuse=False):
        w_init = tf.random_normal_initializer(stddev=0.02)
        g_init = tf.random_normal_initializer(1., 0.02)

        with tf.variable_scope("seq2seq_model", reuse=reuse) as vs:
            tl.layers.set_name_reuse(reuse)
            net_encode = InputLayer(encode_seq, name='in_root')

            net_decode = InputLayer(decode_seq, name="decode")

            net_rnn = Seq2Seq(
                net_encode,
                net_decode,
                cell_fn=tf.contrib.rnn.BasicLSTMCell,
                n_hidden=config.dim_hidden,
                initializer=tf.random_uniform_initializer(-0.1, 0.1),
                encode_sequence_length=tl.layers.retrieve_seq_length_op(
                    net_encode.outputs),
                decode_sequence_length=tl.layers.retrieve_seq_length_op(
                    net_decode.outputs),
                initial_state_encode=None,
                # dropout=(0.8 if is_train else None),
                dropout=None,
                n_layer=1,
                return_seq_2d=True,
                name='seq2seq')
            # self.net_rnn_seq2seq = net_rnn
            net_rnn_seq2seq = net_rnn

            net_out_seq2seq = DenseLayer(net_rnn,
                                         n_units=1,
                                         act=tf.identity,
                                         name='dense2')
            if is_train:
                net_out_seq2seq = ReshapeLayer(
                    net_out_seq2seq,
                    (config.batch_size, config.out_seq_length + 1, 1),
                    name="reshape_out")
            else:
                net_out_seq2seq = ReshapeLayer(net_out_seq2seq,
                                               (config.batch_size, 1, 1),
                                               name="reshape_out")

            # net_out_seq2seq = net_out_seq2seq
            # net_out = DenseLayer(net_rnn, n_units=64, act=tf.identity, name='dense1')
            # net_out = DenseLayer(net_rnn, n_units=1, act=tf.identity, name='dense2')
            # net_out = ReshapeLayer(net_out, (config.batch_size, config.out_seq_length + 1, 1), name="reshape_out")

        with tf.variable_scope(self.model_name, reuse=reuse) as vs:
            tl.layers.set_name_reuse(reuse)
            net_encode_query = InputLayer(self.query_x, name='in_root_query')

            net_decode_query = InputLayer(self.query_decode_seq,
                                          name="decode_query")

            net_rnn_query = RNNLayer(
                net_decode_query,
                cell_fn=tf.contrib.rnn.BasicLSTMCell,
                cell_init_args={"forget_bias": 1.0},
                n_hidden=config.query_dim_hidden,
                initializer=tf.random_uniform_initializer(-0.1, 0.1),
                n_steps=config.out_seq_length,
                return_last=True,
                # return_last=False,
                # return_seq_2d=True,
                name="rnn_query")
            net_rnn_query = ExpandDimsLayer(net_rnn_query,
                                            axis=1,
                                            name="rnn_query_expand")
            net_rnn_query = TileLayer(net_rnn_query,
                                      [1, config.out_seq_length, 1],
                                      name="rnn_query_tile")
            net_rnn_query = ReshapeLayer(
                net_rnn_query, (config.batch_size * config.out_seq_length,
                                config.query_dim_hidden),
                name="rnn_query_reshape")

            net_traffic_state = InputLayer(self.traffic_state,
                                           name="in_traffic_state")

            if is_train:
                net_rnn_traffic = ReshapeLayer(
                    net_rnn_seq2seq,
                    (config.batch_size, config.out_seq_length + 1,
                     config.dim_hidden),
                    name="reshape_traffic_q1")
                net_rnn_traffic.outputs = tf.slice(
                    net_rnn_traffic.outputs, [0, 0, 0], [
                        config.batch_size, config.out_seq_length,
                        config.dim_hidden
                    ],
                    name="slice_traffic_q")
                net_rnn_traffic = ReshapeLayer(
                    net_rnn_traffic,
                    (config.batch_size * config.out_seq_length,
                     config.dim_hidden),
                    name="reshape_traffic_q2")
                net_out = ConcatLayer([net_rnn_traffic, net_rnn_query],
                                      concat_dim=-1,
                                      name="concat_traffic_query1")
            else:
                net_out = ConcatLayer([net_traffic_state, net_rnn_query],
                                      concat_dim=-1,
                                      name="concat_traffic_query2")

            # net_out = DenseLayer(net_out, n_units=128, act=tf.nn.relu, name="dense_query1")
            # net_out = DenseLayer(net_out, n_units=32, act=tf.nn.relu, name="dense_query2")
            net_out = DenseLayer(net_out,
                                 n_units=1,
                                 act=tf.identity,
                                 name="dense_query3")
            # net_out = ReshapeLayer(net_out, (config.batch_size, config.out_seq_length + 1, 1), name="reshape_out")
            # if is_train:
            net_out = ReshapeLayer(
                net_out, (config.batch_size, config.out_seq_length, 1),
                name="reshape_out")
            # else:
            #    net_out = ReshapeLayer(net_out, (config.batch_size, 1, 1), name="reshape_out")
        return net_rnn_seq2seq, net_out_seq2seq, net_rnn_query, net_out
Exemplo n.º 27
0
def generator(inputs, is_train=True, reuse=False):
    image_size = 128
    #s32 = image_size // 32
    gf_dim = 64  # Dimension of gen filters in first conv layer. [64]
    c_dim = 1  # n_color 1
    w_init = tf.glorot_normal_initializer()
    gamma_init = tf.random_normal_initializer(1., 0.02)

    with tf.name_scope("GENERATOR"):

        with tf.variable_scope("generator", reuse=reuse):

            with tf.name_scope("net_in"):
                net_in = InputLayer(inputs, name='g/in')
        #############################################################################
            with tf.name_scope("layer0"):
                net_h0 = DenseLayer(net_in,
                                    n_units=(gf_dim * 32 * 4 * 4),
                                    W_init=w_init,
                                    act=tf.identity,
                                    name='g/h0/lin')
                net_h0 = ReshapeLayer(net_h0,
                                      shape=[-1, 4, 4, gf_dim * 32],
                                      name='g/h0/reshape')
                net_h0 = BatchNormLayer(net_h0,
                                        decay=0.9,
                                        act=tf.nn.relu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='g/h0/batch_norm')

            with tf.name_scope("layer1"):
                net_h1 = DeConv2d(net_h0,
                                  gf_dim * 8, (5, 5),
                                  strides=(2, 2),
                                  padding='SAME',
                                  act=None,
                                  W_init=w_init,
                                  name='g/h1/decon2d')
                net_h1 = BatchNormLayer(net_h1,
                                        decay=0.9,
                                        act=tf.nn.relu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='g/h1/batch_norm')

            with tf.name_scope("layer2"):
                net_h2 = DeConv2d(net_h1,
                                  gf_dim * 4, (5, 5),
                                  strides=(2, 2),
                                  padding='SAME',
                                  act=None,
                                  W_init=w_init,
                                  name='g/h2/decon2d')
                net_h2 = BatchNormLayer(net_h2,
                                        decay=0.9,
                                        act=tf.nn.relu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='g/h2/batch_norm')

            with tf.name_scope("layer3"):
                net_h3 = DeConv2d(net_h2,
                                  gf_dim * 2, (5, 5),
                                  strides=(2, 2),
                                  padding='SAME',
                                  act=None,
                                  W_init=w_init,
                                  name='g/h3/decon2d')
                net_h3 = BatchNormLayer(net_h3,
                                        decay=0.9,
                                        act=tf.nn.relu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='g/h3/batch_norm')

            with tf.name_scope("layer4"):
                net_h4 = DeConv2d(net_h3,
                                  gf_dim, (5, 5),
                                  strides=(2, 2),
                                  padding='SAME',
                                  act=None,
                                  W_init=w_init,
                                  name='g/h4/decon2d')
                net_h4 = BatchNormLayer(net_h4,
                                        decay=0.9,
                                        act=tf.nn.relu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='g/h4/batch_norm')

            with tf.name_scope("layer5"):
                net_h5 = DeConv2d(net_h4,
                                  c_dim, (5, 5),
                                  strides=(2, 2),
                                  padding='SAME',
                                  act=None,
                                  W_init=w_init,
                                  name='g/h5/decon2d')
                net_h5.outputs = tf.nn.tanh(net_h5.outputs)

        return net_h5
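
A minimal usage sketch for the generator above (the latent size of 100 is an assumption): starting from the 4x4 seed produced by the dense layer, five stride-2 transposed convolutions reach the 128x128, single-channel output implied by image_size and c_dim.

z = tf.placeholder(tf.float32, [None, 100], name='z')
net_g = generator(z, is_train=True, reuse=False)
print(net_g.outputs)   # (?, 128, 128, 1), tanh-activated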
Exemplo n.º 28
0
    def __get_network__(self,
                        encode_seq,
                        neighbour_seq,
                        decode_seq,
                        features,
                        features_full,
                        is_train=True,
                        reuse=False):
        w_init = tf.random_normal_initializer(stddev=0.02)
        g_init = tf.random_normal_initializer(1., 0.02)

        with tf.variable_scope(self.model_name + "_spatial",
                               reuse=reuse) as vs:
            tl.layers.set_name_reuse(reuse)
            inputs_x_root = InputLayer(encode_seq, name='in_root')
            inputs_x_nbor = InputLayer(neighbour_seq, name="in_neighbour")

            # encoding neighbour graph information
            n = ReshapeLayer(inputs_x_nbor,
                             (config.batch_size * config.in_seq_length,
                              config.num_neighbour), "reshape1")
            n.outputs = tf.expand_dims(n.outputs, axis=-1)
            n = Conv1d(n,
                       4,
                       4,
                       1,
                       act=tf.identity,
                       padding='SAME',
                       W_init=w_init,
                       name='conv1')
            n = BatchNormLayer(n,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=g_init,
                               name='bn1')
            n = MaxPool1d(n, 2, 2, padding='valid', name='maxpool1')
            n = FlattenLayer(n, name="flatten1")
            n = ReshapeLayer(n, (config.batch_size, config.in_seq_length, -1),
                             name="reshape1_back")

            net_encode = ConcatLayer([inputs_x_root, n],
                                     concat_dim=-1,
                                     name="encode")
            net_decode = InputLayer(decode_seq, name="decode")

            net_rnn = Seq2Seq(
                net_encode,
                net_decode,
                cell_fn=tf.contrib.rnn.BasicLSTMCell,
                n_hidden=config.dim_hidden,
                initializer=tf.random_uniform_initializer(-0.1, 0.1),
                encode_sequence_length=tl.layers.retrieve_seq_length_op(
                    net_encode.outputs),
                decode_sequence_length=tl.layers.retrieve_seq_length_op(
                    net_decode.outputs),
                initial_state_encode=None,
                # dropout=(0.8 if is_train else None),
                dropout=None,
                n_layer=1,
                return_seq_2d=True,
                name='seq2seq')
            net_rnn_seq2seq = net_rnn

            net_spatial_out = DenseLayer(net_rnn,
                                         n_units=1,
                                         act=tf.identity,
                                         name='dense2')
            if is_train:
                net_spatial_out = ReshapeLayer(
                    net_spatial_out,
                    (config.batch_size, config.out_seq_length + 1, 1),
                    name="reshape_out")
            else:
                net_spatial_out = ReshapeLayer(net_spatial_out,
                                               (config.batch_size, 1, 1),
                                               name="reshape_out")

        with tf.variable_scope(self.model_name + "_wide", reuse=reuse) as vs:
            tl.layers.set_name_reuse(reuse)
            # Features
            net_features = InputLayer(features, name="in_features")
            net_features_full = InputLayer(features_full,
                                           name="in_features_full")
            net_features_full = ReshapeLayer(
                net_features_full,
                (config.batch_size *
                 (config.out_seq_length + 1), config.dim_features),
                name="reshape_feature_full_1")
            if is_train:
                net_features = ReshapeLayer(
                    net_features,
                    (config.batch_size *
                     (config.out_seq_length + 1), config.dim_features),
                    name="reshape_feature_1")
            else:
                net_features = ReshapeLayer(net_features,
                                            (config.batch_size *
                                             (1), config.dim_features),
                                            name="reshape_feature_1")

            self.net_features_dim = 32
            net_features = DenseLayer(net_features,
                                      n_units=self.net_features_dim,
                                      act=tf.nn.relu,
                                      name='dense_features')
            net_features_full = DenseLayer(net_features_full,
                                           n_units=self.net_features_dim,
                                           act=tf.nn.relu,
                                           name='dense_features_full')
            # self.net_features = net_features

            net_wide_out = ConcatLayer([net_rnn_seq2seq, net_features],
                                       concat_dim=-1,
                                       name="concat_features")
            net_wide_out = DenseLayer(net_wide_out,
                                      n_units=1,
                                      act=tf.identity,
                                      name='dense2')

            if is_train:
                net_wide_out = ReshapeLayer(
                    net_wide_out,
                    (config.batch_size, config.out_seq_length + 1, 1),
                    name="reshape_out")
            else:
                net_wide_out = ReshapeLayer(net_wide_out,
                                            (config.batch_size, 1, 1),
                                            name="reshape_out")

        with tf.variable_scope(self.model_name + "_query", reuse=reuse) as vs:
            tl.layers.set_name_reuse(reuse)

            net_decode_query = InputLayer(self.query_decode_seq,
                                          name="decode_query")

            net_rnn_query = RNNLayer(
                net_decode_query,
                cell_fn=tf.contrib.rnn.BasicLSTMCell,
                cell_init_args={"forget_bias": 1.0},
                n_hidden=config.query_dim_hidden,
                initializer=tf.random_uniform_initializer(-0.1, 0.1),
                n_steps=config.out_seq_length,
                return_last=True,

                # return_last=False,
                # return_seq_2d=True,
                name="rnn_query")
            '''
            net_rnn_query = DynamicRNNLayer(
                net_decode_query,
                cell_fn=tf.contrib.rnn.BasicLSTMCell,
                cell_init_args={"forget_bias": 1.0},
                # n_hidden=config.query_dim_hidden,
                n_hidden=32,
                initializer=tf.random_uniform_initializer(-0.1, 0.1),
                return_last=True,
                # dropout=0.8,
                sequence_length=tl.layers.retrieve_seq_length_op(net_decode_query.outputs),
                # return_last=False,
                # return_seq_2d=True,
                name="rnn_query_dynamic"
            )
            '''

            net_rnn_query = ExpandDimsLayer(net_rnn_query,
                                            axis=1,
                                            name="rnn_query_expand")
            net_rnn_query = TileLayer(net_rnn_query,
                                      [1, config.out_seq_length, 1],
                                      name="rnn_query_tile")
            net_rnn_query = ReshapeLayer(
                net_rnn_query, (config.batch_size * config.out_seq_length,
                                config.query_dim_hidden),
                name="rnn_query_reshape")
            # net_rnn_query = ReshapeLayer(net_rnn_query, (config.batch_size * config.out_seq_length, 32), name="rnn_query_reshape")

            # self.net_rnn_query = net_rnn_query

            net_traffic_state = InputLayer(self.traffic_state,
                                           name="in_traffic_state")
            '''
            if is_train:
                net_rnn_traffic = ReshapeLayer(net_rnn_seq2seq, (config.batch_size, config.out_seq_length + 1, config.dim_hidden), name="reshape_traffic_q1")
                net_rnn_traffic.outputs = tf.slice(net_rnn_traffic.outputs, [0, 0, 0], [config.batch_size, config.out_seq_length, config.dim_hidden], name="slice_traffic_q")
                net_rnn_traffic = ReshapeLayer(net_rnn_traffic, (config.batch_size * config.out_seq_length, config.dim_hidden), name="reshape_traffic_q2")

                net_features_traffic = ReshapeLayer(net_features, (config.batch_size, config.out_seq_length + 1, self.net_features_dim), name="reshape_features_q1")
                net_features_traffic.outputs = tf.slice(net_features_traffic.outputs, [0, 0, 0], [config.batch_size, config.out_seq_length, self.net_features_dim], name="slice_features_q")
                net_features_traffic = ReshapeLayer(net_features_traffic, (config.batch_size * config.out_seq_length, self.net_features_dim), name="reshape_features_q2")

                net_query_out = ConcatLayer([net_rnn_traffic, net_features_traffic, net_rnn_query], concat_dim=-1, name="concat_traffic_query1")
                # net_query_out = ConcatLayer([net_rnn_traffic, net_rnn_query], concat_dim=-1, name="concat_traffic_query1")
            else:
            '''
            net_features_traffic = ReshapeLayer(
                net_features_full,
                (config.batch_size, config.out_seq_length + 1,
                 self.net_features_dim),
                name="reshape_features_q1")
            net_features_traffic.outputs = tf.slice(
                net_features_traffic.outputs, [0, 0, 0], [
                    config.batch_size, config.out_seq_length,
                    self.net_features_dim
                ],
                name="slice_features_q")
            net_features_traffic = ReshapeLayer(
                net_features_traffic,
                (config.batch_size * config.out_seq_length,
                 self.net_features_dim),
                name="reshape_features_q2")

            net_query_out = ConcatLayer(
                [net_traffic_state, net_features_traffic, net_rnn_query],
                concat_dim=-1,
                name="concat_traffic_query1")
            # net_rnn_traffic = ReshapeLayer(net_rnn_seq2seq, (config.batch_size, config.out_seq_length + 1, config.dim_hidden), name="reshape_traffic_q1")
            # net_rnn_traffic.outputs = tf.slice(net_rnn_traffic.outputs, [0, 0, 0], [config.batch_size, config.out_seq_length, config.dim_hidden], name="slice_traffic_q")
            # net_rnn_traffic = ReshapeLayer(net_rnn_traffic, (config.batch_size * config.out_seq_length, config.dim_hidden), name="reshape_traffic_q2")
            # net_query_out = ConcatLayer([net_rnn_traffic, net_features_traffic, net_rnn_query], concat_dim=-1, name="concat_traffic_query1")

            # net_out = DenseLayer(net_out, n_units=128, act=tf.nn.relu, name="dense_query1")
            # net_out = DenseLayer(net_out, n_units=64, act=tf.nn.relu, name="dense_query2")
            # net_query_out = DropoutLayer(net_query_out, keep=0.8, is_fix=True, is_train=is_train, name='drop_query3')
            net_query_out = DenseLayer(net_query_out,
                                       n_units=1,
                                       act=tf.identity,
                                       name="dense_query3")
            # net_out = ReshapeLayer(net_out, (config.batch_size, config.out_seq_length + 1, 1), name="reshape_out")
            # if is_train:
            net_query_out = ReshapeLayer(
                net_query_out, (config.batch_size, config.out_seq_length, 1),
                name="reshape_out")
            # else:
            #    net_out = ReshapeLayer(net_out, (config.batch_size, 1, 1), name="reshape_out")

            # TODO residual net
            '''
            if is_train:
                net_query_out.outputs = tf.add(
                    net_query_out.outputs,
                    tf.slice(net_wide_out.outputs, [0, 0, 0], [config.batch_size, config.out_seq_length, 1]),
                    name="res_add"
                )
            else:
            '''
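            # Residual connection: wrap the externally supplied base prediction as a layer and
            # add it to the query head output, so the network only learns a correction.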
            net_base_pred = InputLayer(self.base_pred, name="in_net_base_pred")
            net_query_out.outputs = tf.add(net_query_out.outputs,
                                           net_base_pred.outputs,
                                           name="res_add")

        return net_rnn_seq2seq, net_spatial_out, net_wide_out, net_rnn_query, net_query_out
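The reshape -> slice -> reshape pattern above trims the extra decoder step before the per-step dense layer. A minimal standalone sketch of the same pattern using plain TF 1.x ops instead of the TensorLayer wrappers (the sizes below are hypothetical):

import tensorflow as tf

# hypothetical sizes
batch_size, out_seq_length, dim_hidden = 4, 6, 32

# features flattened over time, including one extra step from the decoder
flat = tf.placeholder('float32', [batch_size * (out_seq_length + 1), dim_hidden])

x = tf.reshape(flat, [batch_size, out_seq_length + 1, dim_hidden])    # recover the time axis
x = tf.slice(x, [0, 0, 0], [batch_size, out_seq_length, dim_hidden])  # drop the final step
x = tf.reshape(x, [batch_size * out_seq_length, dim_hidden])          # flatten again for the dense layer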
Example No. 29
def DBPN_front(input,
               feat=64,
               base_filter=32,
               upscale=False,
               reuse=False,
               name='dbpn'):
    '''
    DBPN with the last several layers removed.
    out_size = in_size * 2
    Params:
        input: tf.Tensor, the input volume (3-D convolutions are used, so a 5-D tensor is expected)
        feat: int, number of filters in the initial feature-extraction convolution
        base_filter: int, number of filters in the up/down projection blocks
        upscale: boolean, unused in the body shown here
        reuse: boolean, whether to reuse the variables of the scope
        name: string, the name of the variable scope
    '''
    act = prelu
    kernel = 3

    stride = 2
    additional_up_down_pair = 2

    with tf.variable_scope(name, reuse=reuse):
        n_channels = input.shape[-1]
        x = InputLayer(input, name='input')

        # initial feature extraction
        x = conv3d(x, out_channels=feat, filter_size=3, act=act, name='feat0')
        x = conv3d(x,
                   out_channels=base_filter,
                   filter_size=1,
                   act=act,
                   name='feat1')

        # back-projection
        h1 = _up_block(x,
                       n_filters=base_filter,
                       k_size=kernel,
                       stride=stride,
                       act=act,
                       name='up1')
        l1 = _down_block(h1,
                         n_filters=base_filter,
                         k_size=kernel,
                         stride=stride,
                         act=act,
                         name='down1')
        h2 = _up_block(l1,
                       n_filters=base_filter,
                       k_size=kernel,
                       stride=stride,
                       act=act,
                       name='up2')

        concat_h = concat([h2, h1])
        l = _d_down_block(concat_h,
                          n_filters=base_filter,
                          k_size=kernel,
                          stride=stride,
                          act=act,
                          name='down2')

        concat_l = concat([l, l1])
        h = _d_up_block(concat_l,
                        n_filters=base_filter,
                        k_size=kernel,
                        stride=stride,
                        act=act,
                        name='up3')

        for i in range(0, additional_up_down_pair - 1):
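            # dense back-projection: concatenate all previous projections before projecting again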
            concat_h = concat([h, concat_h])
            l = _d_down_block(concat_h,
                              n_filters=base_filter,
                              k_size=kernel,
                              stride=stride,
                              act=act,
                              name='down%d' % (i + 3))

            concat_l = concat([l, concat_l])
            h = _d_up_block(concat_l,
                            n_filters=base_filter,
                            k_size=kernel,
                            stride=stride,
                            act=act,
                            name='up%d' % (i + 4))

        concat_h = concat([h, concat_h])

        return concat_h
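A minimal sketch of how DBPN_front might be called, assuming the helper blocks it relies on (prelu, conv3d, _up_block, _down_block, _d_up_block, _d_down_block, concat) are defined in the surrounding module; the input shape below is hypothetical:

import tensorflow as tf

# hypothetical 5-D input volume: [batch, depth, height, width, channels]
volume = tf.placeholder('float32', [1, 16, 32, 32, 1], name='lr_volume')

features = DBPN_front(volume, feat=64, base_filter=32, name='dbpn')
print(features.outputs.get_shape())  # spatial size doubled; channels grow with the dense concatenations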
Example No. 30
    def build_classifier(self, im, inf_norm, reuse=False):
        with tf.variable_scope('C', reuse=reuse) as vs:
            tensorlayer.layers.set_name_reuse(reuse)

            x = tf.reshape(im, [-1, 3, 32, 32])
            x = tf.transpose(x, [0, 2, 3, 1])

            xmin = tf.clip_by_value(x - inf_norm, 0., 1.)
            xmax = tf.clip_by_value(x + inf_norm, 0., 1.)
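            # sample a random point inside the L-infinity ball of radius inf_norm around x (clipped to [0, 1])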
            x = tf.random_uniform(tf.shape(x), xmin, xmax, dtype=tf.float32)

            # Crop the central [height, width] of the image.
            # x = tf.image.resize_image_with_crop_or_pad(x, 24, 24)
            x = tf.map_fn(
                lambda frame: tf.image.per_image_standardization(frame), x)

            net = InputLayer(x)
            net = Conv2dLayer(net,
                              act=tf.nn.relu,
                              shape=[5, 5, 3, 64],
                              name="conv1")
            net = MaxPool2d(net,
                            filter_size=(3, 3),
                            strides=(2, 2),
                            name="pool1")
            net = LocalResponseNormLayer(net,
                                         depth_radius=4,
                                         bias=1.0,
                                         alpha=0.001 / 9.0,
                                         beta=0.75,
                                         name="norm1")
            net = Conv2dLayer(net,
                              act=tf.nn.relu,
                              shape=[5, 5, 64, 64],
                              name="conv2")
            net = LocalResponseNormLayer(net,
                                         depth_radius=4,
                                         bias=1.0,
                                         alpha=0.001 / 9.0,
                                         beta=0.75,
                                         name="norm2")
            net = MaxPool2d(net,
                            filter_size=(3, 3),
                            strides=(2, 2),
                            name="pool2")
            net = FlattenLayer(net, name="flatten_1")
            net = DenseLayer(net, n_units=384, name="local3", act=tf.nn.relu)

            net = DenseLayer(net, n_units=192, name="local4", act=tf.nn.relu)
            net = DenseLayer(net,
                             n_units=10,
                             name="softmax_linear",
                             act=tf.identity)

            cla_vars = tf.contrib.framework.get_variables(vs)

            def name_fixer(var):
                return var.op.name.replace("W", "weights") \
                                    .replace("b", "biases") \
                                    .replace("weights_conv2d", "weights") \
                                    .replace("biases_conv2d", "biases")

            cla_vars = {name_fixer(var): var for var in cla_vars}
            return net.outputs, cla_vars
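The name_fixer remapping above rewrites the TensorLayer variable names (W/b) to the weights/biases convention, which suggests the classifier is restored from a pre-trained CIFAR-10 checkpoint. A minimal restore sketch, assuming a model object exposing this method and an input placeholder im; the checkpoint path is hypothetical:

logits, cla_vars = model.build_classifier(im, inf_norm=0.0)  # `model` and `im` are assumed to exist

saver = tf.train.Saver(var_list=cla_vars)  # keys are the remapped checkpoint variable names
with tf.Session() as sess:
    saver.restore(sess, 'checkpoints/cifar10_classifier.ckpt')  # hypothetical checkpoint path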
Example No. 31
import tensorflow as tf
from tensorlayer.layers import SubpixelConv1d, SubpixelConv2d, InputLayer, Conv1d, Conv2d

## 1D
t_signal = tf.placeholder('float32', [10, 100, 4], name='x')
n = InputLayer(t_signal, name='in')
n = Conv1d(n, 32, 3, 1, padding='SAME', name='conv1d')
n = SubpixelConv1d(n, scale=2, name='subpixel')
print(n.outputs.shape)
# ... (10, 200, 16)
n.print_layers()
n.print_params(False)

shape = n.outputs.get_shape().as_list()
if shape != [10, 200, 16]:
    raise Exception("shape does not match")

if len(n.all_layers) != 2:
    raise Exception("number of layers does not match")

if len(n.all_params) != 2:
    raise Exception("number of params does not match")

if n.count_params() != 416:
    raise Exception("number of params does not match")

## 2D
x = tf.placeholder('float32', [10, 100, 100, 3], name='x')
n = InputLayer(x, name='in')
n = Conv2d(n, 32, (3, 2), (1, 1), padding='SAME', name='conv2d')
n = SubpixelConv2d(n, scale=2, name='subpixel2d')
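The 2-D part of the script ends here in the source. The same shape arithmetic applies: Conv2d yields [10, 100, 100, 32], and SubpixelConv2d with scale=2 trades channels for space, giving [10, 200, 200, 32 / 2**2] = [10, 200, 200, 8]. A check mirroring the 1-D one might look like:

shape = n.outputs.get_shape().as_list()
if shape != [10, 200, 200, 8]:
    raise Exception("shape does not match")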
Example No. 32
def UNet_A(lf_extra,
           n_slices,
           output_size,
           is_train=True,
           reuse=False,
           name='unet'):
    '''U-net based VCD-Net for light-field reconstruction.
    Params:
        lf_extra: tf.tensor
            In shape of [batch, height, width, n_num^2], the extracted views from the light-field image.
        n_slices: int
            The number of slices in the 3-D reconstruction.
        output_size: list of int
            Lateral size of the 3-D reconstruction, i.e., [height, width].
        is_train: boolean
            See tl.layers.BatchNormLayer.
        reuse: boolean
            Whether to reuse the variables or not. See tf.variable_scope() for details.
        name: string
            The name of the variable scope.
    Return:
        The 3-D reconstruction in shape of [batch, height, width, depth=n_slices].
    '''
    n_interp = 4
    # _, w, h, _ = lf_extra.shape
    #channels_interp = in_channels.value
    channels_interp = 128
    act = tf.nn.relu

    with tf.variable_scope(name, reuse=reuse):
        n = InputLayer(lf_extra, 'lf_extra')
        n = conv2d(n, n_filter=channels_interp, filter_size=7, name='conv1')

        ## Up-scale input
        with tf.variable_scope('interp'):
            for i in range(n_interp):
                channels_interp = channels_interp // 2  # halve channels; integer division keeps n_filter an int
                n = SubpixelConv2d(n, scale=2, name='interp/subpixel%d' % i)
                n = conv2d(n,
                           n_filter=channels_interp,
                           filter_size=3,
                           name='conv%d' % i)

            n = conv2d(n,
                       n_filter=channels_interp,
                       filter_size=3,
                       name='conv_final')  # 176*176
            n = batch_norm(n, is_train=is_train, name='bn_final')
            n = ReluLayer(n, name='reul_final')

        pyramid_channels = [
            128, 256, 512, 512, 512
        ]  # number of output channels of each conv layer in the encoder
        encoder_layers = []
        with tf.variable_scope('encoder'):
            n = conv2d(n, n_filter=64, filter_size=3, stride=1, name='conv0')
            n = batch_norm(n, is_train=is_train, name='bn_0')
            n = ReluLayer(n, name='reul0')

            for idx, nc in enumerate(pyramid_channels):
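                # each level: conv to the pyramid width, add a channel-padded shortcut, then downsample 2x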
                encoder_layers.append(n)  # keep n0, n1, n2, n3, n4 (but not n5) as decoder skip connections
                print('encoder %d : %s' % (idx, str(n.outputs.get_shape())))
                n = conv2d(n,
                           n_filter=nc,
                           filter_size=3,
                           stride=1,
                           name='conv%d' % (idx + 1))
                n = batch_norm(n, is_train=is_train, name='bn%d' % (idx + 1))
                n = ReluLayer(n, name='reul%d' % (idx + 1))
                n1 = PadDepth(encoder_layers[-1], desired_channels=nc)
                n = merge([n, n1], name='add%d' % (idx + 1))
                n = tl.layers.MaxPool2d(n,
                                        filter_size=(3, 3),
                                        strides=(2, 2),
                                        name='maxplool%d' % (idx + 1))

        nl = len(encoder_layers)
        with tf.variable_scope('decoder'):
            _, h, w, _ = encoder_layers[-1].outputs.shape.as_list()
            n = UpSampling2dLayer(n,
                                  size=(h, w),
                                  is_scale=False,
                                  name='upsamplimg')

            for idx in range(nl - 1, -1, -1):  # idx = 4,3,2,1,0
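                # concatenate the matching encoder skip connection, convolve, then upsample
                # to the next (larger) encoder resolution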
                if idx > 0:
                    _, h, w, _ = encoder_layers[idx -
                                                1].outputs.shape.as_list()
                    out_size = (h, w)
                    out_channels = pyramid_channels[idx - 1]
                else:
                    #out_size = None
                    out_channels = n_slices

                print('decoder %d : %s' % (idx, str(n.outputs.get_shape())))
                n = ConcatLayer([encoder_layers[idx], n],
                                concat_dim=-1,
                                name='concat%d' % (nl - idx))
                n = conv2d(n,
                           out_channels,
                           filter_size=3,
                           stride=1,
                           name='conv%d' % (nl - idx + 1))
                n = ReluLayer(n, name='relu%d' % (nl - idx + 1))
                n = batch_norm(n,
                               is_train=is_train,
                               name='bn%d' % (nl - idx + 1))
                #n = UpConv(n, 512, filter_size=4, factor=2, name='upconv2')
                n = UpSampling2dLayer(n,
                                      size=out_size,
                                      is_scale=False,
                                      name='upsamplimg%d' % (nl - idx + 1))

                #n = DropoutLayer(n, keep=0.5, is_fix=True, is_train=is_train, name='dropout1')

            if n.outputs.shape[1] != output_size[0]:
                n = UpSampling2dLayer(n,
                                      size=output_size,
                                      is_scale=False,
                                      name='resize_final')
            #n = conv2d(n, n_slices, filter_size=3, stride=1,name='conv_final' )
            n.outputs = tf.tanh(n.outputs)
            #n.outputs = tf.nn.relu(n.outputs)
            #n = conv2d(n, n_filter=n_slices, filter_size=3, act=tf.tanh, name='out')
            return n
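A minimal sketch of calling UNet_A, assuming the helper wrappers it uses (conv2d, batch_norm, ReluLayer, PadDepth, merge) are defined in the surrounding module; the view count (n_num^2 = 121 for an 11 x 11 microlens grid), patch size, and slice count below are hypothetical:

import tensorflow as tf

# hypothetical extracted light-field views: [batch, height, width, n_num^2]
lf_views = tf.placeholder('float32', [1, 11, 11, 121], name='lf_extra')

recon = UNet_A(lf_views, n_slices=31, output_size=[176, 176], is_train=False, reuse=False, name='unet')
print(recon.outputs.get_shape())  # (1, 176, 176, 31): lateral size follows output_size, depth = n_slices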