Code Example #1
File: utils.py  Project: eglrp/U-Net-2
def u_net_2d_64_1024_deconv(x, n_out=2):
    import tensorflow as tf
    import tensorlayer as tl
    from tensorlayer.layers import InputLayer, Conv2d, MaxPool2d, DeConv2d, ConcatLayer
    nx = int(x.shape[1])
    ny = int(x.shape[2])
    nz = int(x.shape[3])
    print(" * Input: size of image: %d %d %d" % (nx, ny, nz))

    w_init = tf.truncated_normal_initializer(stddev=0.01)
    b_init = tf.constant_initializer(value=0.0)
    inputs = InputLayer(x, name='inputs')

    conv1 = Conv2d(inputs, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_1')
    conv1 = Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_2')
    pool1 = MaxPool2d(conv1, (2, 2), padding='SAME', name='pool1')

    conv2 = Conv2d(pool1, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_1')
    conv2 = Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_2')
    pool2 = MaxPool2d(conv2, (2, 2), padding='SAME', name='pool2')

    conv3 = Conv2d(pool2, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv3_1')
    conv3 = Conv2d(conv3, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv3_2')
    pool3 = MaxPool2d(conv3, (2, 2), padding='SAME', name='pool3')

    conv4 = Conv2d(pool3, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv4_1')
    conv4 = Conv2d(conv4, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv4_2')
    pool4 = MaxPool2d(conv4, (2, 2), padding='SAME', name='pool4')

    conv5 = Conv2d(pool4, 1024, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv5_1')
    conv5 = Conv2d(conv5, 1024, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv5_2')

    print(" * After conv: %s" % conv5.outputs)

    up4 = DeConv2d(conv5, 512, (3, 3), out_size=(nx // 8, ny // 8), strides=(2, 2),
                                padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv4')
    up4 = ConcatLayer([up4, conv4], concat_dim=3, name='concat4')
    conv4 = Conv2d(up4, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv4_1')
    conv4 = Conv2d(conv4, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv4_2')

    up3 = DeConv2d(conv4, 256, (3, 3), out_size=(nx // 4, ny // 4), strides=(2, 2),
                                padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv3')
    up3 = ConcatLayer([up3, conv3], concat_dim=3, name='concat3')
    conv3 = Conv2d(up3, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv3_1')
    conv3 = Conv2d(conv3, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv3_2')

    up2 = DeConv2d(conv3, 128, (3, 3), out_size=(nx // 2, ny // 2), strides=(2, 2),
                                padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv2')
    up2 = ConcatLayer([up2, conv2], concat_dim=3, name='concat2')
    conv2 = Conv2d(up2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv2_1')
    conv2 = Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv2_2')

    up1 = DeConv2d(conv2, 64, (3, 3), out_size=(nx, ny), strides=(2, 2),
                                padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv1')
    up1 = ConcatLayer([up1, conv1], concat_dim=3, name='concat1')
    conv1 = Conv2d(up1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv1_1')
    conv1 = Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv1_2')

    conv1 = Conv2d(conv1, n_out, (1, 1), act=None, name='uconv1')
    print(" * Output: %s" % conv1.outputs)
    outputs = tl.act.pixel_wise_softmax(conv1.outputs)
    return conv1, outputs
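A minimal usage sketch for the builder above (hypothetical driver code, assuming TensorFlow 1.x and a TensorLayer 1.x API that matches the calls in the snippet; the spatial size must be divisible by 16 because of the four pooling stages):

# Hypothetical driver, not part of the original project.
import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [10, 240, 240, 1], name='image')  # fully defined shape is required
net, probs = u_net_2d_64_1024_deconv(x, n_out=2)                  # probs: per-pixel softmax, [10, 240, 240, 2]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(probs, feed_dict={x: np.zeros([10, 240, 240, 1], np.float32)})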
Code Example #2
    def __get_network__(self,
                        encode_seq,
                        decode_seq,
                        query_decode_seq,
                        is_train=True,
                        reuse=False):

        w_init = tf.random_normal_initializer(stddev=0.02)
        g_init = tf.random_normal_initializer(1., 0.02)

        with tf.variable_scope(self.model_name, reuse=reuse) as vs:
            tl.layers.set_name_reuse(reuse)
            net_encode_traffic = InputLayer(encode_seq, name='in_root_net')
            net_encode_query = InputLayer(self.query_x, name="in_query_net")
            net_encode = ConcatLayer([net_encode_traffic, net_encode_query],
                                     concat_dim=-1,
                                     name="encode")

            net_decode_traffic = InputLayer(decode_seq, name="decode_root")
            net_decode_query = InputLayer(query_decode_seq,
                                          name="decode_query_net")
            net_decode = ConcatLayer([net_decode_traffic, net_decode_query],
                                     concat_dim=-1,
                                     name="decode")

            net_rnn = Seq2Seq(
                net_encode,
                net_decode,
                cell_fn=tf.contrib.rnn.BasicLSTMCell,
                n_hidden=config.dim_hidden,
                initializer=tf.random_uniform_initializer(-0.1, 0.1),
                encode_sequence_length=tl.layers.retrieve_seq_length_op(
                    net_encode.outputs),
                decode_sequence_length=tl.layers.retrieve_seq_length_op(
                    net_decode.outputs),
                initial_state_encode=None,
                # dropout=(0.8 if is_train else None),
                dropout=None,
                n_layer=1,
                return_seq_2d=True,
                name='seq2seq')
            # net_out = DenseLayer(net_rnn, n_units=64, act=tf.identity, name='dense1')
            net_out = DenseLayer(net_rnn,
                                 n_units=1,
                                 act=tf.identity,
                                 name='dense2')
            if is_train:
                net_out = ReshapeLayer(
                    net_out, (config.batch_size, config.out_seq_length + 1, 1),
                    name="reshape_out")
            else:
                net_out = ReshapeLayer(net_out, (config.batch_size, 1, 1),
                                       name="reshape_out")

            self.net_rnn = net_rnn

            return net_out
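Both this snippet and example #14 below let Seq2Seq infer the dynamic sequence lengths with tl.layers.retrieve_seq_length_op, which counts the time steps whose feature vectors are non-zero. A small hedged check on zero-padded dummy data (hypothetical shapes, assuming TensorFlow 1.x and TensorLayer 1.x):

# Hypothetical illustration of retrieve_seq_length_op on zero-padded sequences.
import numpy as np
import tensorflow as tf
import tensorlayer as tl

batch = np.zeros([2, 5, 3], dtype=np.float32)  # 2 sequences, 5 steps, 3 features
batch[0, :3] = 1.0                             # first sequence has 3 real steps
batch[1, :5] = 1.0                             # second uses all 5 steps
lengths = tl.layers.retrieve_seq_length_op(tf.constant(batch))
with tf.Session() as sess:
    print(sess.run(lengths))                   # expected: [3 5]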
Code Example #3
def stage2(cnn, b1, b2, n_pos, maskInput1, maskInput2, is_train, scope_name):
    """Define the archuecture of stage 2 and so on."""
    with tf.variable_scope(scope_name):
        net = ConcatLayer([cnn, b1, b2], -1, name='concat')
        with tf.variable_scope("branch1"):
            b1 = Conv2d(net, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', W_init=W_init, b_init=b_init, name='c1')
            b1 = Conv2d(b1, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', W_init=W_init, b_init=b_init, name='c2')
            b1 = Conv2d(b1, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', W_init=W_init, b_init=b_init, name='c3')
            b1 = Conv2d(b1, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', W_init=W_init, b_init=b_init, name='c4')
            b1 = Conv2d(b1, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', W_init=W_init, b_init=b_init, name='c5')
            b1 = Conv2d(b1, 128, (1, 1), (1, 1), tf.nn.relu, 'VALID', W_init=W_init, b_init=b_init, name='c6')
            b1 = Conv2d(b1, n_pos, (1, 1), (1, 1), None, 'VALID', W_init=W_init, b_init=b_init, name='conf')
            if is_train:
                b1.outputs = b1.outputs * maskInput1
        with tf.variable_scope("branch2"):
            b2 = Conv2d(net, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', W_init=W_init, b_init=b_init, name='c1')
            b2 = Conv2d(b2, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', W_init=W_init, b_init=b_init, name='c2')
            b2 = Conv2d(b2, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', W_init=W_init, b_init=b_init, name='c3')
            b2 = Conv2d(b2, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', W_init=W_init, b_init=b_init, name='c4')
            b2 = Conv2d(b2, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', W_init=W_init, b_init=b_init, name='c5')
            b2 = Conv2d(b2, 128, (1, 1), (1, 1), tf.nn.relu, 'VALID', W_init=W_init, b_init=b_init, name='c6')
            b2 = Conv2d(b2, n_pos * 2, (1, 1), (1, 1), None, 'VALID', W_init=W_init, b_init=b_init, name='pafs')
            if is_train:
                b2.outputs = b2.outputs * maskInput2
    return b1, b2
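During training, each branch output above is multiplied by a "miss mask" so that image regions without annotations do not contribute to the loss. A minimal hedged sketch of the idea in plain TensorFlow (hypothetical shapes and values):

# Hypothetical illustration of the mask multiplication used above.
import tensorflow as tf

conf_maps = tf.ones([1, 46, 46, 19])  # assumed branch-1 output: one confidence map per keypoint
mask_miss = tf.ones([1, 46, 46, 19])  # 0 where annotations are missing, 1 elsewhere
masked = conf_maps * mask_miss        # only the masked predictions reach the L2 loss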
Code Example #4
File: models_vgg.py  Project: tommarz/openpose-plus
    def stage2(cnn, b1, b2, n_pos, maskInput1, maskInput2, is_train, scope_name):
        """Define the archuecture of stage 2 and so on."""

        with tf.variable_scope(scope_name):
            if data_format == 'channels_last':
                concat_dim = -1
            elif data_format == 'channels_first':
                concat_dim = 1
            else:
                raise ValueError('invalid data_format: %s' % data_format)
            net = ConcatLayer([cnn, b1, b2], concat_dim, name='concat')

            with tf.variable_scope("branch1"):
                b1 = _conv2d(net, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', 'c1')
                b1 = _conv2d(b1, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', 'c2')
                b1 = _conv2d(b1, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', 'c3')
                b1 = _conv2d(b1, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', 'c4')
                b1 = _conv2d(b1, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', 'c5')
                b1 = _conv2d(b1, 128, (1, 1), (1, 1), tf.nn.relu, 'VALID', 'c6')
                b1 = _conv2d(b1, n_pos, (1, 1), (1, 1), None, 'VALID', 'conf')
                if is_train:
                    b1.outputs = b1.outputs * maskInput1
            with tf.variable_scope("branch2"):
                b2 = _conv2d(net, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', 'c1')
                b2 = _conv2d(b2, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', 'c2')
                b2 = _conv2d(b2, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', 'c3')
                b2 = _conv2d(b2, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', 'c4')
                b2 = _conv2d(b2, 128, (7, 7), (1, 1), tf.nn.relu, 'SAME', 'c5')
                b2 = _conv2d(b2, 128, (1, 1), (1, 1), tf.nn.relu, 'VALID', 'c6')
                b2 = _conv2d(b2, n_pos * 2, (1, 1), (1, 1), None, 'VALID', 'pafs')
                if is_train:
                    b2.outputs = b2.outputs * maskInput2
        return b1, b2
Code Example #5
 def concat(inputs, name):
     if data_format == 'channels_last':
         concat_dim = -1
     elif data_format == 'channels_first':
         concat_dim = 1
     else:
         raise ValueError('invalid data_format: %s' % data_format)
     return ConcatLayer(inputs, concat_dim, name=name)
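A self-contained hedged sketch of how this helper behaves for the channels_last layout (hypothetical tensors and names, assuming TensorLayer 1.x):

# Hypothetical stand-alone version of the helper above.
import tensorflow as tf
from tensorlayer.layers import InputLayer, ConcatLayer

data_format = 'channels_last'

def concat(inputs, name):
    concat_dim = -1 if data_format == 'channels_last' else 1  # channels_first would concatenate on axis 1
    return ConcatLayer(inputs, concat_dim, name=name)

a = InputLayer(tf.zeros([1, 8, 8, 3]), name='feat_a')  # NHWC feature maps
b = InputLayer(tf.zeros([1, 8, 8, 5]), name='feat_b')
merged = concat([a, b], name='demo_concat')             # output shape [1, 8, 8, 8]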
Code Example #6
File: shufflenetv1.py  Project: Waikkii/TL_ShuffleNet
 def group_conv(cls, x, groups, n_filter, filter_size=(3, 3), strides=(1, 1), name='_groupconv'):
     with tf.variable_scope(name):
         in_channels = int(x.outputs.get_shape()[3])
         #print(in_channels)
         gc_list = []
         assert n_filter % groups == 0, 'n_filter must be divisible by groups!'
         group_size = in_channels // groups  # channels fed to each group
         for i in range(groups):
             # bind i and the slice width at definition time to avoid the late-binding closure bug
             x_group = LambdaLayer(prev_layer=x, fn=lambda z, i=i, gs=group_size: z[:, :, :, i * gs:(i + 1) * gs])
             #print("xgroup"+str(i), x_group.outputs.get_shape())
             gc_list.append(Conv2d(x_group, n_filter=n_filter//groups, filter_size=filter_size, strides=strides,
                                   padding='SAME', name=name+'_Conv2d'+str(i)))
             #print(gc_list[i].outputs.get_shape())
         return ConcatLayer(gc_list, concat_dim=-1, name=name + '_concat')  # merge the group outputs along the channel axis
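The grouped convolution above splits the input channels into `groups` equal chunks and convolves each chunk separately. A stand-alone sketch of that channel split in plain TensorFlow (hypothetical shapes) may make the indexing easier to verify:

# Hypothetical illustration of the per-group channel slice used in group_conv.
import tensorflow as tf

x = tf.zeros([1, 8, 8, 12])               # 12 input channels
groups = 3
group_size = 12 // groups                  # 4 channels per group
chunks = [x[:, :, :, i * group_size:(i + 1) * group_size] for i in range(groups)]
# each chunk has shape [1, 8, 8, 4]; group_conv applies a Conv2d to each chunk before concatenating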
Code Example #7
def Flow(input_coordinates, h_res, w_res, num_out, ngf, min_, max_):

    # we calculated the amount of padding for each layer and
    # the total number of upsampling in each dimension to output the resolution h_res*w_res.
    padx, pady, up_x, up_y = upsampling_factor_padding(h_res, w_res)

    num_l = len(padx)
    layer_specs = [
        ngf * 16, ngf * 16, ngf * 16, ngf * 8, ngf * 8, ngf * 8, ngf * 4
    ]
    layer_specs.extend([ngf * 4] * (num_l - len(layer_specs)))

    # coordconv layer
    coordconv = tf.constant(
        [[[[min_, min_], [max_, min_]], [[min_, max_], [max_, max_]]]],
        dtype=tf.float32)

    coordconv_tl = InputLayer(
        tf.tile(coordconv, [input_coordinates.shape[0], 1, 1, 1]))
    output = InputLayer(input_coordinates)

    for num, num_filter in enumerate(layer_specs):

        with tf.variable_scope("layer_%d" % (num)):

            upsampled = UpSampling2dLayer(output, (up_y[num], up_x[num]), True,
                                          0, True)
            if num == 0:
                padding = [[0, 0], [0, pady[num]], [0, padx[num]], [0, 0]]
                output = conv_layer(upsampled, num_filter, padding, 1)
                coordconv_tl = PadLayer(coordconv_tl, padding, "REFLECT")
                # concatenating the coordconv layer
                output = ConcatLayer([output, coordconv_tl], -1)
            else:
                padding = [[0, 0], [1, 1 + pady[num]], [1, 1 + padx[num]],
                           [0, 0]]
                output = conv_layer(upsampled, num_filter, padding, 3)

    with tf.variable_scope("outputs_flows"):
        flows = gen_flow(output, num_out)

    return flows
Code Example #8
File: shufflenetv1.py  Project: Waikkii/TL_ShuffleNet
 def shufflenet_unit(self, inputs, n_filter, filter_size, strides, groups, stage, bottleneck_ratio=0.25, name='_shufflenetunit'):
     in_channels = int(inputs.outputs.get_shape()[3])
     #print("input", inputs.outputs.get_shape())
     bottleneck_channels = int(n_filter * bottleneck_ratio)
     if stage == 2:
         x = Conv2d(inputs, n_filter=bottleneck_channels, filter_size=filter_size, strides=(1, 1),
                    padding='SAME', name=name+'_Conv2d1')
         #print("conv", x.outputs.get_shape())
     else:
         x = self.group_conv(inputs, groups, bottleneck_channels, (1, 1), (1, 1), name=name+'_groupconv1')
     x = BatchNormLayer(x, act=tf.nn.leaky_relu, name=name+'_Batch1')
     #print("batch", x.outputs.get_shape())
     x = self.channel_shuffle(x, groups, name=name+'_channelshuffle')
     #print("shuffle", x.outputs.get_shape())
     #x = PadLayer(x, [[0, 0], [4, 4], [4, 4], [0, 0]], "CONSTANT", name=name+'_pad')
     #print("pad", x.outputs.get_shape())
     x = DepthwiseConv2d(x, shape=filter_size, strides=strides, depth_multiplier=1,
                         padding='SAME', name=name+'_DepthwiseConv2d')
     #print("deep", x.outputs.get_shape())
     #x = Conv2d(x, n_filter=in_channels, filter_size=filter_size, strides=(1, 1),padding='SAME', name=name+'_Conv2d2')
     #print("conv", x.outputs.get_shape())
     x = BatchNormLayer(x, name=name+'_Batch2')
     #print("deep_batch", x.outputs.get_shape())
     if strides == (2, 2):
         x = self.group_conv(x, groups, n_filter - in_channels, (1, 1), (1, 1), name=name+'_groupconv2')  # n_filter - in_channels: after concatenating the avg-pooled input the block outputs n_filter channels
         #print("gonv", x.outputs.get_shape())
         x = BatchNormLayer(x, name=name+'_Batch3')
         #print("batch", x.outputs.get_shape())
         avg = MeanPool2d(inputs, filter_size=(3, 3), strides=(2, 2), padding='SAME', name=name+'_AvePool')
         #print("avg", avg.outputs.get_shape())
         x = ConcatLayer([x, avg], concat_dim=-1, name=name+'_Concat')
         #print("x1out", x.outputs.get_shape())
     else:
         x = self.group_conv(x, groups, n_filter, (1, 1), (1, 1), name=name+'_groupconv3')
         #print("x", x.outputs.get_shape())
         x = BatchNormLayer(x, name=name+'_Batch4')
         if x.outputs.get_shape()[3] != inputs.outputs.get_shape()[3]:
             x = Conv2d(x, n_filter=in_channels, filter_size=filter_size, strides=(1, 1),
                        padding='SAME', name=name+'_Conv2d2')
         x = ElementwiseLayer([x, inputs], combine_fn=tf.add, name=name+'_Elementwise')
     return x
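In the strides == (2, 2) path the unit concatenates the final group convolution (n_filter - in_channels channels) with the average-pooled input (in_channels channels), so the block still outputs n_filter channels. A quick hedged check of that bookkeeping with hypothetical numbers:

# Hypothetical channel arithmetic for the stride-2 branch above.
in_channels = 240
n_filter = 480
branch_channels = n_filter - in_channels   # produced by the '_groupconv2' path
shortcut_channels = in_channels            # produced by the 3x3 average pooling of the input
assert branch_channels + shortcut_channels == n_filter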
Code Example #9
def stage(cnn, b1, b2, n_pos, is_train, name='stageX'):
    """Define the archuecture of stage 2 to 6."""
    with tf.variable_scope(name):
        net = ConcatLayer([cnn, b1, b2], -1, name='concat')
        with tf.variable_scope("branch1"):
            b1 = depthwise_conv_block(net, 128, filter_size=(7, 7), is_train=is_train, name="c1")
            b1 = depthwise_conv_block(b1, 128, filter_size=(7, 7), is_train=is_train, name="c2")
            b1 = depthwise_conv_block(b1, 128, filter_size=(7, 7), is_train=is_train, name="c3")
            b1 = depthwise_conv_block(b1, 128, filter_size=(7, 7), is_train=is_train, name="c4")
            b1 = depthwise_conv_block(b1, 128, filter_size=(7, 7), is_train=is_train, name="c5")
            b1 = depthwise_conv_block(b1, 128, filter_size=(1, 1), is_train=is_train, name="c6")
            b1 = Conv2d(b1, n_pos, (1, 1), (1, 1), None, 'VALID', W_init=W_init, b_init=b_init2, name='conf')
        with tf.variable_scope("branch2"):
            b2 = depthwise_conv_block(net, 128, filter_size=(7, 7), is_train=is_train, name="c1")
            b2 = depthwise_conv_block(b2, 128, filter_size=(7, 7), is_train=is_train, name="c2")
            b2 = depthwise_conv_block(b2, 128, filter_size=(7, 7), is_train=is_train, name="c3")
            b2 = depthwise_conv_block(b2, 128, filter_size=(7, 7), is_train=is_train, name="c4")
            b2 = depthwise_conv_block(b2, 128, filter_size=(7, 7), is_train=is_train, name="c5")
            b2 = depthwise_conv_block(b2, 128, filter_size=(1, 1), is_train=is_train, name="c6")
            b2 = Conv2d(b2, 38, (1, 1), (1, 1), None, 'VALID', W_init=W_init, b_init=b_init2, name='pafs')
    return b1, b2
Code Example #10
    def __get_network__(self,
                        model_name,
                        encode_seqs,
                        reuse=False,
                        is_train=True):
        # the architecture of networks
        with tf.variable_scope(model_name, reuse=reuse):
            # tl.layers.set_name_reuse(reuse)
            net_in = InputLayer(inputs=encode_seqs, name="in_word_embed")

            filter_length = [3, 4, 5]
            n_filter = 200
            net_cnn_list = list()
            for fsz in filter_length:
                net_cnn = Conv1d(net_in,
                                 n_filter=n_filter,
                                 filter_size=fsz,
                                 stride=1,
                                 act=tf.nn.relu,
                                 name="cnn%d" % fsz)
                net_cnn.outputs = tf.reduce_max(net_cnn.outputs,
                                                axis=1,
                                                name="global_maxpool%d" % fsz)
                net_cnn_list.append(net_cnn)

            net_cnn = ConcatLayer(net_cnn_list, concat_dim=-1)
            net_fc = DenseLayer(net_cnn,
                                n_units=300,
                                act=tf.nn.relu,
                                name="fc_1")

            net_fc = DenseLayer(net_fc,
                                n_units=1,
                                act=tf.nn.sigmoid,
                                name="fc_2")
        return net_fc, net_cnn
    def __get_network_rnnfc__(self, model_name, encode_seqs, class_label_seqs, reuse=False, is_train=True):
        with tf.variable_scope(model_name, reuse=reuse):
            tl.layers.set_name_reuse(reuse)

            net_word_embed = InputLayer(
                inputs=encode_seqs,
                name="in_word_embed"
            )

            net_class_label_embed = InputLayer(
                inputs=class_label_seqs,
                name="in_class_label_embed"
            )

            net_class_label_embed.outputs = tf.slice(
                net_class_label_embed.outputs,
                [0, 0, 0],
                [config.batch_size, 1, self.word_embedding_dim],
                name="slice_word"
            )

            net_class_label_embed.outputs = tf.squeeze(
                net_class_label_embed.outputs,
                name="squeeze_word"
            )

            net_in = ConcatLayer(
                [net_word_embed],
                concat_dim=-1,
                name='concat_vw'
            )

            net_rnn = RNNLayer(
                net_in,
                cell_fn=tf.contrib.rnn.BasicLSTMCell,
                n_hidden = 512,
                n_steps = self.max_length,
                return_last = True,
                name = 'lstm'
            )

            net_fc = ConcatLayer([net_rnn, net_class_label_embed], concat_dim=-1)

            net_fc = DropoutLayer(net_fc, keep=0.5, is_fix=True, is_train=is_train, name='drop1')

            net_fc = DenseLayer(
                net_fc,
                n_units=400,
                act=tf.nn.relu,
                name="fc_1"
            )

            net_fc = DropoutLayer(net_fc, keep=0.5, is_fix=True, is_train=is_train, name='drop2')

            # dbpedia
            net_fc = DenseLayer(
                net_fc,
                n_units=100,
                act=tf.nn.relu,
                name="fc_2"
            )
            net_fc = DropoutLayer(net_fc, keep=0.5, is_fix=True, is_train=is_train, name='drop3')

            net_fc = DenseLayer(
                net_fc,
                n_units=1,
                act=tf.nn.sigmoid,
                name="fc_3"
            )
        return net_fc
    def __get_network_cnnfc__(self, model_name, encode_seqs, class_label_seqs, reuse=False, is_train=True):
        with tf.variable_scope(model_name, reuse=reuse):
            tl.layers.set_name_reuse(reuse)

            net_word_embed = InputLayer(
                inputs=encode_seqs,
                name="in_word_embed"
            )

            net_class_label_embed = InputLayer(
                inputs=class_label_seqs,
                name="in_class_label_embed"
            )

            net_class_label_embed.outputs = tf.slice(
                net_class_label_embed.outputs,
                [0, 0, 0],
                [config.batch_size, 1, self.word_embedding_dim],
                name="slice_word"
            )

            net_class_label_embed.outputs = tf.squeeze(
                net_class_label_embed.outputs,
                name="squeeze_word"
            )

            net_in = ConcatLayer(
                [net_word_embed],
                concat_dim=-1,
                name='concat_vw'
            )

            filter_length = [2, 4, 8]
            # dbpedia
            n_filter = 600
            # n_filter = 200

            net_cnn_list = list()

            for fsz in filter_length:

                net_cnn = Conv1d(
                    net_in,
                    n_filter=n_filter,
                    filter_size=fsz,
                    stride=1,
                    act=tf.nn.relu,
                    name="cnn%d" % fsz
                )
                net_cnn.outputs = tf.reduce_max(net_cnn.outputs, axis=1, name="global_maxpool%d" % fsz)
                net_cnn_list.append(net_cnn)

            net_cnn = ConcatLayer(net_cnn_list + [net_class_label_embed], concat_dim=-1)

            net_fc = DropoutLayer(net_cnn, keep=0.5, is_fix=True, is_train=is_train, name='drop1')

            net_fc = DenseLayer(
                net_fc,
                n_units=400,
                act=tf.nn.relu,
                name="fc_1"
            )

            net_fc = DropoutLayer(net_fc, keep=0.5, is_fix=True, is_train=is_train, name='drop2')

            # dbpedia
            net_fc = DenseLayer(
                net_fc,
                n_units=100,
                act=tf.nn.relu,
                name="fc_2"
            )
            net_fc = DropoutLayer(net_fc, keep=0.5, is_fix=True, is_train=is_train, name='drop3')

            net_fc = DenseLayer(
                net_fc,
                n_units=1,
                act=tf.nn.sigmoid,
                name="fc_3"
            )
        return net_fc
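All three methods above share the same text-CNN building block: Conv1d over the embedded sequence followed by a max over the time axis, which yields one fixed-length vector per filter width. A stand-alone hedged sketch of that block (hypothetical shapes, assuming the TensorLayer 1.x API):

# Hypothetical illustration of the Conv1d + global-max-pool pattern used above.
import tensorflow as tf
from tensorlayer.layers import InputLayer, Conv1d, ConcatLayer

seqs = tf.zeros([8, 50, 300])                     # batch, time steps, word-embedding dim
net_in = InputLayer(seqs, name='in_word_embed_demo')
branches = []
for fsz in [3, 4, 5]:
    c = Conv1d(net_in, n_filter=200, filter_size=fsz, stride=1,
               act=tf.nn.relu, name='demo_cnn%d' % fsz)
    c.outputs = tf.reduce_max(c.outputs, axis=1)  # [8, 200] for each filter width
    branches.append(c)
merged = ConcatLayer(branches, concat_dim=-1, name='demo_concat')  # output shape [8, 600]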
Code Example #13
File: utils.py  Project: eglrp/U-Net-2
def u_net_2d_32_1024_upsam(x, n_out=2):
    """
    https://github.com/jocicmarko/ultrasound-nerve-segmentation
    """
    import tensorflow as tf
    import tensorlayer as tl
    from tensorlayer.layers import InputLayer, Conv2d, MaxPool2d, ConcatLayer, UpSampling2dLayer
    batch_size = int(x.shape[0])
    nx = int(x.shape[1])
    ny = int(x.shape[2])
    nz = int(x.shape[3])
    print(" * Input: size of image: %d %d %d" % (nx, ny, nz))
    ## define initializer
    w_init = tf.truncated_normal_initializer(stddev=0.01)
    b_init = tf.constant_initializer(value=0.0)
    inputs = InputLayer(x, name='inputs')

    conv1 = Conv2d(inputs, 32, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_1')
    conv1 = Conv2d(conv1, 32, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_2')
    pool1 = MaxPool2d(conv1, (2, 2), padding='SAME', name='pool1')

    conv2 = Conv2d(pool1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_1')
    conv2 = Conv2d(conv2, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_2')
    pool2 = MaxPool2d(conv2, (2, 2), padding='SAME', name='pool2')

    conv3 = Conv2d(pool2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv3_1')
    conv3 = Conv2d(conv3, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv3_2')
    pool3 = MaxPool2d(conv3, (2, 2), padding='SAME', name='pool3')

    conv4 = Conv2d(pool3, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv4_1')
    conv4 = Conv2d(conv4, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv4_2')
    pool4 = MaxPool2d(conv4, (2, 2), padding='SAME', name='pool4')

    conv5 = Conv2d(pool4, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv5_1')
    conv5 = Conv2d(conv5, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv5_2')
    pool5 = MaxPool2d(conv5, (2, 2), padding='SAME', name='pool5')

    # hao add
    conv6 = Conv2d(pool5, 1024, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv6_1')
    conv6 = Conv2d(conv6, 1024, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv6_2')

    print(" * After conv: %s" % conv6.outputs)

    # hao add
    up7 = UpSampling2dLayer(conv6, (15, 15), is_scale=False, method=1, name='up7')
    up7 = ConcatLayer([up7, conv5], concat_dim=3, name='concat7')
    conv7 = Conv2d(up7, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv7_1')
    conv7 = Conv2d(conv7, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv7_2')

    # print(nx/8,ny/8) # 30 30
    up8 = UpSampling2dLayer(conv7, (2, 2), method=1, name='up8')
    up8 = ConcatLayer([up8, conv4], concat_dim=3, name='concat8')
    conv8 = Conv2d(up8, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv8_1')
    conv8 = Conv2d(conv8, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv8_2')

    up9 = UpSampling2dLayer(conv8, (2, 2), method=1, name='up9')
    up9 = ConcatLayer([up9, conv3], concat_dim=3, name='concat9')
    conv9 = Conv2d(up9, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv9_1')
    conv9 = Conv2d(conv9, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv9_2')

    up10 = UpSampling2dLayer(conv9, (2, 2), method=1, name='up10')
    up10 = ConcatLayer([up10, conv2], concat_dim=3, name='concat10')
    conv10 = Conv2d(up10, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv10_1')
    conv10 = Conv2d(conv10, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv10_2')

    up11 = UpSampling2dLayer(conv10, (2, 2), method=1, name='up11')
    up11 = ConcatLayer([up11, conv1], concat_dim=3, name='concat11')
    conv11 = Conv2d(up11, 32, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv11_1')
    conv11 = Conv2d(conv11, 32, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv11_2')

    conv12 = Conv2d(conv11, n_out, (1, 1), act=None, name='conv12')
    print(" * Output: %s" % conv12.outputs)
    outputs = tl.act.pixel_wise_softmax(conv12.outputs)
    return conv12, outputs  # return the final 1x1 conv layer, matching the softmax outputs
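Unlike example #1, this variant upsamples with UpSampling2dLayer instead of transposed convolutions; method=1 selects nearest-neighbour resizing in tf.image.resize_images. A tiny hedged sketch of the two size modes (hypothetical shapes, assuming TensorLayer 1.x):

# Hypothetical illustration of UpSampling2dLayer's scale vs. explicit-size modes.
import tensorflow as tf
from tensorlayer.layers import InputLayer, UpSampling2dLayer

n = InputLayer(tf.zeros([1, 15, 15, 8]), name='up_demo_in')
doubled = UpSampling2dLayer(n, size=(2, 2), is_scale=True, method=1, name='up_demo_scale')    # -> 30 x 30
resized = UpSampling2dLayer(n, size=(20, 20), is_scale=False, method=1, name='up_demo_size')  # -> 20 x 20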
Code Example #14
    def __get_network__(self,
                        encode_seq,
                        neighbour_seq,
                        decode_seq,
                        is_train=True,
                        reuse=False):
        w_init = tf.random_normal_initializer(stddev=0.02)
        g_init = tf.random_normal_initializer(1., 0.02)

        with tf.variable_scope(self.model_name, reuse=reuse) as vs:
            tl.layers.set_name_reuse(reuse)
            inputs_x_root = InputLayer(encode_seq, name='in_root')
            inputs_x_nbor = InputLayer(neighbour_seq, name="in_neighbour")

            # encoding neighbour graph information
            n = ReshapeLayer(inputs_x_nbor,
                             (config.batch_size * config.in_seq_length,
                              config.num_neighbour), "reshape1")
            n.outputs = tf.expand_dims(n.outputs, axis=-1)
            n = Conv1d(n,
                       4,
                       4,
                       1,
                       act=tf.identity,
                       padding='SAME',
                       W_init=w_init,
                       name='conv1')
            n = BatchNormLayer(n,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=g_init,
                               name='bn1')
            n = MaxPool1d(n, 2, 2, padding='valid', name='maxpool1')
            n = FlattenLayer(n, name="flatten1")
            n = ReshapeLayer(n, (config.batch_size, config.in_seq_length, -1),
                             name="reshape1_back")

            net_encode = ConcatLayer([inputs_x_root, n],
                                     concat_dim=-1,
                                     name="encode")
            net_decode = InputLayer(decode_seq, name="decode")

            net_rnn = Seq2Seq(
                net_encode,
                net_decode,
                cell_fn=tf.contrib.rnn.BasicLSTMCell,
                n_hidden=config.dim_hidden,
                initializer=tf.random_uniform_initializer(-0.1, 0.1),
                encode_sequence_length=tl.layers.retrieve_seq_length_op(
                    net_encode.outputs),
                decode_sequence_length=tl.layers.retrieve_seq_length_op(
                    net_decode.outputs),
                initial_state_encode=None,
                # dropout=(0.8 if is_train else None),
                dropout=None,
                n_layer=1,
                return_seq_2d=True,
                name='seq2seq')
            # net_out = DenseLayer(net_rnn, n_units=64, act=tf.identity, name='dense1')
            net_out = DenseLayer(net_rnn,
                                 n_units=1,
                                 act=tf.identity,
                                 name='dense2')
            if is_train:
                net_out = ReshapeLayer(
                    net_out, (config.batch_size, config.out_seq_length + 1, 1),
                    name="reshape_out")
            else:
                net_out = ReshapeLayer(net_out, (config.batch_size, 1, 1),
                                       name="reshape_out")

            self.net_rnn = net_rnn

            return net_out
Code Example #15
def model(x,
          n_pos,
          mask_miss1,
          mask_miss2,
          is_train=False,
          train_bn=False,
          reuse=None,
          data_format='channels_last'):  # hao25
    def depthwise_conv_block(n,
                             n_filter,
                             filter_size=(3, 3),
                             strides=(1, 1),
                             name="depth_block"):
        with tf.variable_scope(name):
            n = DepthwiseConv2d(n,
                                filter_size,
                                strides,
                                W_init=W_init,
                                b_init=None,
                                name='depthwise')
            n = BatchNormLayer(n,
                               decay=decay,
                               act=tf.nn.relu6,
                               is_train=train_bn,
                               name='batchnorm1')
            n = Conv2d(n,
                       n_filter, (1, 1), (1, 1),
                       W_init=W_init,
                       b_init=None,
                       name='conv')
            n = BatchNormLayer(n,
                               decay=decay,
                               act=tf.nn.relu6,
                               is_train=train_bn,
                               name='batchnorm2')
        return n

    def stage(cnn,
              b1,
              b2,
              n_pos,
              maskInput1,
              maskInput2,
              is_train,
              name='stageX'):
        """Define the archuecture of stage 2 to 6."""
        with tf.variable_scope(name):
            net = ConcatLayer([cnn, b1, b2], -1, name='concat')
            with tf.variable_scope("branch1"):
                b1 = depthwise_conv_block(net,
                                          128,
                                          filter_size=(7, 7),
                                          name="c1")
                b1 = depthwise_conv_block(b1,
                                          128,
                                          filter_size=(7, 7),
                                          name="c2")
                b1 = depthwise_conv_block(b1,
                                          128,
                                          filter_size=(7, 7),
                                          name="c3")
                b1 = depthwise_conv_block(b1,
                                          128,
                                          filter_size=(7, 7),
                                          name="c4")
                b1 = depthwise_conv_block(b1,
                                          128,
                                          filter_size=(7, 7),
                                          name="c5")
                b1 = depthwise_conv_block(b1,
                                          128,
                                          filter_size=(1, 1),
                                          name="c6")
                b1 = Conv2d(b1,
                            n_pos, (1, 1), (1, 1),
                            None,
                            'VALID',
                            W_init=W_init,
                            b_init=b_init2,
                            name='conf')
                if is_train:
                    b1.outputs = b1.outputs * maskInput1
            with tf.variable_scope("branch2"):
                b2 = depthwise_conv_block(net,
                                          128,
                                          filter_size=(7, 7),
                                          name="c1")
                b2 = depthwise_conv_block(b2,
                                          128,
                                          filter_size=(7, 7),
                                          name="c2")
                b2 = depthwise_conv_block(b2,
                                          128,
                                          filter_size=(7, 7),
                                          name="c3")
                b2 = depthwise_conv_block(b2,
                                          128,
                                          filter_size=(7, 7),
                                          name="c4")
                b2 = depthwise_conv_block(b2,
                                          128,
                                          filter_size=(7, 7),
                                          name="c5")
                b2 = depthwise_conv_block(b2,
                                          128,
                                          filter_size=(1, 1),
                                          name="c6")
                b2 = Conv2d(b2,
                            38, (1, 1), (1, 1),
                            None,
                            'VALID',
                            W_init=W_init,
                            b_init=b_init2,
                            name='pafs')
                if is_train:
                    b2.outputs = b2.outputs * maskInput2
        return b1, b2

    if data_format != 'channels_last':
        # TODO: support NCHW
        print('data_format=%s is ignored' % data_format)

    b1_list = []
    b2_list = []
    with tf.variable_scope('model', reuse=reuse):
        x = x - 0.5
        n = InputLayer(x, name='in')
        n = Conv2d(n,
                   32, (3, 3), (1, 1),
                   None,
                   'SAME',
                   W_init=W_init,
                   b_init=b_init,
                   name='conv1_1')
        n = BatchNormLayer(n,
                           decay=decay,
                           is_train=train_bn,
                           act=tf.nn.relu,
                           name='bn1')
        n = depthwise_conv_block(n, 64, name="conv1_depth1")

        n = depthwise_conv_block(n, 128, strides=(2, 2), name="conv2_depth1")
        n = depthwise_conv_block(n, 128, name="conv2_depth2")
        n1 = n

        n = depthwise_conv_block(n, 256, strides=(2, 2), name="conv3_depth1")
        n = depthwise_conv_block(n, 256, name="conv3_depth2")
        n2 = n

        n = depthwise_conv_block(n, 512, strides=(2, 2), name="conv4_depth1")
        n = depthwise_conv_block(n, 512, name="conv4_depth2")
        n = depthwise_conv_block(n, 512, name="conv4_depth3")
        n = depthwise_conv_block(n, 512, name="conv4_depth4")
        cnn = depthwise_conv_block(n, 512, name="conv4_depth5")

        ## low-level features
        # n1 = MaxPool2d(n1, (2, 2), (2, 2), 'same', name='maxpool2d')
        n1 = depthwise_conv_block(n1, 128, strides=(2, 2), name="n1_down1")
        n1 = depthwise_conv_block(n1, 128, strides=(2, 2), name="n1_down2")
        ## mid-level features
        n2 = depthwise_conv_block(n2, 256, strides=(2, 2), name="n2_down1")
        ## combine features
        cnn = ConcatLayer([cnn, n1, n2], -1, name='cancat')

        ## stage1
        with tf.variable_scope("stage1/branch1"):
            b1 = depthwise_conv_block(cnn, 128, filter_size=(7, 7), name="c1")
            b1 = depthwise_conv_block(b1, 128, filter_size=(7, 7), name="c2")
            b1 = depthwise_conv_block(b1, 128, filter_size=(7, 7), name="c3")
            b1 = depthwise_conv_block(b1, 512, filter_size=(1, 1), name="c4")
            b1 = Conv2d(b1,
                        n_pos, (1, 1), (1, 1),
                        None,
                        'VALID',
                        W_init=W_init,
                        b_init=b_init,
                        name='confs')

            if is_train:
                b1.outputs = b1.outputs * mask_miss1
        with tf.variable_scope("stage1/branch2"):
            b2 = depthwise_conv_block(cnn, 128, filter_size=(7, 7), name="c1")
            b2 = depthwise_conv_block(b2, 128, filter_size=(7, 7), name="c2")
            b2 = depthwise_conv_block(b2, 128, filter_size=(7, 7), name="c3")
            b2 = depthwise_conv_block(b2, 512, filter_size=(1, 1), name="c4")
            b2 = Conv2d(b2,
                        38, (1, 1), (1, 1),
                        None,
                        'VALID',
                        W_init=W_init,
                        b_init=b_init2,
                        name='pafs')
            if is_train:
                b2.outputs = b2.outputs * mask_miss2
            b1_list.append(b1)
            b2_list.append(b2)

        ## other stages
        # for i in range(2, 7): # [2, 3, 4, 5, 6]
        # for i in [5, 6]:
        for i in [3, 4, 5, 6]:
            b1, b2 = stage(cnn,
                           b1_list[-1],
                           b2_list[-1],
                           n_pos,
                           mask_miss1,
                           mask_miss2,
                           is_train,
                           name='stage%d' % i)
            b1_list.append(b1)
            b2_list.append(b2)
        net = tl.layers.merge_networks([b1_list[-1], b2_list[-1]])
    return cnn, b1_list, b2_list, net
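Branch 1 ends with n_pos channels (one confidence map per keypoint) while branch 2 ends with a hard-coded 38 channels; assuming the usual COCO-style OpenPose configuration, that is 19 limb connections with an x and a y part-affinity component each:

# Hypothetical channel bookkeeping for the two branches (assumed COCO-style configuration).
n_pos = 19                  # keypoint confidence maps (18 body parts + background)
n_limbs = 19                # limb connections used by the PAF head
paf_channels = n_limbs * 2  # one x and one y flow component per limb
assert paf_channels == 38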
Code Example #16
    def __get_network__(self,
                        model_name,
                        encode_seqs,
                        reuse=False,
                        is_train=True):
        with tf.variable_scope(model_name, reuse=reuse):
            tl.layers.set_name_reuse(reuse)

            net_in = InputLayer(inputs=encode_seqs, name="in_word_embed")
            '''
            net_in = ReshapeLayer(
                net_in,
                (-1, self.max_length, self.word_embedding_dim, 1),
                name="reshape"
            )
            '''

            filter_length = [2, 4, 8]
            n_filter = 600

            net_cnn_list = list()

            for fsz in filter_length:

                net_cnn = Conv1d(net_in,
                                 n_filter=n_filter,
                                 filter_size=fsz,
                                 stride=1,
                                 act=tf.nn.relu,
                                 name="cnn%d" % fsz)
                net_cnn.outputs = tf.reduce_max(net_cnn.outputs,
                                                axis=1,
                                                name="global_maxpool%d" % fsz)
                net_cnn_list.append(net_cnn)

            net_cnn = ConcatLayer(net_cnn_list, concat_dim=-1)
            '''
            net_cnn = Conv1d(net_in, 400, 8, act=tf.nn.relu, name="cnn_1")
            net_cnn = MaxPool1d(net_cnn, 2, 2, padding="valid", name="maxpool_1")

            net_cnn = Conv1d(net_cnn, 600, 4, act=tf.nn.relu, name="cnn_2")
            net_cnn = MaxPool1d(net_cnn, 2, 2, padding="valid", name="maxpool_2")

            net_cnn = Conv1d(net_cnn, 600, 2, act=tf.nn.relu, name="cnn_3")
            net_cnn = MaxPool1d(net_cnn, 2, 2, padding="valid", name="maxpool_3")

            net_cnn = FlattenLayer(net_cnn, name="flatten")
            '''
            '''
            net_cnn = Conv2d(net_in, 64, (8, 8), act=tf.nn.relu, name="cnn_1")
            net_cnn = MaxPool2d(net_cnn, (2, 2), padding="valid", name="maxpool_1")

            net_cnn = Conv2d(net_cnn, 32, (4, 4), act=tf.nn.relu, name="cnn_2")
            net_cnn = MaxPool2d(net_cnn, (2, 4), padding="valid", name="maxpool_2")

            net_cnn = Conv2d(net_cnn, 8, (2, 2), act=tf.nn.relu, name="cnn_3")
            net_cnn = MaxPool2d(net_cnn, (2, 2), padding="valid", name="maxpool_3")

            net_cnn = FlattenLayer(net_cnn, name="flatten")
            '''

            net_cnn = DropoutLayer(net_cnn,
                                   keep=0.5,
                                   is_fix=True,
                                   is_train=is_train,
                                   name='drop1')

            net_fc = DenseLayer(net_cnn,
                                n_units=400,
                                act=tf.nn.relu,
                                name="fc_1")

            net_fc = DropoutLayer(net_fc,
                                  keep=0.5,
                                  is_fix=True,
                                  is_train=is_train,
                                  name='drop2')

            net_fc = DenseLayer(net_fc,
                                n_units=100,
                                act=tf.nn.relu,
                                name="fc_2")

            net_fc = DropoutLayer(net_fc,
                                  keep=0.5,
                                  is_fix=True,
                                  is_train=is_train,
                                  name='drop3')

            net_fc = DenseLayer(net_fc,
                                n_units=self.number_of_seen_classes,
                                act=tf.nn.relu,
                                name="fc_3")

        return net_fc
Code Example #17
def UNet_A(lf_extra,
           n_slices,
           output_size,
           is_train=True,
           reuse=False,
           name='unet'):
    '''U-net based VCD-Net for light field reconstruction.
    Params:
        lf_extra: tf.tensor 
            In shape of [batch, height, width, n_num^2], the extracted views from the light field image
        n_slices: int
            The number of slices in the 3-D reconstruction.
        output_size: list of int
            Lateral size of the 3-D reconstruction, i.e., [height, width].
        is_train: boolean 
            See tl.layers.BatchNormLayer.
        reuse: boolean 
            Whether to reuse the variables or not. See tf.variable_scope() for details.
        name: string
            The name of the variable scope.
    Return:
        The 3-D reconstruction in shape of [batch, height, width, depth=n_slices]
    '''
    n_interp = 4
    # _, w, h, _ = lf_extra.shape
    #channels_interp = in_channels.value
    channels_interp = 128
    act = tf.nn.relu

    with tf.variable_scope(name, reuse=reuse):
        n = InputLayer(lf_extra, 'lf_extra')
        n = conv2d(n, n_filter=channels_interp, filter_size=7, name='conv1')

        ## Up-scale input
        with tf.variable_scope('interp'):
            for i in range(n_interp):
                channels_interp = channels_interp // 2  # keep the filter count an integer
                n = SubpixelConv2d(n, scale=2, name='interp/subpixel%d' % i)
                n = conv2d(n,
                           n_filter=channels_interp,
                           filter_size=3,
                           name='conv%d' % i)

            n = conv2d(n,
                       n_filter=channels_interp,
                       filter_size=3,
                       name='conv_final')  # 176*176
            n = batch_norm(n, is_train=is_train, name='bn_final')
            n = ReluLayer(n, name='reul_final')

        pyramid_channels = [
            128, 256, 512, 512, 512
        ]  # output channels number of each conv layer in the encoder
        encoder_layers = []
        with tf.variable_scope('encoder'):
            n = conv2d(n, n_filter=64, filter_size=3, stride=1, name='conv0')
            n = batch_norm(n, is_train=is_train, name='bn_0')
            n = ReluLayer(n, name='reul0')

            for idx, nc in enumerate(pyramid_channels):
                encoder_layers.append(n)  # keep n0, n1, n2, n3, n4 (but not n5) for the skip connections
                print('encoder %d : %s' % (idx, str(n.outputs.get_shape())))
                n = conv2d(n,
                           n_filter=nc,
                           filter_size=3,
                           stride=1,
                           name='conv%d' % (idx + 1))
                n = batch_norm(n, is_train=is_train, name='bn%d' % (idx + 1))
                n = ReluLayer(n, name='reul%d' % (idx + 1))
                n1 = PadDepth(encoder_layers[-1], desired_channels=nc)
                n = merge([n, n1], name='add%d' % (idx + 1))
                n = tl.layers.MaxPool2d(n,
                                        filter_size=(3, 3),
                                        strides=(2, 2),
                                        name='maxplool%d' % (idx + 1))

        nl = len(encoder_layers)
        with tf.variable_scope('decoder'):
            _, h, w, _ = encoder_layers[-1].outputs.shape.as_list()
            n = UpSampling2dLayer(n,
                                  size=(h, w),
                                  is_scale=False,
                                  name='upsamplimg')

            for idx in range(nl - 1, -1, -1):  # idx = 4,3,2,1,0
                if idx > 0:
                    _, h, w, _ = encoder_layers[idx -
                                                1].outputs.shape.as_list()
                    out_size = (h, w)
                    out_channels = pyramid_channels[idx - 1]
                else:
                    #out_size = None
                    out_channels = n_slices

                print('decoder %d : %s' % (idx, str(n.outputs.get_shape())))
                n = ConcatLayer([encoder_layers[idx], n],
                                concat_dim=-1,
                                name='concat%d' % (nl - idx))
                n = conv2d(n,
                           out_channels,
                           filter_size=3,
                           stride=1,
                           name='conv%d' % (nl - idx + 1))
                n = ReluLayer(n, name='relu%d' % (nl - idx + 1))
                n = batch_norm(n,
                               is_train=is_train,
                               name='bn%d' % (nl - idx + 1))
                #n = UpConv(n, 512, filter_size=4, factor=2, name='upconv2')
                n = UpSampling2dLayer(n,
                                      size=out_size,
                                      is_scale=False,
                                      name='upsamplimg%d' % (nl - idx + 1))

                #n = DropoutLayer(n, keep=0.5, is_fix=True, is_train=is_train, name='dropout1')

            if n.outputs.shape[1] != output_size[0]:
                n = UpSampling2dLayer(n,
                                      size=output_size,
                                      is_scale=False,
                                      name='resize_final')
            #n = conv2d(n, n_slices, filter_size=3, stride=1,name='conv_final' )
            n.outputs = tf.tanh(n.outputs)
            #n.outputs = tf.nn.relu(n.outputs)
            #n = conv2d(n, n_filter=n_slices, filter_size=3, act=tf.tanh, name='out')
            return n
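Inside the 'interp' scope, each iteration halves channels_interp, doubles the spatial resolution with SubpixelConv2d(scale=2) (which also divides the channel count by scale^2 = 4), and then convolves back to the halved channel target. A hedged sketch of one such step in isolation (hypothetical shapes, plain TensorLayer layers rather than the project's conv2d helper):

# Hypothetical single up-scaling step mirroring the 'interp' block above.
import tensorflow as tf
from tensorlayer.layers import InputLayer, SubpixelConv2d, Conv2d

n = InputLayer(tf.zeros([1, 11, 11, 128]), name='interp_demo_in')
n = SubpixelConv2d(n, scale=2, name='interp_demo_subpixel')                  # -> [1, 22, 22, 32]
n = Conv2d(n, 64, (3, 3), (1, 1), padding='SAME', name='interp_demo_conv')   # -> [1, 22, 22, 64]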
Code Example #18
def model(x,
          n_pos,
          is_train=False,
          reuse=None,
          data_format='channels_last'):  # hao25
    if data_format != 'channels_last':
        # TODO: support NCHW
        print('data_format=%s is ignored' % data_format)

    b1_list = []
    b2_list = []
    with tf.variable_scope('model', reuse=reuse):
        x = x - 0.5
        n = InputLayer(x, name='in')
        n = Conv2d(n,
                   32, (3, 3), (1, 1),
                   None,
                   'SAME',
                   W_init=W_init,
                   b_init=b_init,
                   name='conv1_1')
        n = BatchNormLayer(n,
                           decay=decay,
                           is_train=is_train,
                           act=tf.nn.relu,
                           name='bn1')
        n = depthwise_conv_block(n, 64, is_train=is_train, name="conv1_depth1")

        n = depthwise_conv_block(n,
                                 128,
                                 strides=(2, 2),
                                 is_train=is_train,
                                 name="conv2_depth1")
        n = depthwise_conv_block(n,
                                 128,
                                 is_train=is_train,
                                 name="conv2_depth2")
        n1 = n

        n = depthwise_conv_block(n,
                                 256,
                                 strides=(2, 2),
                                 is_train=is_train,
                                 name="conv3_depth1")
        n = depthwise_conv_block(n,
                                 256,
                                 is_train=is_train,
                                 name="conv3_depth2")
        n2 = n

        n = depthwise_conv_block(n,
                                 512,
                                 strides=(2, 2),
                                 is_train=is_train,
                                 name="conv4_depth1")
        n = depthwise_conv_block(n,
                                 512,
                                 is_train=is_train,
                                 name="conv4_depth2")
        n = depthwise_conv_block(n,
                                 512,
                                 is_train=is_train,
                                 name="conv4_depth3")
        n = depthwise_conv_block(n,
                                 512,
                                 is_train=is_train,
                                 name="conv4_depth4")
        cnn = depthwise_conv_block(n,
                                   512,
                                   is_train=is_train,
                                   name="conv4_depth5")

        ## low-level features
        # n1 = MaxPool2d(n1, (2, 2), (2, 2), 'same', name='maxpool2d')
        n1 = depthwise_conv_block(n1,
                                  128,
                                  strides=(2, 2),
                                  is_train=is_train,
                                  name="n1_down1")
        n1 = depthwise_conv_block(n1,
                                  128,
                                  strides=(2, 2),
                                  is_train=is_train,
                                  name="n1_down2")
        ## mid-level features
        n2 = depthwise_conv_block(n2,
                                  256,
                                  strides=(2, 2),
                                  is_train=is_train,
                                  name="n2_down1")
        ## combine features
        cnn = ConcatLayer([cnn, n1, n2], -1, name='cancat')

        ## stage1
        with tf.variable_scope("stage1/branch1"):
            b1 = depthwise_conv_block(cnn,
                                      128,
                                      filter_size=(7, 7),
                                      is_train=is_train,
                                      name="c1")
            b1 = depthwise_conv_block(b1,
                                      128,
                                      filter_size=(7, 7),
                                      is_train=is_train,
                                      name="c2")
            b1 = depthwise_conv_block(b1,
                                      128,
                                      filter_size=(7, 7),
                                      is_train=is_train,
                                      name="c3")
            b1 = depthwise_conv_block(b1,
                                      512,
                                      filter_size=(1, 1),
                                      is_train=is_train,
                                      name="c4")
            b1 = Conv2d(b1,
                        n_pos, (1, 1), (1, 1),
                        None,
                        'VALID',
                        W_init=W_init,
                        b_init=b_init,
                        name='confs')

        with tf.variable_scope("stage1/branch2"):
            b2 = depthwise_conv_block(cnn,
                                      128,
                                      filter_size=(7, 7),
                                      is_train=is_train,
                                      name="c1")
            b2 = depthwise_conv_block(b2,
                                      128,
                                      filter_size=(7, 7),
                                      is_train=is_train,
                                      name="c2")
            b2 = depthwise_conv_block(b2,
                                      128,
                                      filter_size=(7, 7),
                                      is_train=is_train,
                                      name="c3")
            b2 = depthwise_conv_block(b2,
                                      512,
                                      filter_size=(1, 1),
                                      is_train=is_train,
                                      name="c4")
            b2 = Conv2d(b2,
                        38, (1, 1), (1, 1),
                        None,
                        'VALID',
                        W_init=W_init,
                        b_init=b_init2,
                        name='pafs')

            b1_list.append(b1)
            b2_list.append(b2)

        ## other stages
        # for i in range(2, 7): # [2, 3, 4, 5, 6]
        # for i in [5, 6]:
        for i in [3, 4, 5, 6]:
            b1, b2 = stage(cnn,
                           b1_list[-1],
                           b2_list[-1],
                           n_pos,
                           is_train,
                           name='stage%d' % i)
            b1_list.append(b1)
            b2_list.append(b2)
        net = tl.layers.merge_networks([b1_list[-1], b2_list[-1]])
    return cnn, b1_list, b2_list, net
Code Example #19
def squeezenet(x, is_train=True, reuse=False):
    # model from: https://github.com/wohlert/keras-squeezenet
    #             https://github.com/DT42/squeezenet_demo/blob/master/model.py
    with tf.variable_scope("squeezenet", reuse=reuse):
        with tf.variable_scope("input"):
            n = InputLayer(x)
            # n = Conv2d(n, 96, (7,7),(2,2),tf.nn.relu,'SAME',name='conv1')
            n = Conv2d(n, 64, (3, 3), (2, 2), tf.nn.relu, 'SAME', name='conv1')
            n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')

        with tf.variable_scope("fire2"):
            n = Conv2d(n,
                       16, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        64, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        64, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire3"):
            n = Conv2d(n,
                       16, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        64, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        64, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')
            n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')

        with tf.variable_scope("fire4"):
            n = Conv2d(n,
                       32, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        128, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        128, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire5"):
            n = Conv2d(n,
                       32, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        128, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        128, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')
            n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')

        with tf.variable_scope("fire6"):
            n = Conv2d(n,
                       48, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        192, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        192, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire7"):
            n = Conv2d(n,
                       48, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        192, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        192, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire8"):
            n = Conv2d(n,
                       64, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        256, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        256, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire9"):
            n = Conv2d(n,
                       64, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        256, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        256, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("output"):
            n = DropoutLayer(n,
                             keep=0.5,
                             is_fix=True,
                             is_train=is_train,
                             name='drop1')
            n = Conv2d(n, 1000, (1, 1), (1, 1), padding='VALID',
                       name='conv10')  # 13, 13, 1000
            n = GlobalMeanPool2d(n)
        return n
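
A minimal call sketch for the squeezenet function above (the 224x224 RGB placeholder is an illustrative assumption based on the standard SqueezeNet input size, not something stated in the snippet):

import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 224, 224, 3], name='image')   # assumed input size
net = squeezenet(x, is_train=False, reuse=False)                     # TensorLayer Layer object
logits = net.outputs                                                 # Tensor of shape (batch, 1000)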
Code Example #20
    def __get_network__(self,
                        encode_seq,
                        neighbour_seq,
                        decode_seq,
                        features,
                        features_full,
                        is_train=True,
                        reuse=False):
        w_init = tf.random_normal_initializer(stddev=0.02)
        g_init = tf.random_normal_initializer(1., 0.02)

        with tf.variable_scope(self.model_name + "_spatial",
                               reuse=reuse) as vs:
            tl.layers.set_name_reuse(reuse)
            inputs_x_root = InputLayer(encode_seq, name='in_root')
            inputs_x_nbor = InputLayer(neighbour_seq, name="in_neighbour")

            # encoding neighbour graph information
            n = ReshapeLayer(inputs_x_nbor,
                             (config.batch_size * config.in_seq_length,
                              config.num_neighbour), "reshape1")
            n.outputs = tf.expand_dims(n.outputs, axis=-1)
            n = Conv1d(n,
                       4,
                       4,
                       1,
                       act=tf.identity,
                       padding='SAME',
                       W_init=w_init,
                       name='conv1')
            n = BatchNormLayer(n,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=g_init,
                               name='bn1')
            n = MaxPool1d(n, 2, 2, padding='valid', name='maxpool1')
            n = FlattenLayer(n, name="flatten1")
            n = ReshapeLayer(n, (config.batch_size, config.in_seq_length, -1),
                             name="reshape1_back")

            net_encode = ConcatLayer([inputs_x_root, n],
                                     concat_dim=-1,
                                     name="encode")
            net_decode = InputLayer(decode_seq, name="decode")

            net_rnn = Seq2Seq(
                net_encode,
                net_decode,
                cell_fn=tf.contrib.rnn.BasicLSTMCell,
                n_hidden=config.dim_hidden,
                initializer=tf.random_uniform_initializer(-0.1, 0.1),
                encode_sequence_length=tl.layers.retrieve_seq_length_op(
                    net_encode.outputs),
                decode_sequence_length=tl.layers.retrieve_seq_length_op(
                    net_decode.outputs),
                initial_state_encode=None,
                # dropout=(0.8 if is_train else None),
                dropout=None,
                n_layer=1,
                return_seq_2d=True,
                name='seq2seq')
            net_rnn_seq2seq = net_rnn

            net_spatial_out = DenseLayer(net_rnn,
                                         n_units=1,
                                         act=tf.identity,
                                         name='dense2')
            if is_train:
                net_spatial_out = ReshapeLayer(
                    net_spatial_out,
                    (config.batch_size, config.out_seq_length + 1, 1),
                    name="reshape_out")
            else:
                net_spatial_out = ReshapeLayer(net_spatial_out,
                                               (config.batch_size, 1, 1),
                                               name="reshape_out")

        with tf.variable_scope(self.model_name + "_wide", reuse=reuse) as vs:
            tl.layers.set_name_reuse(reuse)
            # Features
            net_features = InputLayer(features, name="in_features")
            net_features_full = InputLayer(features_full,
                                           name="in_features_full")
            net_features_full = ReshapeLayer(
                net_features_full,
                (config.batch_size *
                 (config.out_seq_length + 1), config.dim_features),
                name="reshape_feature_full_1")
            if is_train:
                net_features = ReshapeLayer(
                    net_features,
                    (config.batch_size *
                     (config.out_seq_length + 1), config.dim_features),
                    name="reshape_feature_1")
            else:
                net_features = ReshapeLayer(net_features,
                                            (config.batch_size *
                                             (1), config.dim_features),
                                            name="reshape_feature_1")

            self.net_features_dim = 32
            net_features = DenseLayer(net_features,
                                      n_units=self.net_features_dim,
                                      act=tf.nn.relu,
                                      name='dense_features')
            net_features_full = DenseLayer(net_features_full,
                                           n_units=self.net_features_dim,
                                           act=tf.nn.relu,
                                           name='dense_features_full')
            # self.net_features = net_features

            net_wide_out = ConcatLayer([net_rnn_seq2seq, net_features],
                                       concat_dim=-1,
                                       name="concat_features")
            net_wide_out = DenseLayer(net_wide_out,
                                      n_units=1,
                                      act=tf.identity,
                                      name='dense2')

            if is_train:
                net_wide_out = ReshapeLayer(
                    net_wide_out,
                    (config.batch_size, config.out_seq_length + 1, 1),
                    name="reshape_out")
            else:
                net_wide_out = ReshapeLayer(net_wide_out,
                                            (config.batch_size, 1, 1),
                                            name="reshape_out")

        with tf.variable_scope(self.model_name + "_query", reuse=reuse) as vs:
            tl.layers.set_name_reuse(reuse)

            net_decode_query = InputLayer(self.query_decode_seq,
                                          name="decode_query")

            net_rnn_query = RNNLayer(
                net_decode_query,
                cell_fn=tf.contrib.rnn.BasicLSTMCell,
                cell_init_args={"forget_bias": 1.0},
                n_hidden=config.query_dim_hidden,
                initializer=tf.random_uniform_initializer(-0.1, 0.1),
                n_steps=config.out_seq_length,
                return_last=True,

                # return_last=False,
                # return_seq_2d=True,
                name="rnn_query")
            '''
            net_rnn_query = DynamicRNNLayer(
                net_decode_query,
                cell_fn=tf.contrib.rnn.BasicLSTMCell,
                cell_init_args={"forget_bias": 1.0},
                # n_hidden=config.query_dim_hidden,
                n_hidden=32,
                initializer=tf.random_uniform_initializer(-0.1, 0.1),
                return_last=True,
                # dropout=0.8,
                sequence_length=tl.layers.retrieve_seq_length_op(net_decode_query.outputs),
                # return_last=False,
                # return_seq_2d=True,
                name="rnn_query_dynamic"
            )
            '''

            net_rnn_query = ExpandDimsLayer(net_rnn_query,
                                            axis=1,
                                            name="rnn_query_expand")
            net_rnn_query = TileLayer(net_rnn_query,
                                      [1, config.out_seq_length, 1],
                                      name="rnn_query_tile")
            net_rnn_query = ReshapeLayer(
                net_rnn_query, (config.batch_size * config.out_seq_length,
                                config.query_dim_hidden),
                name="rnn_query_reshape")
            # net_rnn_query = ReshapeLayer(net_rnn_query, (config.batch_size * config.out_seq_length, 32), name="rnn_query_reshape")

            # self.net_rnn_query = net_rnn_query

            net_traffic_state = InputLayer(self.traffic_state,
                                           name="in_traffic_state")
            '''
            if is_train:
                net_rnn_traffic = ReshapeLayer(net_rnn_seq2seq, (config.batch_size, config.out_seq_length + 1, config.dim_hidden), name="reshape_traffic_q1")
                net_rnn_traffic.outputs = tf.slice(net_rnn_traffic.outputs, [0, 0, 0], [config.batch_size, config.out_seq_length, config.dim_hidden], name="slice_traffic_q")
                net_rnn_traffic = ReshapeLayer(net_rnn_traffic, (config.batch_size * config.out_seq_length, config.dim_hidden), name="reshape_traffic_q2")

                net_features_traffic = ReshapeLayer(net_features, (config.batch_size, config.out_seq_length + 1, self.net_features_dim), name="reshape_features_q1")
                net_features_traffic.outputs = tf.slice(net_features_traffic.outputs, [0, 0, 0], [config.batch_size, config.out_seq_length, self.net_features_dim], name="slice_features_q")
                net_features_traffic = ReshapeLayer(net_features_traffic, (config.batch_size * config.out_seq_length, self.net_features_dim), name="reshape_features_q2")

                net_query_out = ConcatLayer([net_rnn_traffic, net_features_traffic, net_rnn_query], concat_dim=-1, name="concat_traffic_query1")
                # net_query_out = ConcatLayer([net_rnn_traffic, net_rnn_query], concat_dim=-1, name="concat_traffic_query1")
            else:
            '''
            net_features_traffic = ReshapeLayer(
                net_features_full,
                (config.batch_size, config.out_seq_length + 1,
                 self.net_features_dim),
                name="reshape_features_q1")
            net_features_traffic.outputs = tf.slice(
                net_features_traffic.outputs, [0, 0, 0], [
                    config.batch_size, config.out_seq_length,
                    self.net_features_dim
                ],
                name="slice_features_q")
            net_features_traffic = ReshapeLayer(
                net_features_traffic,
                (config.batch_size * config.out_seq_length,
                 self.net_features_dim),
                name="reshape_features_q2")

            net_query_out = ConcatLayer(
                [net_traffic_state, net_features_traffic, net_rnn_query],
                concat_dim=-1,
                name="concat_traffic_query1")
            # net_rnn_traffic = ReshapeLayer(net_rnn_seq2seq, (config.batch_size, config.out_seq_length + 1, config.dim_hidden), name="reshape_traffic_q1")
            # net_rnn_traffic.outputs = tf.slice(net_rnn_traffic.outputs, [0, 0, 0], [config.batch_size, config.out_seq_length, config.dim_hidden], name="slice_traffic_q")
            # net_rnn_traffic = ReshapeLayer(net_rnn_traffic, (config.batch_size * config.out_seq_length, config.dim_hidden), name="reshape_traffic_q2")
            # net_query_out = ConcatLayer([net_rnn_traffic, net_features_traffic, net_rnn_query], concat_dim=-1, name="concat_traffic_query1")

            # net_out = DenseLayer(net_out, n_units=128, act=tf.nn.relu, name="dense_query1")
            # net_out = DenseLayer(net_out, n_units=64, act=tf.nn.relu, name="dense_query2")
            # net_query_out = DropoutLayer(net_query_out, keep=0.8, is_fix=True, is_train=is_train, name='drop_query3')
            net_query_out = DenseLayer(net_query_out,
                                       n_units=1,
                                       act=tf.identity,
                                       name="dense_query3")
            # net_out = ReshapeLayer(net_out, (config.batch_size, config.out_seq_length + 1, 1), name="reshape_out")
            # if is_train:
            net_query_out = ReshapeLayer(
                net_query_out, (config.batch_size, config.out_seq_length, 1),
                name="reshape_out")
            # else:
            #    net_out = ReshapeLayer(net_out, (config.batch_size, 1, 1), name="reshape_out")

            # TODO residual net
            '''
            if is_train:
                net_query_out.outputs = tf.add(
                    net_query_out.outputs,
                    tf.slice(net_wide_out.outputs, [0, 0, 0], [config.batch_size, config.out_seq_length, 1]),
                    name="res_add"
                )
            else:
            '''
            net_base_pred = InputLayer(self.base_pred, name="in_net_base_pred")
            net_query_out.outputs = tf.add(net_query_out.outputs,
                                           net_base_pred.outputs,
                                           name="res_add")

        return net_rnn_seq2seq, net_spatial_out, net_wide_out, net_rnn_query, net_query_out
Code Example #21
    def __get_network__(self,
                        encode_seq,
                        decode_seq,
                        is_train=True,
                        reuse=False):
        w_init = tf.random_normal_initializer(stddev=0.02)
        g_init = tf.random_normal_initializer(1., 0.02)

        with tf.variable_scope("seq2seq_model", reuse=reuse) as vs:
            tl.layers.set_name_reuse(reuse)
            net_encode = InputLayer(encode_seq, name='in_root')

            net_decode = InputLayer(decode_seq, name="decode")

            net_rnn = Seq2Seq(
                net_encode,
                net_decode,
                cell_fn=tf.contrib.rnn.BasicLSTMCell,
                n_hidden=config.dim_hidden,
                initializer=tf.random_uniform_initializer(-0.1, 0.1),
                encode_sequence_length=tl.layers.retrieve_seq_length_op(
                    net_encode.outputs),
                decode_sequence_length=tl.layers.retrieve_seq_length_op(
                    net_decode.outputs),
                initial_state_encode=None,
                # dropout=(0.8 if is_train else None),
                dropout=None,
                n_layer=1,
                return_seq_2d=True,
                name='seq2seq')
            # self.net_rnn_seq2seq = net_rnn
            net_rnn_seq2seq = net_rnn

            net_out_seq2seq = DenseLayer(net_rnn,
                                         n_units=1,
                                         act=tf.identity,
                                         name='dense2')
            if is_train:
                net_out_seq2seq = ReshapeLayer(
                    net_out_seq2seq,
                    (config.batch_size, config.out_seq_length + 1, 1),
                    name="reshape_out")
            else:
                net_out_seq2seq = ReshapeLayer(net_out_seq2seq,
                                               (config.batch_size, 1, 1),
                                               name="reshape_out")

            # net_out_seq2seq = net_out_seq2seq
            # net_out = DenseLayer(net_rnn, n_units=64, act=tf.identity, name='dense1')
            # net_out = DenseLayer(net_rnn, n_units=1, act=tf.identity, name='dense2')
            # net_out = ReshapeLayer(net_out, (config.batch_size, config.out_seq_length + 1, 1), name="reshape_out")

        with tf.variable_scope(self.model_name, reuse=reuse) as vs:
            tl.layers.set_name_reuse(reuse)
            net_encode_query = InputLayer(self.query_x, name='in_root_query')

            net_decode_query = InputLayer(self.query_decode_seq,
                                          name="decode_query")

            net_rnn_query = RNNLayer(
                net_decode_query,
                cell_fn=tf.contrib.rnn.BasicLSTMCell,
                cell_init_args={"forget_bias": 1.0},
                n_hidden=config.query_dim_hidden,
                initializer=tf.random_uniform_initializer(-0.1, 0.1),
                n_steps=config.out_seq_length,
                return_last=True,
                # return_last=False,
                # return_seq_2d=True,
                name="rnn_query")
            net_rnn_query = ExpandDimsLayer(net_rnn_query,
                                            axis=1,
                                            name="rnn_query_expand")
            net_rnn_query = TileLayer(net_rnn_query,
                                      [1, config.out_seq_length, 1],
                                      name="rnn_query_tile")
            net_rnn_query = ReshapeLayer(
                net_rnn_query, (config.batch_size * config.out_seq_length,
                                config.query_dim_hidden),
                name="rnn_query_reshape")

            net_traffic_state = InputLayer(self.traffic_state,
                                           name="in_traffic_state")

            if is_train:
                net_rnn_traffic = ReshapeLayer(
                    net_rnn_seq2seq,
                    (config.batch_size, config.out_seq_length + 1,
                     config.dim_hidden),
                    name="reshape_traffic_q1")
                net_rnn_traffic.outputs = tf.slice(
                    net_rnn_traffic.outputs, [0, 0, 0], [
                        config.batch_size, config.out_seq_length,
                        config.dim_hidden
                    ],
                    name="slice_traffic_q")
                net_rnn_traffic = ReshapeLayer(
                    net_rnn_traffic,
                    (config.batch_size * config.out_seq_length,
                     config.dim_hidden),
                    name="reshape_traffic_q2")
                net_out = ConcatLayer([net_rnn_traffic, net_rnn_query],
                                      concat_dim=-1,
                                      name="concat_traffic_query1")
            else:
                net_out = ConcatLayer([net_traffic_state, net_rnn_query],
                                      concat_dim=-1,
                                      name="concat_traffic_query2")

            # net_out = DenseLayer(net_out, n_units=128, act=tf.nn.relu, name="dense_query1")
            # net_out = DenseLayer(net_out, n_units=32, act=tf.nn.relu, name="dense_query2")
            net_out = DenseLayer(net_out,
                                 n_units=1,
                                 act=tf.identity,
                                 name="dense_query3")
            # net_out = ReshapeLayer(net_out, (config.batch_size, config.out_seq_length + 1, 1), name="reshape_out")
            # if is_train:
            net_out = ReshapeLayer(
                net_out, (config.batch_size, config.out_seq_length, 1),
                name="reshape_out")
            # else:
            #    net_out = ReshapeLayer(net_out, (config.batch_size, 1, 1), name="reshape_out")
        return net_rnn_seq2seq, net_out_seq2seq, net_rnn_query, net_out
Code Example #22
    def __get_network__(self, model_name, encode_seqs, class_label_seqs, kg_vector, reuse=False, is_train=True):
        with tf.variable_scope(model_name, reuse=reuse):
            tl.layers.set_name_reuse(reuse)

            net_word_embed = InputLayer(
                inputs=encode_seqs,
                name="in_word_embed"
            )

            net_class_label_embed = InputLayer(
                inputs=class_label_seqs,
                name="in_class_label_embed"
            )

            net_kg = InputLayer(
                inputs=kg_vector,
                name='in_kg'
            )

            net_kg = ReshapeLayer(
                net_kg,
                shape=(-1, self.kg_embedding_dim),
                name="reshape_kg_1"
            )

            net_kg = ReshapeLayer(
                net_kg,
                shape=(-1, self.max_length, self.kg_embedding_dim),
                name="reshape_kg_2"
            )

            if config.model == "vwvcvkg":
                # dbpedia and 20news
                net_in = ConcatLayer(
                    [net_word_embed, net_class_label_embed, net_kg],
                    concat_dim=-1,
                    name='concat_vw_vwc_vc'
                )
            elif config.model == "vwvc":
                net_in = ConcatLayer(
                    [net_word_embed, net_class_label_embed],
                    concat_dim=-1,
                    name='concat_vw_vc'
                )
            elif config.model == "vwvkg":
                net_in = ConcatLayer(
                    [net_word_embed, net_kg],
                    concat_dim=-1,
                    name='concat_vw_vwc'
                )
            elif config.model == "vcvkg":
                net_in = ConcatLayer(
                    [net_class_label_embed, net_kg],
                    concat_dim=-1,
                    name='concat_vc_vwc'
                )
            elif config.model == "kgonly":
                net_in = ConcatLayer(
                    [net_kg],
                    concat_dim=-1,
                    name='concat_vwc'
                )
            else:
                raise Exception("config.model value error")

            filter_length = [2, 4, 8]
            # dbpedia
            n_filter = 600
            # n_filter = 200

            net_cnn_list = list()

            for fsz in filter_length:

                net_cnn = Conv1d(
                    net_in,
                    n_filter=n_filter,
                    filter_size=fsz,
                    stride=1,
                    act=tf.nn.relu,
                    name="cnn%d" % fsz
                )
                net_cnn.outputs = tf.reduce_max(net_cnn.outputs, axis=1, name="global_maxpool%d" % fsz)
                net_cnn_list.append(net_cnn)

            '''
            if config.model == "vwvcvkg":
                net_class_label_embed.outputs = tf.slice(
                    net_class_label_embed.outputs,
                    [0, 0, 0],
                    [config.batch_size, 1, self.word_embedding_dim],
                    name="slice_word"
                )
                net_class_label_embed.outputs = tf.squeeze(
                    net_class_label_embed.outputs,
                    name="squeeze_word"
                )
                net_cnn = ConcatLayer(net_cnn_list + [net_class_label_embed], concat_dim=-1)
            else:
                net_cnn = ConcatLayer(net_cnn_list, concat_dim=-1)
            '''
            net_cnn = ConcatLayer(net_cnn_list, concat_dim=-1)

            net_fc = DropoutLayer(net_cnn, keep=0.5, is_fix=True, is_train=is_train, name='drop1')

            net_fc = DenseLayer(
                net_fc,
                n_units=400,
                act=tf.nn.relu,
                name="fc_1"
            )

            net_fc = DropoutLayer(net_fc, keep=0.5, is_fix=True, is_train=is_train, name='drop2')

            # dbpedia
            net_fc = DenseLayer(
                net_fc,
                n_units=100,
                act=tf.nn.relu,
                name="fc_2"
            )
            net_fc = DropoutLayer(net_fc, keep=0.5, is_fix=True, is_train=is_train, name='drop3')

            net_fc = DenseLayer(
                net_fc,
                n_units=1,
                act=tf.nn.sigmoid,
                name="fc_3"
            )
        return net_fc, net_cnn
Code Example #23
def concat(layers, concat_dim=-1, name='concat'):
    return ConcatLayer(layers, concat_dim=concat_dim, name=name)        
Code Example #24
def UNet_B(lf_extra,
           n_slices,
           output_size,
           n_pyramid_levels=4,
           n_base_filters=128,
           using_batch_norm=False,
           is_train=False,
           last_act=tf.nn.relu,
           reuse=False,
           name='unet'):
    '''U-Net based VCD-Net for sparse light field reconstruction; faster than UNet_A.
    Params:
        lf_extra: tf.tensor
            In shape of [batch, height, width, n_num^2], the views extracted from the light field image.
        n_slices: int
            The number of slices in the 3-D reconstruction.
        output_size: list of int
            Lateral size of the 3-D reconstruction, i.e., [height, width].
        using_batch_norm: boolean
            Whether to use batch normalization after each convolutional layer.
        is_train: boolean, only valid when using_batch_norm=True.
            See tl.layers.BatchNormLayer.
        last_act: tensorflow activation function
            Activation function applied to the final layer.
        reuse: boolean
            Whether to reuse the variables or not. See tf.variable_scope() for details.
        name: string
            The name of the variable scope.
    Return:
        The 3-D reconstruction, in shape of [batch, height, width, depth=n_slices].
    '''
    n_interp = 4
    channels_interp = 128
    act = tf.nn.leaky_relu

    with tf.variable_scope(name, reuse=reuse):
        n = InputLayer(lf_extra, 'lf_extra')
        n = conv2d(n, n_filter=channels_interp, filter_size=5, name='conv1')
        # n = conv(n, n_filter=channels_interp, filter_size=5, act=act, using_batch_norm=using_batch_norm, is_train=is_train, name='conv1')

        ## Up-scale input
        with tf.variable_scope('interp'):
            for i in range(n_interp):
                channels_interp = channels_interp // 2
                n = upscale(n,
                            out_channels=channels_interp,
                            scale=2,
                            mode='subpixel',
                            name='upsale%d' % i)

        pyramid_channels = [
            n_base_filters * i for i in range(1, n_pyramid_levels + 1)
        ]  # output channels number of each conv layer in the encoder
        encoder_layers = []
        with tf.variable_scope('encoder'):
            n = conv2d(n, n_filter=64, filter_size=3, stride=2, name='conv0')

            for idx, nc in enumerate(pyramid_channels):

                encoder_layers.append(
                    n
                )  # append n0, n1, n2, n3 (but without n4) to the layers list
                print('encoder %d : %s' % (idx, str(n.outputs.get_shape())))
                n = LReluLayer(n, name='relu%d' % (idx + 1))
                n = conv2d(n,
                           n_filter=nc,
                           filter_size=3,
                           stride=2,
                           name='conv%d' % (idx + 1))
                # n = max_pool2d(n, filter_size=2, stride=2)

        nl = len(encoder_layers)
        with tf.variable_scope('decoder'):
            _, h, w, _ = encoder_layers[-1].outputs.shape.as_list()
            n = ReluLayer(n, name='relu1')
            n = upscale(n,
                        out_channels=pyramid_channels[-1],
                        out_size=(h, w),
                        mode='upconv',
                        name='upsale1')

            for idx in range(nl - 1, -1, -1):  # idx = nl-1, ..., 0 (3, 2, 1, 0 with the default 4 pyramid levels)
                if idx > 0:
                    _, h, w, _ = encoder_layers[idx -
                                                1].outputs.shape.as_list()
                    out_size = (h, w)
                    out_channels = pyramid_channels[idx - 1]

                else:
                    out_size = output_size
                    out_channels = n_base_filters

                print('decoder %d : %s' % (idx, str(n.outputs.get_shape())))
                n = ConcatLayer([encoder_layers[idx], n],
                                concat_dim=-1,
                                name='concat%d' % (nl - idx))
                n = ReluLayer(n, name='relu%d' % (nl - idx + 1))
                n = upscale(n,
                            out_channels=out_channels,
                            out_size=out_size,
                            mode='upconv',
                            name='upscale%d' % (nl - idx + 1))
                #n = DropoutLayer(n, keep=0.5, is_fix=True, is_train=is_train, name='dropout1')

            n = conv2d(n,
                       n_filter=n_slices,
                       filter_size=3,
                       act=last_act,
                       name='out')
            return n
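
A minimal call sketch for UNet_B above, assuming TF 1.x placeholders. The view count (n_num = 11), lateral size, and slice count are illustrative assumptions, not values taken from the original project:

import tensorflow as tf

lf_extra = tf.placeholder(tf.float32, [1, 176, 176, 11 * 11])   # [batch, height, width, n_num^2]
net = UNet_B(lf_extra, n_slices=61, output_size=[176, 176], is_train=False, name='unet_b')
recon = net.outputs   # expected shape: [1, 176, 176, 61]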
Code Example #25
File: vgg_model.py  Project: istar123456/tensorlayer
def _stage(cnn, b1, b2, n_pos, maskInput1, maskInput2, name='stageX'):
    with tf.variable_scope(name):
        net = ConcatLayer([cnn, b1, b2], -1, name='concat')
        with tf.variable_scope("branch1"):
            b1 = Conv2d(net,
                        128, (7, 7), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        W_init=_init_norm,
                        b_init=_b_initer,
                        name='c1')
            b1 = Conv2d(b1,
                        128, (7, 7), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        W_init=_init_norm,
                        b_init=_b_initer,
                        name='c2')
            b1 = Conv2d(b1,
                        128, (7, 7), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        W_init=_init_norm,
                        b_init=_b_initer,
                        name='c3')
            b1 = Conv2d(b1,
                        128, (7, 7), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        W_init=_init_norm,
                        b_init=_b_initer,
                        name='c4')
            b1 = Conv2d(b1,
                        128, (7, 7), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        W_init=_init_norm,
                        b_init=_b_initer,
                        name='c5')
            b1 = Conv2d(b1,
                        128, (1, 1), (1, 1),
                        tf.nn.relu,
                        'VALID',
                        W_init=_init_norm,
                        b_init=_b_initer,
                        name='c6')
            b1 = Conv2d(b1,
                        n_pos, (1, 1), (1, 1),
                        None,
                        'VALID',
                        W_init=_init_norm,
                        b_init=_b_initer,
                        name='conf')
            b1.outputs = b1.outputs * maskInput1
        with tf.variable_scope("branch2"):
            b2 = Conv2d(net,
                        128, (7, 7), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        W_init=_init_norm,
                        b_init=_b_initer,
                        name='c1')
            b2 = Conv2d(b2,
                        128, (7, 7), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        W_init=_init_norm,
                        b_init=_b_initer,
                        name='c2')
            b2 = Conv2d(b2,
                        128, (7, 7), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        W_init=_init_norm,
                        b_init=_b_initer,
                        name='c3')
            b2 = Conv2d(b2,
                        128, (7, 7), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        W_init=_init_norm,
                        b_init=_b_initer,
                        name='c4')
            b2 = Conv2d(b2,
                        128, (7, 7), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        W_init=_init_norm,
                        b_init=_b_initer,
                        name='c5')
            b2 = Conv2d(b2,
                        128, (1, 1), (1, 1),
                        tf.nn.relu,
                        'VALID',
                        W_init=_init_norm,
                        b_init=_b_initer,
                        name='c6')
            b2 = Conv2d(b2,
                        38, (1, 1), (1, 1),
                        None,
                        'VALID',
                        W_init=_init_norm,
                        b_init=_b_initer,
                        name='pafs')
            b2.outputs = b2.outputs * maskInput2
    return b1, b2
Code Example #26
def u_net(x, is_train=False, reuse=False, n_out=1):
    _, nx, ny, nz = x.get_shape().as_list()
    with tf.variable_scope("u_net", reuse=reuse):
        tl.layers.set_name_reuse(reuse)
        inputs = InputLayer(x, name='inputs')
        conv1 = Conv2d(inputs, 64, (3, 3), act=tf.nn.relu, name='conv1_1')
        #        conv1 = DropoutLayer(conv1, keep=0.2, name='drop1')
        conv1 = Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, name='conv1_2')
        pool1 = MaxPool2d(conv1, (2, 2), name='pool1')  #120*120

        conv2 = Conv2d(pool1, 128, (3, 3), act=tf.nn.relu, name='conv2_1')
        #        conv2 = DropoutLayer(conv2, keep=0.2, name='drop2')
        conv2 = Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, name='conv2_2')
        pool2 = MaxPool2d(conv2, (2, 2), name='pool2')  #60*60

        conv3 = Conv2d(pool2, 256, (3, 3), act=tf.nn.relu, name='conv3_1')
        #        conv3 = DropoutLayer(conv3, keep=0.2, name='drop3')
        conv3 = Conv2d(conv3, 256, (3, 3), act=tf.nn.relu, name='conv3_2')
        pool3 = MaxPool2d(conv3, (2, 2), name='pool3')  #30*30

        conv4 = Conv2d(pool3, 512, (3, 3), act=tf.nn.relu, name='conv4_1')
        #        conv4 = DropoutLayer(conv4, keep=0.2, name='drop4')
        conv4 = Conv2d(conv4, 512, (3, 3), act=tf.nn.relu, name='conv4_2')
        pool4 = MaxPool2d(conv4, (2, 2), name='pool4')  #15*15

        conv5 = Conv2d(pool4, 1024, (3, 3), act=tf.nn.relu, name='conv5_1')
        #        conv5 = DropoutLayer(conv5, keep=0.2, name='drop5')
        conv5 = Conv2d(conv5, 1024, (3, 3), act=tf.nn.relu, name='conv5_2')

        up4 = DeConv2d(conv5,
                       512, (3, 3), (nx // 8, ny // 8), (2, 2),
                       name='deconv4')
        up4 = ConcatLayer([up4, conv4], 3, name='concat4')
        conv4 = Conv2d(up4, 512, (3, 3), act=tf.nn.relu, name='uconv4_1')
        #        conv4 = DropoutLayer(conv4, keep=0.2, name='drop6')
        conv4 = Conv2d(conv4, 512, (3, 3), act=tf.nn.relu, name='uconv4_2')

        up3 = DeConv2d(conv4,
                       256, (3, 3), (nx // 4, ny // 4), (2, 2),
                       name='deconv3')
        up3 = ConcatLayer([up3, conv3], 3, name='concat3')
        conv3 = Conv2d(up3, 256, (3, 3), act=tf.nn.relu, name='uconv3_1')
        #        conv3 = DropoutLayer(conv3, keep=0.2, name='drop7')
        conv3 = Conv2d(conv3, 256, (3, 3), act=tf.nn.relu, name='uconv3_2')

        up2 = DeConv2d(conv3,
                       128, (3, 3), (nx // 2, ny // 2), (2, 2),
                       name='deconv2')
        up2 = ConcatLayer([up2, conv2], 3, name='concat2')
        conv2 = Conv2d(up2, 128, (3, 3), act=tf.nn.relu, name='uconv2_1')
        #        conv2 = DropoutLayer(conv2, keep=0.2, name='drop8')
        conv2 = Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, name='uconv2_2')

        up1 = DeConv2d(conv2,
                       64, (3, 3), (nx // 1, ny // 1), (2, 2),
                       name='deconv1')
        up1 = ConcatLayer([up1, conv1], 3, name='concat1')
        conv1 = Conv2d(up1, 64, (3, 3), act=tf.nn.relu, name='uconv1_1')
        #        conv1 = DropoutLayer(conv1, keep=0.2, name='drop9')
        conv1 = Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, name='uconv1_2')
        #conv1 = Conv2d(conv1, n_out, (1, 1), act=tf.nn.sigmoid, name='uconv1')
        conv1 = Conv2d(conv1, n_out, (1, 1), act=tf.nn.sigmoid, name='uconv1')

    return conv1
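
A minimal call sketch for u_net above. The 240x240 single-channel input is an assumption inferred from the pooling comments (#120*120, #60*60, ...); any spatial size divisible by 16 should behave the same:

import tensorflow as tf

x = tf.placeholder(tf.float32, [4, 240, 240, 1], name='image')
seg = u_net(x, is_train=False, reuse=False, n_out=1)
probs = seg.outputs   # sigmoid output, expected shape: [4, 240, 240, 1]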
Code Example #27
    def squeezenetv1(cls, x, end_with='output', is_train=False, reuse=None):
        with tf.compat.v1.variable_scope("squeezenetv1", reuse=reuse):
            with tf.compat.v1.variable_scope("input"):
                n = InputLayer(x)
                # n = Conv2d(n, 96, (7,7),(2,2),tf.nn.relu,'SAME',name='conv1')
                n = Conv2d(n,
                           64, (3, 3), (2, 2),
                           tf.nn.relu,
                           'SAME',
                           name='conv1')
                n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire2"):
                n = Conv2d(n,
                           16, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            64, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            64, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire3"):
                n = Conv2d(n,
                           16, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            64, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            64, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
                n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire4"):
                n = Conv2d(n,
                           32, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            128, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            128, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire5"):
                n = Conv2d(n,
                           32, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            128, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            128, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
                n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire6"):
                n = Conv2d(n,
                           48, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            192, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            192, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire7"):
                n = Conv2d(n,
                           48, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            192, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            192, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire8"):
                n = Conv2d(n,
                           64, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            256, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            256, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire9"):
                n = Conv2d(n,
                           64, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            256, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            256, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("output"):
                n = DropoutLayer(n,
                                 keep=0.5,
                                 is_fix=True,
                                 is_train=is_train,
                                 name='drop1')
                n = Conv2d(n,
                           1000, (1, 1), (1, 1),
                           padding='VALID',
                           name='conv10')  # 13, 13, 1000
                n = GlobalMeanPool2d(n)
            if end_with in n.outputs.name:
                return n

            raise Exception("end_with : input, fire2, fire3 ... fire9, output")
Code Example #28
File: model.py  Project: zeroslope/a2net
def a2net(x, is_train=True, reuse=False):
    w_init = tf.random_normal_initializer(stddev=0.02)
    gamma_init = tf.random_normal_initializer(1., 0.02)

    with tf.variable_scope('a2net', reuse=reuse):
        net_in = InputLayer(x, name='input')
        inputY = InputLayer(x[:, :, :, :1], name='inputY')
        inputUV = InputLayer(x[:, :, :, 1:], name='inputUV')

        # Encoder

        conv1 = Conv2d(net_in,
                       32, (3, 3), (1, 1),
                       act=None,
                       W_init=w_init,
                       b_init=None,
                       name='encoder/conv1')
        conv1 = BatchNormLayer(conv1,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='encoder/bn1')
        conv2 = Conv2d(conv1,
                       32, (3, 3), (1, 1),
                       act=None,
                       W_init=w_init,
                       b_init=None,
                       name='encoder/conv2')
        conv2 = BatchNormLayer(conv2,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='encoder/bn2')

        concat1 = ConcatLayer([conv1, conv2],
                              concat_dim=-1,
                              name='encoder/concat1')
        aggregation1 = Conv2d(concat1,
                              32, (4, 4), (2, 2),
                              act=None,
                              W_init=w_init,
                              b_init=None,
                              name='encoder/aggregation1')
        aggregation1 = BatchNormLayer(aggregation1,
                                      act=tf.nn.relu,
                                      is_train=is_train,
                                      gamma_init=gamma_init,
                                      name='encoder/bn3')

        conv3 = Conv2d(aggregation1,
                       32, (3, 3), (1, 1),
                       act=None,
                       W_init=w_init,
                       b_init=None,
                       name='encoder/conv3')
        conv3 = BatchNormLayer(conv3,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='encoder/bn4')

        concat2 = ConcatLayer([aggregation1, conv3],
                              concat_dim=-1,
                              name='encoder/concat2')
        aggregation2 = Conv2d(concat2,
                              32, (4, 4), (2, 2),
                              act=None,
                              W_init=w_init,
                              b_init=None,
                              name='encoder/aggregation2')
        aggregation2 = BatchNormLayer(aggregation2,
                                      act=tf.nn.relu,
                                      is_train=is_train,
                                      gamma_init=gamma_init,
                                      name='encoder/bn5')

        conv4 = Conv2d(aggregation2,
                       32, (3, 3), (1, 1),
                       act=None,
                       W_init=w_init,
                       b_init=None,
                       name='encoder/conv4')
        conv4 = BatchNormLayer(conv4,
                               act=tf.nn.relu,
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='encoder/bn6')

        concat3 = ConcatLayer([aggregation2, conv4],
                              concat_dim=-1,
                              name='encoder/concat3')
        aggregation3 = Conv2d(concat3,
                              32, (4, 4), (2, 2),
                              act=None,
                              W_init=w_init,
                              b_init=None,
                              name='encoder/aggregation3')
        aggregation3 = BatchNormLayer(aggregation3,
                                      act=tf.nn.relu,
                                      is_train=is_train,
                                      gamma_init=gamma_init,
                                      name='encoder/bn7')

        # DecoderY

        convY_1 = Conv2d(aggregation3,
                         32, (3, 3), (1, 1),
                         act=None,
                         W_init=w_init,
                         b_init=None,
                         name='decoderY/conv1')
        convY_1 = BatchNormLayer(convY_1,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 gamma_init=gamma_init,
                                 name='decoderY/bn1')

        concatY_1 = ConcatLayer([aggregation3, convY_1],
                                concat_dim=-1,
                                name='decoderY/concat1')
        aggregationY_1 = DeConv2d(concatY_1,
                                  32, (2, 2), (2, 2),
                                  act=None,
                                  W_init=w_init,
                                  b_init=None,
                                  name='decoderY/aggregation1')
        aggregationY_1 = BatchNormLayer(aggregationY_1,
                                        act=tf.nn.relu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='decoderY/bn2')

        copyY_1 = ConcatLayer([conv4, aggregationY_1],
                              concat_dim=-1,
                              name='decoderY/copy1')
        convY_2 = Conv2d(copyY_1,
                         32, (3, 3), (1, 1),
                         act=None,
                         W_init=w_init,
                         b_init=None,
                         name='decoderY/conv2')
        convY_2 = BatchNormLayer(convY_2,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 gamma_init=gamma_init,
                                 name='decoderY/bn3')

        concatY_2 = ConcatLayer([copyY_1, convY_2],
                                concat_dim=-1,
                                name='decoderY/concat2')
        aggregationY_2 = DeConv2d(concatY_2,
                                  32, (2, 2), (2, 2),
                                  act=None,
                                  W_init=w_init,
                                  b_init=None,
                                  name='decoderY/aggregation2')
        aggregationY_2 = BatchNormLayer(aggregationY_2,
                                        act=tf.nn.relu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='decoderY/bn4')

        copyY_2 = ConcatLayer([conv3, aggregationY_2],
                              concat_dim=-1,
                              name='decoderY/copy2')
        convY_3 = Conv2d(copyY_2,
                         32, (3, 3), (1, 1),
                         act=None,
                         W_init=w_init,
                         b_init=None,
                         name='decoderY/conv3')
        convY_3 = BatchNormLayer(convY_3,
                                 act=tf.nn.relu,
                                 is_train=is_train,
                                 gamma_init=gamma_init,
                                 name='decoderY/bn5')

        concatY_3 = ConcatLayer([copyY_2, convY_3],
                                concat_dim=-1,
                                name='decoderY/concat3')
        aggregationY_3 = DeConv2d(concatY_3,
                                  32, (2, 2), (2, 2),
                                  act=None,
                                  W_init=w_init,
                                  b_init=None,
                                  name='decoderY/aggregation3')
        aggregationY_3 = BatchNormLayer(aggregationY_3,
                                        act=tf.nn.relu,
                                        is_train=is_train,
                                        gamma_init=gamma_init,
                                        name='decoderY/bn6')

        copyY_3 = ConcatLayer([conv2, aggregationY_3],
                              concat_dim=-1,
                              name='decoderY/copy3')

        outputY = Conv2d(copyY_3,
                         1, (3, 3), (1, 1),
                         act=tf.nn.tanh,
                         name='decoderY/output')

        # DecoderUV: same structure as DecoderY, but with UV_SIZE filters per stage,
        # producing the two-channel U/V (chroma) output.

        convUV_1 = Conv2d(aggregation3,
                          UV_SIZE, (3, 3), (1, 1),
                          act=None,
                          W_init=w_init,
                          b_init=None,
                          name='decoderUV/conv1')
        convUV_1 = BatchNormLayer(convUV_1,
                                  act=tf.nn.relu,
                                  is_train=is_train,
                                  gamma_init=gamma_init,
                                  name='decoderUV/bn1')

        concatUV_1 = ConcatLayer([aggregation3, convUV_1],
                                 concat_dim=-1,
                                 name='decoderUV/concat1')
        aggregationUV_1 = DeConv2d(concatUV_1,
                                   UV_SIZE, (2, 2), (2, 2),
                                   act=None,
                                   W_init=w_init,
                                   b_init=None,
                                   name='decoderUV/aggregation1')
        aggregationUV_1 = BatchNormLayer(aggregationUV_1,
                                         act=tf.nn.relu,
                                         is_train=is_train,
                                         gamma_init=gamma_init,
                                         name='decoderUV/bn2')

        copyUV_1 = ConcatLayer([conv4, aggregationUV_1],
                               concat_dim=-1,
                               name='decoderUV/copy1')
        convUV_2 = Conv2d(copyUV_1,
                          UV_SIZE, (3, 3), (1, 1),
                          act=None,
                          W_init=w_init,
                          b_init=None,
                          name='decoderUV/conv2')
        convUV_2 = BatchNormLayer(convUV_2,
                                  act=tf.nn.relu,
                                  is_train=is_train,
                                  gamma_init=gamma_init,
                                  name='decoderUV/bn3')

        concatUV_2 = ConcatLayer([copyUV_1, convUV_2],
                                 concat_dim=-1,
                                 name='decoderUV/concat2')
        aggregationUV_2 = DeConv2d(concatUV_2,
                                   UV_SIZE, (2, 2), (2, 2),
                                   act=None,
                                   W_init=w_init,
                                   b_init=None,
                                   name='decoderUV/aggregation2')
        aggregationUV_2 = BatchNormLayer(aggregationUV_2,
                                         act=tf.nn.relu,
                                         is_train=is_train,
                                         gamma_init=gamma_init,
                                         name='decoderUV/bn4')

        copyUV_2 = ConcatLayer([conv3, aggregationUV_2],
                               concat_dim=-1,
                               name='decoderUV/copy2')
        convUV_3 = Conv2d(copyUV_2,
                          UV_SIZE, (3, 3), (1, 1),
                          act=None,
                          W_init=w_init,
                          b_init=None,
                          name='decoderUV/conv3')
        convUV_3 = BatchNormLayer(convUV_3,
                                  act=tf.nn.relu,
                                  is_train=is_train,
                                  gamma_init=gamma_init,
                                  name='decoderUV/bn5')

        concatUV_3 = ConcatLayer([copyUV_2, convUV_3],
                                 concat_dim=-1,
                                 name='decoderUV/concat3')
        aggregationUV_3 = DeConv2d(concatUV_3,
                                   UV_SIZE, (2, 2), (2, 2),
                                   act=None,
                                   W_init=w_init,
                                   b_init=None,
                                   name='decoderUV/aggregation3')
        aggregationUV_3 = BatchNormLayer(aggregationUV_3,
                                         act=tf.nn.relu,
                                         is_train=is_train,
                                         gamma_init=gamma_init,
                                         name='decoderUV/bn6')

        copyUV_3 = ConcatLayer([conv2, aggregationUV_3],
                               concat_dim=-1,
                               name='decoderUV/copy3')

        outputUV = Conv2d(copyUV_3,
                          2, (3, 3), (1, 1),
                          act=tf.nn.tanh,
                          name='decoderUV/output')

        outY_plus_Y = ElementwiseLambdaLayer(
            [outputY, inputY],
            fn=lambda x, y: BETA * x + (1 - BETA) * y,
            name='outY_plus_Y')

        outUV_plus_UV = ElementwiseLambdaLayer(
            [outputUV, inputUV],
            fn=lambda x, y: BETA * x + (1 - BETA) * y,
            name='outUV_plus_UV')

        net_out = ConcatLayer([outY_plus_Y, outUV_plus_UV],
                              concat_dim=-1,
                              name='net_out')

        return outY_plus_Y, outUV_plus_UV, net_out
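
A note on the tail of this builder: the two ElementwiseLambdaLayer calls do not emit the decoder
predictions directly. Each one mixes the prediction back into the corresponding input plane as
out = BETA * prediction + (1 - BETA) * input, so BETA controls how far the result may drift from
the original Y or UV channels before both blends are concatenated into net_out. A minimal sketch
of that blend outside the TensorFlow graph (the BETA value and array shapes below are assumptions
for illustration, not taken from this project):

import numpy as np

BETA = 0.9  # assumed blend weight; the real constant is defined elsewhere in the project

def blend(prediction, original):
    # Same arithmetic as the fn passed to ElementwiseLambdaLayer above.
    return BETA * prediction + (1 - BETA) * original

y_pred = np.zeros((1, 64, 64, 1), dtype=np.float32)  # stand-in for the tanh output in [-1, 1]
y_in = np.ones((1, 64, 64, 1), dtype=np.float32)     # stand-in for the input Y plane
print(blend(y_pred, y_in).mean())                    # 0.1: mostly prediction, a little input
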
Code Example #29
def u_net_bn(x, is_train=False, reuse=False, n_out=1):
    from tensorlayer.layers import (InputLayer, Conv2d, MaxPool2d, DeConv2d,
                                    ConcatLayer, BatchNormLayer)
    _, nx, ny, nz = x.get_shape().as_list()
    gamma_init = tf.random_normal_initializer(1., 0.02)
    with tf.variable_scope("u_net", reuse=reuse):
        tl.layers.set_name_reuse(reuse)
        inputs = InputLayer(x, name='inputs')
        conv1 = Conv2d(inputs, 64, (3, 3), act=tf.nn.relu, name='conv1_1')
        #        conv1 = DropoutLayer(conv1, keep=0.2, name='drop1')
        conv1 = Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, name='conv1_2')
        pool1 = MaxPool2d(conv1, (2, 2), name='pool1')  #120
        conv2 = BatchNormLayer(pool1,
                               act=lambda x: tl.act.lrelu(x, 0.2),
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='bn1')

        conv2 = Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, name='conv2_1')
        #        conv2 = DropoutLayer(conv2, keep=0.2, name='drop2')
        conv2 = Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, name='conv2_2')
        pool2 = MaxPool2d(conv2, (2, 2), name='pool2')  #60
        conv3 = BatchNormLayer(pool2,
                               act=lambda x: tl.act.lrelu(x, 0.2),
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='bn2')

        conv3 = Conv2d(conv3, 256, (3, 3), act=tf.nn.relu, name='conv3_1')
        #        conv3 = DropoutLayer(conv3, keep=0.2, name='drop3')
        conv3 = Conv2d(conv3, 256, (3, 3), act=tf.nn.relu, name='conv3_2')
        pool3 = MaxPool2d(conv3, (2, 2), name='pool3')  #30
        conv4 = BatchNormLayer(pool3,
                               act=lambda x: tl.act.lrelu(x, 0.2),
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='bn3')

        conv4 = Conv2d(conv4, 512, (3, 3), act=tf.nn.relu, name='conv4_1')
        #        conv4 = DropoutLayer(conv4, keep=0.2, name='drop4')
        conv4 = Conv2d(conv4, 512, (3, 3), act=tf.nn.relu, name='conv4_2')
        pool4 = MaxPool2d(conv4, (2, 2), name='pool4')  #15
        conv5 = BatchNormLayer(pool4,
                               act=lambda x: tl.act.lrelu(x, 0.2),
                               is_train=is_train,
                               gamma_init=gamma_init,
                               name='bn4')

        conv5 = Conv2d(conv5, 1024, (3, 3), act=tf.nn.relu, name='conv5_1')
        #        conv5 = DropoutLayer(conv5, keep=0.2, name='drop5')
        conv5 = Conv2d(conv5, 1024, (3, 3), act=tf.nn.relu, name='conv5_2')

        up4 = DeConv2d(conv5,
                       512, (3, 3), (nx // 8, ny // 8), (2, 2),
                       name='deconv4')
        up4 = BatchNormLayer(up4,
                             act=lambda x: tl.act.lrelu(x, 0.2),
                             is_train=is_train,
                             gamma_init=gamma_init,
                             name='bn5')
        up4 = ConcatLayer([up4, conv4], 3, name='concat4')  #30

        conv4 = Conv2d(up4, 512, (3, 3), act=tf.nn.relu, name='uconv4_1')
        #        conv4 = DropoutLayer(conv4, keep=0.2, name='drop6')
        conv4 = Conv2d(conv4, 512, (3, 3), act=tf.nn.relu, name='uconv4_2')
        up3 = DeConv2d(conv4,
                       256, (3, 3), (nx // 4, ny // 4), (2, 2),
                       name='deconv3')
        up3 = BatchNormLayer(up3,
                             act=lambda x: tl.act.lrelu(x, 0.2),
                             is_train=is_train,
                             gamma_init=gamma_init,
                             name='bn6')
        up3 = ConcatLayer([up3, conv3], 3, name='concat3')  #60

        conv3 = Conv2d(up3, 256, (3, 3), act=tf.nn.relu, name='uconv3_1')
        #        conv3 = DropoutLayer(conv3, keep=0.2, name='drop7')
        conv3 = Conv2d(conv3, 256, (3, 3), act=tf.nn.relu, name='uconv3_2')
        up2 = DeConv2d(conv3,
                       128, (3, 3), (nx // 2, ny // 2), (2, 2),
                       name='deconv2')
        up2 = BatchNormLayer(up2,
                             act=lambda x: tl.act.lrelu(x, 0.2),
                             is_train=is_train,
                             gamma_init=gamma_init,
                             name='bn7')
        up2 = ConcatLayer([up2, conv2], 3, name='concat2')  #120

        conv2 = Conv2d(up2, 128, (3, 3), act=tf.nn.relu, name='uconv2_1')
        #        conv2 = DropoutLayer(conv2, keep=0.2, name='drop8')
        conv2 = Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, name='uconv2_2')
        up1 = DeConv2d(conv2,
                       64, (3, 3), (nx, ny), (2, 2),
                       name='deconv1')
        up1 = BatchNormLayer(up1,
                             act=lambda x: tl.act.lrelu(x, 0.2),
                             is_train=is_train,
                             gamma_init=gamma_init,
                             name='bn8')
        up1 = ConcatLayer([up1, conv1], 3, name='concat1')  #240

        conv1 = Conv2d(up1, 64, (3, 3), act=tf.nn.relu, name='uconv1_1')
        #        conv1 = DropoutLayer(conv1, keep=0.2, name='drop9')
        conv1 = Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, name='uconv1_2')
        conv1 = Conv2d(conv1, n_out, (1, 1), act=tf.nn.sigmoid, name='uconv1')

    return conv1
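
A hypothetical usage sketch for u_net_bn (the placeholder shape and variable names are assumptions
for illustration): the spatial size must be divisible by 16 because of the four 2x2 poolings, and a
second call with reuse=True shares the weights for an inference graph.

import tensorflow as tf
import tensorlayer as tl

x = tf.placeholder(tf.float32, [None, 240, 240, 1], name='x')
net_train = u_net_bn(x, is_train=True, reuse=False, n_out=1)   # training graph
net_test = u_net_bn(x, is_train=False, reuse=True, n_out=1)    # shares the trained weights
print(net_train.outputs)  # sigmoid probability map with the same spatial size as the input
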
Code Example #30
File: utils.py  Project: eglrp/U-Net-2
def u_net_2d_32_512_upsam(x, n_out=2):
    """
    https://github.com/jocicmarko/ultrasound-nerve-segmentation
    """
    from tensorlayer.layers import InputLayer, Conv2d, MaxPool2d, UpSampling2dLayer, ConcatLayer
    batch_size = int(x._shape[0])
    nx = int(x._shape[1])
    ny = int(x._shape[2])
    nz = int(x._shape[3])
    print(" * Input: size of image: %d %d %d" % (nx, ny, nz))
    ## define initializer
    w_init = tf.truncated_normal_initializer(stddev=0.01)
    b_init = tf.constant_initializer(value=0.0)
    inputs = InputLayer(x, name='inputs')
    # inputs = Input((1, img_rows, img_cols))
    conv1 = Conv2d(inputs, 32, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_1')
    # print(conv1.outputs) # (10, 240, 240, 32)
    # conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Conv2d(conv1, 32, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_2')
    # print(conv1.outputs)    # (10, 240, 240, 32)
    # conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPool2d(conv1, (2, 2), padding='SAME', name='pool1')
    # pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    # print(pool1.outputs)    # (10, 120, 120, 32)
    # exit()
    conv2 = Conv2d(pool1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_1')
    # conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Conv2d(conv2, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_2')
    # conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPool2d(conv2, (2, 2), padding='SAME', name='pool2')
    # pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2d(pool2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv3_1')
    # conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Conv2d(conv3, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv3_2')
    # conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
    pool3 = MaxPool2d(conv3, (2, 2), padding='SAME', name='pool3')
    # pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    # print(pool3.outputs)   # (10, 30, 30, 64)

    conv4 = Conv2d(pool3, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv4_1')
    # print(conv4.outputs)    # (10, 30, 30, 256)
    # conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
    conv4 = Conv2d(conv4, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv4_2')
    # print(conv4.outputs)    # (10, 30, 30, 256) != (10, 30, 30, 512)
    # conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
    pool4 = MaxPool2d(conv4, (2, 2), padding='SAME', name='pool4')
    # pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Conv2d(pool4, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv5_1')
    # conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
    conv5 = Conv2d(conv5, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv5_2')
    # conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
    # print(conv5.outputs)    # (10, 15, 15, 512)
    print(" * After conv: %s" % conv5.outputs)
    # print(nx/8,ny/8) # 30 30
    up6 = UpSampling2dLayer(conv5, (2, 2), name='up6')
    # print(up6.outputs)  # (10, 30, 30, 512) == (10, 30, 30, 512)
    up6 = ConcatLayer([up6, conv4], concat_dim=3, name='concat6')
    # print(up6.outputs)  # (10, 30, 30, 768)
    # up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
    conv6 = Conv2d(up6, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv6_1')
    # conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
    conv6 = Conv2d(conv6, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv6_2')
    # conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)

    up7 = UpSampling2dLayer(conv6, (2, 2), name='up7')
    up7 = ConcatLayer([up7, conv3], concat_dim=3, name='concat7')
    # up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
    conv7 = Conv2d(up7, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv7_1')
    # conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
    conv7 = Conv2d(conv7, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv7_2')
    # conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)

    up8 = UpSampling2dLayer(conv7, (2, 2), name='up8')
    up8 = ConcatLayer([up8, conv2], concat_dim=3, name='concat8')
    # up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = Conv2d(up8, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv8_1')
    # conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Conv2d(conv8, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv8_2')
    # conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)

    up9 = UpSampling2dLayer(conv8, (2, 2), name='up9')
    up9 = ConcatLayer([up9, conv1], concat_dim=3, name='concat9')
    # up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
    conv9 = Conv2d(up9, 32, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv9_1')
    # conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Conv2d(conv9, 32, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv9_2')
    # conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)

    conv10 = Conv2d(conv9, n_out, (1, 1), act=None, name='conv10')
    # conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)
    print(" * Output: %s" % conv10.outputs)
    outputs = tl.act.pixel_wise_softmax(conv10.outputs)
    return conv10, outputs
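
A hedged usage sketch for u_net_2d_32_512_upsam (the placeholder shapes, batch size and loss below
are assumptions for illustration; note the function reads x._shape[0], so the batch dimension must
be fixed, and the spatial size must be divisible by 16 for the upsampled output to match the input):

import tensorflow as tf
import tensorlayer as tl

x = tf.placeholder(tf.float32, [10, 240, 240, 1], name='image')
y_ = tf.placeholder(tf.float32, [10, 240, 240, 2], name='label')
net, outputs = u_net_2d_32_512_upsam(x, n_out=2)
# `outputs` already holds pixel-wise softmax probabilities, so a Dice-style loss
# (assuming tl.cost.dice_coe is available in this TensorLayer version) applies directly.
loss = 1 - tl.cost.dice_coe(outputs, y_)
print(net.outputs, outputs, loss)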