Example #1
    def formNet(self, img_ph, ann_ph, base_filter_num=8):
        down_layer_list = {}
        curr_layer = img_ph

        # Down sampling
        for i in range(5):
            num_filter = base_filter_num * 2**i
            if i == 0:
                conv1 = layers.conv2d(
                    curr_layer,
                    W=[3, 3,
                       img_ph.get_shape().as_list()[-1], num_filter],
                    b=[num_filter])
            else:
                conv1 = layers.conv2d(curr_layer,
                                      W=[3, 3, num_filter // 2, num_filter],
                                      b=[num_filter])
            relu1 = tf.nn.relu(conv1)
            conv2 = layers.conv2d(relu1,
                                  W=[3, 3, num_filter, num_filter],
                                  b=[num_filter])
            down_layer_list[i] = tf.nn.relu(conv2)
            print('layer: ', i, '\tsize: ',
                  down_layer_list[i].get_shape().as_list())
            if i < 4:
                curr_layer = layers.max_pool(down_layer_list[i])
        curr_layer = down_layer_list[4]

        # Up sampling
        for i in range(3, -1, -1):
            num_filter = base_filter_num * 2**(i + 1)
            deconv_output_shape = tf.shape(down_layer_list[i])  # computed but not used below
            deconv1 = layers.conv2d_transpose(
                curr_layer,
                W=[3, 3, num_filter // 2, num_filter],
                b=[num_filter // 2],
                stride=2)
            concat1 = layers.crop_and_concat(tf.nn.relu(deconv1),
                                             down_layer_list[i])
            conv1 = layers.conv2d(concat1,
                                  W=[3, 3, num_filter, num_filter // 2],
                                  b=[num_filter // 2],
                                  strides=[1, 1, 1, 1])
            relu1 = tf.nn.relu(conv1)
            conv2 = layers.conv2d(relu1,
                                  W=[3, 3, num_filter // 2, num_filter // 2],
                                  b=[num_filter // 2],
                                  strides=[1, 1, 1, 1])
            relu2 = tf.nn.relu(conv2)
            curr_layer = relu2

        # Output
        conv = layers.conv2d(curr_layer, W=[1, 1, base_filter_num, 3], b=[3])
        relu = tf.nn.relu(conv)
        print('final relu: ', relu.get_shape().as_list())
        return tf.expand_dims(tf.argmax(relu, axis=-1), axis=-1), relu
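
A minimal usage sketch, assuming the method lives on some network class (here called UNet, a hypothetical name) and that `layers` is the project's own wrapper module providing conv2d, max_pool, conv2d_transpose and crop_and_concat:

# Hypothetical wiring; the class name, placeholder shapes and dtypes are assumptions.
import tensorflow as tf

img_ph = tf.placeholder(tf.float32, [None, 256, 256, 3])  # input images
ann_ph = tf.placeholder(tf.int32, [None, 256, 256, 1])    # annotation maps

net = UNet()                                  # hypothetical class defining formNet
prediction, logits = net.formNet(img_ph, ann_ph, base_filter_num=8)
# prediction: argmax label map with a trailing channel axis; logits: the raw relu output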
Example #2
def create_conv_net(x,
                    keep_prob,
                    channels,
                    n_class,
                    layers=3,
                    features_root=16,
                    filter_size=3,
                    pool_size=2,
                    summaries=True):
    """
        Creates a new convolutional unet for the given parametrization.
        :param x: input tensor, shape [?,nx,ny,channels]
        :param keep_prob: dropout probability tensor
        :param channels: number of channels in the input image
        :param n_class: number of output labels
        :param layers: number of layers in the net
        :param features_root: number of features in the first layer
        :param filter_size: size of the convolution filter
        :param pool_size: size of the max pooling operation
        :param summaries: Flag if summaries should be created
        """
    logging.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}"
        .format(layers=layers,
                features=features_root,
                filter_size=filter_size,
                pool_size=pool_size))
    # Reshape the input tensor into an image batch
    with tf.name_scope("preprocessing"):
        nx = tf.shape(x)[1]
        ny = tf.shape(x)[2]
        x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
        in_node = x_image
        batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    in_size = 128
    size = in_size
    # Down layers
    for layer in range(0, layers):
        with tf.name_scope("down_conv_{}".format(str(layer))):
            features = 2**layer * features_root  # output features number
            stddev = np.sqrt(2 / (filter_size**2 * features))
            # Weights and biases for the two convolutions
            if layer == 0:
                w1 = weight_variable(
                    [filter_size, filter_size, channels, features],
                    stddev,
                    name="w1")
            else:
                w1 = weight_variable(
                    [filter_size, filter_size, features // 2, features],
                    stddev,
                    name="w1")
            w2 = weight_variable(
                [filter_size, filter_size, features, features],
                stddev,
                name="w2")
            b1 = bias_variable([features], name="b1")
            b2 = bias_variable([features], name="b2")
            # Two stacked convolutions
            conv1 = conv2d(in_node, w1, b1, keep_prob)
            tmp_h_conv = tf.nn.leaky_relu(conv1)
            conv2 = conv2d(tmp_h_conv, w2, b2, keep_prob)
            dw_h_convs[layer] = tf.nn.leaky_relu(conv2)
            # Record
            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))
            # Max pooling; track the spatial size
            if layer < layers - 1:
                pools[layer] = max_pool(dw_h_convs[layer], pool_size)
                in_node = pools[layer]
                size /= 2

    in_node = dw_h_convs[layers - 1]
    # Up layers
    for layer in range(layers - 2, -1, -1):
        with tf.name_scope("up_conv_{}".format(str(layer))):
            features = 2**(layer + 1) * features_root
            stddev = np.sqrt(2 / (filter_size**2 * features))
            # Up-convolution and skip connection; weight shape is [kernel_x, kernel_y, out_features, in_features]
            wd = weight_variable_devonc(
                [pool_size, pool_size, features // 2, features],
                stddev,
                name="wd")
            bd = bias_variable([features // 2], name="bd")
            h_deconv = tf.nn.leaky_relu(deconv2d(in_node, wd, pool_size) + bd)
            h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)
            deconv[layer] = h_deconv_concat
            # Weights and biases for the two convolutions
            w1 = weight_variable(
                [filter_size, filter_size, features, features // 2],
                stddev,
                name="w1")
            w2 = weight_variable(
                [filter_size, filter_size, features // 2, features // 2],
                stddev,
                name="w2")
            b1 = bias_variable([features // 2], name="b1")
            b2 = bias_variable([features // 2], name="b2")
            # Two stacked convolutions
            conv1 = conv2d(h_deconv_concat, w1, b1, keep_prob)
            h_conv = tf.nn.leaky_relu(conv1)
            conv2 = conv2d(h_conv, w2, b2, keep_prob)
            in_node = tf.nn.leaky_relu(conv2)
            up_h_convs[layer] = in_node
            # Record
            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv1, conv2))

            size *= 2
    # Output map
    with tf.name_scope("output_map"):
        weight = weight_variable([1, 1, features_root, n_class], stddev)
        bias = bias_variable([n_class], name="bias")
        conv = conv2d(in_node, weight, bias, tf.constant(1.0))
        output_map = tf.nn.leaky_relu(conv)
        up_h_convs["out"] = output_map

    # blur map
    with tf.name_scope("output_blur"):
        weight = weight_variable([1, 1, features_root, 1], stddev)
        bias = bias_variable([1], name="bias")
        conv = conv2d(in_node, weight, bias, tf.constant(1.0))
        output_blur = tf.nn.leaky_relu(conv)
        up_h_convs["blur"] = output_blur

    if summaries:
        with tf.name_scope("summaries"):
            for i, (c1, c2) in enumerate(convs):
                tf.summary.image('summary_conv_%02d_01' % i,
                                 get_image_summary(c1))
                tf.summary.image('summary_conv_%02d_02' % i,
                                 get_image_summary(c2))

            for k in pools.keys():
                tf.summary.image('summary_pool_%02d' % k,
                                 get_image_summary(pools[k]))

            for k in deconv.keys():
                tf.summary.image('summary_deconv_concat_%02d' % k,
                                 get_image_summary(deconv[k]))

            for k in dw_h_convs.keys():
                tf.summary.histogram(
                    "dw_convolution_%02d" % k + '/activations', dw_h_convs[k])

            for k in up_h_convs.keys():
                tf.summary.histogram("up_convolution_%s" % k + '/activations',
                                     up_h_convs[k])
    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)
    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_map, output_blur, variables, int(in_size - size)
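
A hedged invocation sketch, assuming TF1-style placeholders and that the helpers used above (weight_variable, bias_variable, conv2d, max_pool, deconv2d, crop_and_concat, get_image_summary) are defined elsewhere in the project:

# Hypothetical call; the 128x128 single-channel input is an assumption.
x = tf.placeholder(tf.float32, [None, 128, 128, 1])
keep_prob = tf.placeholder(tf.float32)

output_map, output_blur, variables, offset = create_conv_net(
    x, keep_prob, channels=1, n_class=2, layers=3, features_root=16)
# output_map has n_class channels, output_blur a single channel, and offset
# reports the difference between in_size and the traced spatial size.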
Example #3
def create_conv_net(x,
                    keep_prob,
                    channels_in,
                    channels_out,
                    n_class,
                    layers=2,
                    features_root=16,
                    filter_size=3,
                    pool_size=2,
                    summaries=True):
    """
    Creates a new convolutional unet for the given parametrization.
    
    :param x: input tensor, shape [?,nx,ny,channels_in]
    :param keep_prob: dropout probability tensor
    :param channels_in: number of channels in the input image
    :param channels_out: number of channels in the output image
    :param n_class: number of output labels
    :param layers: number of layers in the net
    :param features_root: number of features in the first layer
    :param filter_size: size of the convolution filter
    :param pool_size: size of the max pooling operation
    :param summaries: Flag if summaries should be created
    """

    logging.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}"
        .format(layers=layers,
                features=features_root,
                filter_size=filter_size,
                pool_size=pool_size))
    # Reshape the input tensor into an image batch
    nx = tf.shape(x)[1]
    ny = tf.shape(x)[2]
    x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels_in]))
    in_node = x_image
    batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    in_size = 1000
    size = in_size
    # down layers
    for layer in range(0, layers):
        features = 2**layer * features_root
        stddev = np.sqrt(2 / (filter_size**2 * features))
        if layer == 0:
            w1 = weight_variable(
                [filter_size, filter_size, channels_in, features], stddev)
        else:
            w1 = weight_variable(
                [filter_size, filter_size, features // 2, features], stddev)

        w2 = weight_variable([filter_size, filter_size, features, features],
                             stddev)
        b1 = bias_variable([features])
        b2 = bias_variable([features])

        conv1 = conv2d(in_node, w1, keep_prob)
        tmp_h_conv = tf.nn.relu(conv1 + b1)
        conv2 = conv2d(tmp_h_conv, w2, keep_prob)
        dw_h_convs[layer] = tf.nn.relu(conv2 + b2)

        weights.append((w1, w2))
        biases.append((b1, b2))
        convs.append((conv1, conv2))

        size -= 4
        if layer < layers - 1:  # no pooling after the last down layer (bottom of the U)
            pools[layer] = max_pool(dw_h_convs[layer], pool_size)
            in_node = pools[layer]
            size /= 2

    # Start the expanding path from the bottom of the U (index layers - 1,
    # since range() is zero-based)
    in_node = dw_h_convs[layers - 1]

    # up layers
    for layer in range(layers - 2, -1, -1):  # the bottom level itself is not repeated
        features = 2**(layer + 1) * features_root
        stddev = np.sqrt(2 / (filter_size**2 * features))

        # Weights and bias for the up-sampling (transposed) convolution
        wd = weight_variable_devonc(
            [pool_size, pool_size, features // 2, features], stddev)
        bd = bias_variable([features // 2])
        # Upsample the current node and concatenate it with the matching
        # feature map from the contracting path (skip connection)
        h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)
        h_deconv_concat = crop_and_concat(dw_h_convs[layer], h_deconv)
        deconv[layer] = h_deconv_concat

        w1 = weight_variable(
            [filter_size, filter_size, features, features // 2], stddev)
        w2 = weight_variable(
            [filter_size, filter_size, features // 2, features // 2], stddev)
        b1 = bias_variable([features // 2])
        b2 = bias_variable([features // 2])

        conv1 = conv2d(h_deconv_concat, w1, keep_prob)
        h_conv = tf.nn.relu(conv1 + b1)
        conv2 = conv2d(h_conv, w2, keep_prob)
        in_node = tf.nn.relu(conv2 + b2)
        up_h_convs[layer] = in_node

        weights.append((w1, w2))
        biases.append((b1, b2))
        convs.append((conv1, conv2))

        size *= 2
        size -= 4

    # Output Map
    weight = weight_variable([1, 1, features_root, n_class * channels_out],
                             stddev)
    bias = bias_variable([n_class * channels_out])
    conv = conv2d(in_node, weight, tf.constant(1.0))
    output_map = tf.nn.relu(conv + bias)
    up_h_convs["out"] = output_map

    if summaries:
        for i, (c1, c2) in enumerate(convs):
            tf.summary.image('summary_conv_%02d_01' % i, get_image_summary(c1))
            tf.summary.image('summary_conv_%02d_02' % i, get_image_summary(c2))

        for k in pools.keys():
            tf.summary.image('summary_pool_%02d' % k,
                             get_image_summary(pools[k]))

        for k in deconv.keys():
            tf.summary.image('summary_deconv_concat_%02d' % k,
                             get_image_summary(deconv[k]))

        for k in dw_h_convs.keys():
            tf.summary.histogram("dw_convolution_%02d" % k + '/activations',
                                 dw_h_convs[k])

        for k in up_h_convs.keys():
            tf.summary.histogram("up_convolution_%s" % k + '/activations',
                                 up_h_convs[k])

    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_map, variables, int(in_size - size)
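
A hedged usage sketch for this variant, which produces n_class * channels_out output maps (the input shape and channel counts below are assumptions):

# Hypothetical call; the helper functions are assumed to be defined in the same module.
x = tf.placeholder(tf.float32, [None, None, None, 3])
keep_prob = tf.placeholder(tf.float32)

output_map, variables, offset = create_conv_net(
    x, keep_prob, channels_in=3, channels_out=3, n_class=2,
    layers=2, features_root=16)
# offset is the total border lost to the unpadded 3x3 convolutions.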
Example #4
def create_conv_net(x,
                    keep_prob,
                    channels,
                    n_class,
                    layers=3,
                    features_root=16,
                    filter_size=3,
                    pool_size=2,
                    summaries=True):
    """ Creates a new convolutional unet for the given parametrization.
    ### Params:
        * x - Tensor: shape [batch_size, height, width, channels]
        * keep_prob - float: dropout probability
        * channels - integer: number of channels in the input image
        * n_class - integer: number of output labels
        * layers - integer: number of layers in the net
        * features_root - integer: number of features in the first layer
        * filter_size - integer: size of the convolution filter
        * pool_size - integer: size of the max pooling operation
        * summaries - bool: Flag if summaries should be created
    """

    logger.info(
        "Layers {layers}, features {features}, filter size {filter_size}x{filter_size}, pool size: {pool_size}x{pool_size}"
        .format(layers=layers,
                features=features_root,
                filter_size=filter_size,
                pool_size=pool_size))

    nx = tf.shape(x)[1]
    ny = tf.shape(x)[2]
    x_image = tf.reshape(x, tf.stack([-1, nx, ny, channels]))
    in_node = x_image
    batch_size = tf.shape(x_image)[0]

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_convs = OrderedDict()
    up_convs = OrderedDict()

    # Record the size difference
    in_size = 1000
    size = in_size

    # Encode
    for layer in range(0, layers):
        features = 2**layer * features_root
        stddev = np.sqrt(2 / (filter_size**2 * features))
        if layer == 0:
            w1 = weight_variable(
                [filter_size, filter_size, channels, features], stddev)
        else:
            w1 = weight_variable(
                [filter_size, filter_size, features // 2, features], stddev)

        w2 = weight_variable([filter_size, filter_size, features, features],
                             stddev)
        b1 = bias_variable([features])
        b2 = bias_variable([features])

        conv1 = conv2d(in_node, w1, keep_prob)
        tmp_h_conv = tf.nn.relu(conv1 + b1)
        conv2 = conv2d(tmp_h_conv, w2, keep_prob)
        dw_convs[layer] = tf.nn.relu(conv2 + b2)

        weights.append((w1, w2))
        biases.append((b1, b2))
        convs.append((conv1, conv2))

        size -= 4
        if layer < layers - 1:
            pools[layer] = max_pool(dw_convs[layer], pool_size)
            in_node = pools[layer]
            size /= 2

    in_node = dw_convs[layers - 1]

    # Decode
    for layer in range(layers - 2, -1, -1):
        features = 2**(layer + 1) * features_root
        stddev = np.sqrt(2 / (filter_size**2 * features))

        wd = weight_variable_devonc(
            [pool_size, pool_size, features // 2, features], stddev)
        bd = bias_variable([features // 2])
        h_deconv = tf.nn.relu(deconv2d(in_node, wd, pool_size) + bd)
        h_deconv_concat = crop_and_concat(dw_convs[layer], h_deconv)
        deconv[layer] = h_deconv_concat

        w1 = weight_variable(
            [filter_size, filter_size, features, features // 2], stddev)
        w2 = weight_variable(
            [filter_size, filter_size, features // 2, features // 2], stddev)
        b1 = bias_variable([features // 2])
        b2 = bias_variable([features // 2])

        conv1 = conv2d(h_deconv_concat, w1, keep_prob)
        h_conv = tf.nn.relu(conv1 + b1)
        conv2 = conv2d(h_conv, w2, keep_prob)
        in_node = tf.nn.relu(conv2 + b2)
        up_convs[layer] = in_node

        weights.append((w1, w2))
        biases.append((b1, b2))
        convs.append((conv1, conv2))

        size *= 2
        size -= 4

    # Output Map
    weight = weight_variable([1, 1, features_root, n_class], stddev)
    bias = bias_variable([n_class])
    conv = conv2d(in_node, weight, tf.constant(1.0))
    output_map = tf.nn.relu(conv + bias)
    up_convs["out"] = output_map

    # Summarize the convolution and pooling results
    if summaries:
        with tf.name_scope("summary_conv"):
            for i, (c1, c2) in enumerate(convs):
                tf.summary.image('layer_%02d_01' % i, get_image_summary(c1))
                tf.summary.image('layer_%02d_02' % i, get_image_summary(c2))

        with tf.name_scope("summary_max_pooling"):
            for k in pools.keys():
                tf.summary.image('pool_%02d' % k, get_image_summary(pools[k]))

        with tf.name_scope("summary_deconv"):
            for k in deconv.keys():
                tf.summary.image('deconv_concat_%02d' % k,
                                 get_image_summary(deconv[k]))

        with tf.name_scope("down_convolution"):
            for k in dw_convs.keys():
                tf.summary.histogram("layer_%02d" % k + '/activations',
                                     dw_convs[k])

        with tf.name_scope("up_convolution"):
            for k in up_convs.keys():
                tf.summary.histogram("layer_%s" % k + '/activations',
                                     up_convs[k])

    # Record all the variables which can be used in L2 regularization
    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_map, variables, int(in_size - size)
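
Since the comment above notes that the returned variables are meant for L2 regularization, here is a minimal sketch of that use, assuming a cross-entropy term named ce_loss is built elsewhere and the 1e-4 weight is an arbitrary choice:

# Hypothetical regularization wiring; ce_loss and the 1e-4 lambda are assumptions.
output_map, variables, offset = create_conv_net(
    x, keep_prob, channels=1, n_class=2, layers=3, features_root=16)
l2_term = 1e-4 * tf.add_n([tf.nn.l2_loss(v) for v in variables])
total_loss = ce_loss + l2_term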
Example #5
def _create_conv_net(X,
                     image_z,
                     image_width,
                     image_height,
                     image_channel,
                     phase,
                     drop,
                     n_class=1):
    inputX = tf.reshape(
        X, [-1, image_z, image_width, image_height, image_channel])
    # Model
    # layer1->convolution
    layer0 = conv_bn_relu_drop(x=inputX,
                               kernal=(3, 3, 3, image_channel, 16),
                               phase=phase,
                               drop=drop,
                               scope='layer0')
    layer1 = conv_bn_relu_drop(x=layer0,
                               kernal=(3, 3, 3, 16, 16),
                               phase=phase,
                               drop=drop,
                               scope='layer1')
    layer1 = resnet_Add(x1=layer0, x2=layer1)
    # down sampling1
    down1 = down_sampling(x=layer1,
                          kernal=(3, 3, 3, 16, 32),
                          phase=phase,
                          drop=drop,
                          scope='down1')

    # layer2->convolution
    layer2 = conv_bn_relu_drop(x=down1,
                               kernal=(3, 3, 3, 32, 32),
                               phase=phase,
                               drop=drop,
                               scope='layer2_1')
    layer2 = conv_bn_relu_drop(x=layer2,
                               kernal=(3, 3, 3, 32, 32),
                               phase=phase,
                               drop=drop,
                               scope='layer2_2')
    layer2 = resnet_Add(x1=down1, x2=layer2)
    # down sampling2
    down2 = down_sampling(x=layer2,
                          kernal=(3, 3, 3, 32, 64),
                          phase=phase,
                          drop=drop,
                          scope='down2')

    # layer3->convolution
    layer3 = conv_bn_relu_drop(x=down2,
                               kernal=(3, 3, 3, 64, 64),
                               phase=phase,
                               drop=drop,
                               scope='layer3_1')
    layer3 = conv_bn_relu_drop(x=layer3,
                               kernal=(3, 3, 3, 64, 64),
                               phase=phase,
                               drop=drop,
                               scope='layer3_2')
    layer3 = conv_bn_relu_drop(x=layer3,
                               kernal=(3, 3, 3, 64, 64),
                               phase=phase,
                               drop=drop,
                               scope='layer3_3')
    layer3 = resnet_Add(x1=down2, x2=layer3)
    # down sampling3
    down3 = down_sampling(x=layer3,
                          kernal=(3, 3, 3, 64, 128),
                          phase=phase,
                          drop=drop,
                          scope='down3')

    # layer4->convolution
    layer4 = conv_bn_relu_drop(x=down3,
                               kernal=(3, 3, 3, 128, 128),
                               phase=phase,
                               drop=drop,
                               scope='layer4_1')
    layer4 = conv_bn_relu_drop(x=layer4,
                               kernal=(3, 3, 3, 128, 128),
                               phase=phase,
                               drop=drop,
                               scope='layer4_2')
    layer4 = conv_bn_relu_drop(x=layer4,
                               kernal=(3, 3, 3, 128, 128),
                               phase=phase,
                               drop=drop,
                               scope='layer4_3')
    layer4 = resnet_Add(x1=down3, x2=layer4)
    # down sampling4
    down4 = down_sampling(x=layer4,
                          kernal=(3, 3, 3, 128, 256),
                          phase=phase,
                          drop=drop,
                          scope='down4')

    # layer5->convolution
    layer5 = conv_bn_relu_drop(x=down4,
                               kernal=(3, 3, 3, 256, 256),
                               phase=phase,
                               drop=drop,
                               scope='layer5_1')
    layer5 = conv_bn_relu_drop(x=layer5,
                               kernal=(3, 3, 3, 256, 256),
                               phase=phase,
                               drop=drop,
                               scope='layer5_2')
    layer5 = conv_bn_relu_drop(x=layer5,
                               kernal=(3, 3, 3, 256, 256),
                               phase=phase,
                               drop=drop,
                               scope='layer5_3')
    layer5 = resnet_Add(x1=down4, x2=layer5)

    # deconvolution-1
    deconv1 = deconv_relu(x=layer5,
                          kernal=(3, 3, 3, 128, 256),
                          scope='deconv1')

    # layer6->convolution
    layer6 = crop_and_concat(layer4, deconv1)
    _, Z, H, W, _ = layer4.get_shape().as_list()
    layer6 = conv_bn_relu_drop(x=layer6,
                               kernal=(3, 3, 3, 256, 128),
                               image_z=Z,
                               height=H,
                               width=W,
                               phase=phase,
                               drop=drop,
                               scope='layer6_1')
    layer6 = conv_bn_relu_drop(x=layer6,
                               kernal=(3, 3, 3, 128, 128),
                               image_z=Z,
                               height=H,
                               width=W,
                               phase=phase,
                               drop=drop,
                               scope='layer6_2')
    layer6 = conv_bn_relu_drop(x=layer6,
                               kernal=(3, 3, 3, 128, 128),
                               image_z=Z,
                               height=H,
                               width=W,
                               phase=phase,
                               drop=drop,
                               scope='layer6_3')
    layer6 = resnet_Add(x1=deconv1, x2=layer6)

    # deconvolution-2
    deconv2 = deconv_relu(x=layer6, kernal=(3, 3, 3, 64, 128), scope='deconv2')

    # layer7->convolution
    layer7 = crop_and_concat(layer3, deconv2)
    _, Z, H, W, _ = layer3.get_shape().as_list()
    layer7 = conv_bn_relu_drop(x=layer7,
                               kernal=(3, 3, 3, 128, 64),
                               image_z=Z,
                               height=H,
                               width=W,
                               phase=phase,
                               drop=drop,
                               scope='layer7_1')
    layer7 = conv_bn_relu_drop(x=layer7,
                               kernal=(3, 3, 3, 64, 64),
                               image_z=Z,
                               height=H,
                               width=W,
                               phase=phase,
                               drop=drop,
                               scope='layer7_2')
    layer7 = resnet_Add(x1=deconv2, x2=layer7)
    # deconvolution-3
    deconv3 = deconv_relu(x=layer7, kernal=(3, 3, 3, 32, 64), scope='deconv3')

    # layer8->convolution
    layer8 = crop_and_concat(layer2, deconv3)
    _, Z, H, W, _ = layer2.get_shape().as_list()
    layer8 = conv_bn_relu_drop(x=layer8,
                               kernal=(3, 3, 3, 64, 32),
                               image_z=Z,
                               height=H,
                               width=W,
                               phase=phase,
                               drop=drop,
                               scope='layer10_1')
    layer8 = conv_bn_relu_drop(x=layer8,
                               kernal=(3, 3, 3, 32, 32),
                               image_z=Z,
                               height=H,
                               width=W,
                               phase=phase,
                               drop=drop,
                               scope='layer10_2')
    layer8 = conv_bn_relu_drop(x=layer8,
                               kernal=(3, 3, 3, 32, 32),
                               image_z=Z,
                               height=H,
                               width=W,
                               phase=phase,
                               drop=drop,
                               scope='layer10_3')
    layer8 = resnet_Add(x1=deconv3, x2=layer8)
    # deconvolution-4
    deconv4 = deconv_relu(x=layer8, kernal=(3, 3, 3, 16, 32), scope='deconv4')

    # layer9->convolution
    layer9 = crop_and_concat(layer1, deconv4)
    _, Z, H, W, _ = layer1.get_shape().as_list()
    layer9 = conv_bn_relu_drop(x=layer9,
                               kernal=(3, 3, 3, 32, 32),
                               image_z=Z,
                               height=H,
                               width=W,
                               phase=phase,
                               drop=drop,
                               scope='layer11_1')
    layer9 = conv_bn_relu_drop(x=layer9,
                               kernal=(3, 3, 3, 32, 32),
                               image_z=Z,
                               height=H,
                               width=W,
                               phase=phase,
                               drop=drop,
                               scope='layer11_2')
    layer9 = conv_bn_relu_drop(x=layer9,
                               kernal=(3, 3, 3, 32, 32),
                               image_z=Z,
                               height=H,
                               width=W,
                               phase=phase,
                               drop=drop,
                               scope='layer11_3')
    layer9 = resnet_Add(x1=deconv4, x2=layer9)

    # output
    output_map = conv_sigmod(x=layer9,
                             kernal=(1, 1, 1, 32, n_class),
                             scope='output')
    return output_map
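
A hedged call sketch for this 3-D variant, assuming flattened volume patches and the usual batch-norm/dropout placeholders (the 16x96x96 patch size is an assumption):

# Hypothetical invocation; conv_bn_relu_drop, down_sampling, deconv_relu,
# resnet_Add, crop_and_concat and conv_sigmod are assumed project helpers.
X = tf.placeholder(tf.float32, [None, 16 * 96 * 96 * 1])  # flattened 3-D patches
phase = tf.placeholder(tf.bool)      # batch-norm training flag
drop = tf.placeholder(tf.float32)    # dropout keep probability

output_map = _create_conv_net(X, image_z=16, image_width=96, image_height=96,
                              image_channel=1, phase=phase, drop=drop, n_class=1)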