Example #1
def identity_block_td(input_tensor, kernel_size, filters, stage, block, trainable=True):
    # identity block time distributed

    nb_filter1, nb_filter2, nb_filter3 = filters
    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
    else:
        bn_axis = 1

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    # The TimeDistributed wrapper applies the wrapped layer (Dense in the Keras docs, Conv2D here) independently to every time step; by default the first dimension after batch is treated as the time dimension. (A standalone sketch follows this example.)
    x = TimeDistributed(Convolution2D(nb_filter1, (1, 1), trainable=trainable, kernel_initializer='normal'),
                        name=conv_name_base + '2a')(input_tensor)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = TimeDistributed(
        Convolution2D(nb_filter2, (kernel_size, kernel_size), trainable=trainable, kernel_initializer='normal',
                      padding='same'), name=conv_name_base + '2b')(x)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = TimeDistributed(Convolution2D(nb_filter3, (1, 1), trainable=trainable, kernel_initializer='normal'),
                        name=conv_name_base + '2c')(x)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '2c')(x)

    x = Add()([x, input_tensor])
    x = Activation('relu')(x)

    return x
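The comment above refers to TimeDistributed applying the wrapped layer independently to every time step (here, every RoI). A minimal, self-contained sketch of that behaviour with stock Keras 2 layers; the shapes are illustrative only and not part of the original code:

from keras.layers import Input, TimeDistributed, Conv2D
from keras.models import Model

# 5D input: (batch, time steps / RoIs, height, width, channels)
seq = Input(shape=(4, 14, 14, 256))
# One shared 1x1 convolution is applied separately to each of the 4 time steps.
out = TimeDistributed(Conv2D(64, (1, 1)), name='td_conv_demo')(seq)
print(Model(seq, out).output_shape)  # (None, 4, 14, 14, 64)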
Example #2
def conv_block_td(input_tensor, kernel_size, filters, stage, block, input_shape, strides=(2, 2), trainable=True):
    # conv block time distributed

    nb_filter1, nb_filter2, nb_filter3 = filters
    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
    else:
        bn_axis = 1

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = TimeDistributed(
        Convolution2D(nb_filter1, (1, 1), strides=strides, trainable=trainable, kernel_initializer='normal'),
        input_shape=input_shape, name=conv_name_base + '2a')(input_tensor)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = TimeDistributed(Convolution2D(nb_filter2, (kernel_size, kernel_size), padding='same', trainable=trainable,
                                      kernel_initializer='normal'), name=conv_name_base + '2b')(x)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = TimeDistributed(Convolution2D(nb_filter3, (1, 1), kernel_initializer='normal'), name=conv_name_base + '2c',
                        trainable=trainable)(x)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '2c')(x)

    shortcut = TimeDistributed(
        Convolution2D(nb_filter3, (1, 1), strides=strides, trainable=trainable, kernel_initializer='normal'),
        name=conv_name_base + '1')(input_tensor)
    shortcut = TimeDistributed(FixedBatchNormalization(axis=bn_axis), name=bn_name_base + '1')(shortcut)

    x = Add()([x, shortcut])
    x = Activation('relu')(x)
    return x
Example #3
def identity_block(input_tensor, kernel_size, filters, stage, block, trainable=True):
    nb_filter1, nb_filter2, nb_filter3 = filters

    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
    else:
        bn_axis = 1

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Convolution2D(nb_filter1, (1, 1), name=conv_name_base + '2a', trainable=trainable)(input_tensor)
    x = FixedBatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Convolution2D(nb_filter2, (kernel_size, kernel_size), padding='same', name=conv_name_base + '2b',
                      trainable=trainable)(x)
    x = FixedBatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Convolution2D(nb_filter3, (1, 1), name=conv_name_base + '2c', trainable=trainable)(x)
    x = FixedBatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    x = Add()([x, input_tensor])
    x = Activation('relu')(x)
    return x
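The closing Add()([x, input_tensor]) only works because the block input already has nb_filter3 channels and the block never changes the spatial size. A self-contained sketch of the same bottleneck pattern, using the stock BatchNormalization in place of FixedBatchNormalization (an assumption, not the original layer):

from keras.layers import Input, Conv2D, BatchNormalization, Activation, Add

inp = Input(shape=(None, None, 256))            # input depth must equal nb_filter3
x = Activation('relu')(BatchNormalization(axis=-1)(Conv2D(64, (1, 1))(inp)))
x = Activation('relu')(BatchNormalization(axis=-1)(Conv2D(64, (3, 3), padding='same')(x)))
x = BatchNormalization(axis=-1)(Conv2D(256, (1, 1))(x))
out = Activation('relu')(Add()([x, inp]))       # same shape as the input tensor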
Example #4
def conv_block(input_tensor,
               kernel_size,
               filters,
               stage,
               block,
               strides=(2, 2),
               trainable=True):
    nb_filter1, nb_filter2, nb_filter3 = filters
    # -if K.image_dim_ordering() == 'tf':
    if K.image_data_format() == 'channels_last':
        # -bn_axis = 3
        bn_axis = -1
    else:
        bn_axis = 1

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = Convolution2D(nb_filter1, (1, 1),
                      strides=strides,
                      name=conv_name_base + '2a',
                      trainable=trainable)(input_tensor)
    x = FixedBatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = Convolution2D(nb_filter2, (kernel_size, kernel_size),
                      padding='same',
                      name=conv_name_base + '2b',
                      trainable=trainable)(x)
    x = FixedBatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = Convolution2D(nb_filter3, (1, 1),
                      name=conv_name_base + '2c',
                      trainable=trainable)(x)
    x = FixedBatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

    shortcut = Convolution2D(nb_filter3, (1, 1),
                             strides=strides,
                             name=conv_name_base + '1',
                             trainable=trainable)(input_tensor)
    shortcut = FixedBatchNormalization(axis=bn_axis,
                                       name=bn_name_base + '1')(shortcut)

    x = Add()([x, shortcut])
    x = Activation('relu')(x)

    return x
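Unlike the identity block, conv_block strides the first convolution and adds a strided 1x1 projection shortcut, so both branches reach the same spatial size and nb_filter3 channels before Add(). A quick arithmetic check of that (a sketch, not part of the original code):

def conv_out_size(size, kernel, stride, pad=0):
    # output size of a 'valid' Keras convolution
    return (size + 2 * pad - kernel) // stride + 1

# With strides=(2, 2), both the 1x1 main-path conv and the 1x1 shortcut conv
# map a 56x56 feature map to 28x28, so the residual addition sees equal shapes.
print(conv_out_size(56, 1, 2))  # 28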
Example #5
def nn_base(input_tensor=None, trainable=False, channels=3):
    # Determine proper input shape
    if K.image_dim_ordering() == 'th':
        input_shape = (channels, None, None)
    else:
        input_shape = (None, None, channels)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
    else:
        bn_axis = 1

    x = ZeroPadding2D((3, 3), name='conv1_pad')(img_input)
    if channels == 3:
        x = Convolution2D(64, (7, 7), strides=(2, 2), name='conv1', trainable=trainable)(x)
        x = FixedBatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    else:
        x = Convolution2D(64, (7, 7), strides=(2, 2), name='conv1_channel_'+str(channels), trainable=trainable)(x)
        x = FixedBatchNormalization(axis=bn_axis, name='bn_conv1_channel_'+str(channels))(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), trainable=trainable)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', trainable=trainable)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', trainable=trainable)

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', trainable=trainable)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', trainable=trainable)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', trainable=trainable)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', trainable=trainable)

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', trainable=trainable)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b', trainable=trainable)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c', trainable=trainable)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d', trainable=trainable)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e', trainable=trainable)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f', trainable=trainable)

    return x
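Assuming the block helpers above, keras-frcnn's FixedBatchNormalization, and a Keras 2.0.x backend (where K.image_dim_ordering() still exists), the backbone can be wrapped in a model roughly like this (a usage sketch only):

from keras.layers import Input
from keras.models import Model

img_input = Input(shape=(None, None, 3))
shared = nn_base(img_input, trainable=True)   # stage 2-4 feature map
backbone = Model(img_input, shared)
# Overall stride is 16: conv1 (x2), max pool (x2), stage 3 (x2), stage 4 (x2).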
Example #6
def identity_block_td(input_tensor,
                      kernel_size,
                      filters,
                      stage,
                      block,
                      trainable=True):
    # Identity block time distributed
    nb_filter1, nb_filter2, nb_filter3 = filters
    # -if K.image_dim_ordering() == 'tf':
    if K.image_data_format() == 'channels_last':
        # -bn_axis = 3
        bn_axis = -1
    else:
        bn_axis = 1

    conv_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'

    x = TimeDistributed(Convolution2D(nb_filter1, (1, 1),
                                      trainable=trainable,
                                      kernel_initializer='normal'),
                        name=conv_name_base + '2a')(input_tensor)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis),
                        name=bn_name_base + '2a')(x)
    x = Activation('relu')(x)

    x = TimeDistributed(Convolution2D(nb_filter2, (kernel_size, kernel_size),
                                      trainable=trainable,
                                      kernel_initializer='normal',
                                      padding='same'),
                        name=conv_name_base + '2b')(x)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis),
                        name=bn_name_base + '2b')(x)
    x = Activation('relu')(x)

    x = TimeDistributed(Convolution2D(nb_filter3, (1, 1),
                                      trainable=trainable,
                                      kernel_initializer='normal'),
                        name=conv_name_base + '2c')(x)
    x = TimeDistributed(FixedBatchNormalization(axis=bn_axis),
                        name=bn_name_base + '2c')(x)

    x = Add()([x, input_tensor])
    x = Activation('relu')(x)

    return x
Example #7
def nn_base(input_tensor=None, trainable=False):
    # Determine proper input shape
    if K.image_dim_ordering() == 'th':
        input_shape = (3, None, None)
    else:
        input_shape = (None, None, 3)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
    else:
        bn_axis = 1

    x = ZeroPadding2D((3, 3))(img_input)

    # print('++ zero padding: ', x)

    x = Convolution2D(64, (7, 7), strides=(2, 2), name='conv1', trainable=trainable)(x)

    # print('+ conv 2d: ', x)
    print('NOTE: this code only supports Keras 2.0.3; on newer versions this line will raise an error, see the traceback.')
    x = FixedBatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), trainable=trainable)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', trainable=trainable)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', trainable=trainable)

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', trainable=trainable)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', trainable=trainable)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', trainable=trainable)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', trainable=trainable)

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', trainable=trainable)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b', trainable=trainable)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c', trainable=trainable)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d', trainable=trainable)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e', trainable=trainable)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f', trainable=trainable)

    return x
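The NOTE printed above refers to backend API changes after Keras 2.0.x. Examples #4 and #6 already show the corresponding fix: select the batch-norm axis with K.image_data_format() instead of the removed K.image_dim_ordering(). For reference (a sketch):

from keras import backend as K

# 'channels_last' is the modern equivalent of the old 'tf' ordering.
bn_axis = -1 if K.image_data_format() == 'channels_last' else 1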
Example #8
def nn_base(input_tensor=None, trainable=False):
    # Determine proper input shape
    if K.image_dim_ordering() == 'th':
        input_shape = (3, None, None)
    else:
        input_shape = (None, None, 3)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
    else:
        bn_axis = 1

    # ZeroPadding2D pads the border of a 2D input (e.g. an image) with zeros to control the feature-map size after convolution; (3, 3) pads three zeros on each edge. (A shape check follows this example.)
    x = ZeroPadding2D((3, 3))(img_input)

    x = Convolution2D(64, (7, 7), strides=(2, 2), name='conv1', trainable=trainable)(x)
    x = FixedBatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), trainable=trainable)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', trainable=trainable)
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', trainable=trainable)

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', trainable=trainable)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', trainable=trainable)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', trainable=trainable)
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', trainable=trainable)

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', trainable=trainable)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b', trainable=trainable)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c', trainable=trainable)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d', trainable=trainable)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e', trainable=trainable)
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f', trainable=trainable)

    return x
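A quick shape check of the ZeroPadding2D((3, 3)) plus 7x7/stride-2 convolution described in the comment above (self-contained sketch; the 224x224 input size is just an example):

from keras.layers import Input, ZeroPadding2D, Conv2D
from keras.models import Model

inp = Input(shape=(224, 224, 3))
x = ZeroPadding2D((3, 3))(inp)               # 224x224 -> 230x230
x = Conv2D(64, (7, 7), strides=(2, 2))(x)    # (230 - 7) // 2 + 1 = 112
print(Model(inp, x).output_shape)            # (None, 112, 112, 64)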
Example #9
def nn_base(input_tensor=None, trainable=False):
    # Determine proper input shape
    if K.image_dim_ordering() == 'th':
        input_shape = (3, None, None)
    else:
        input_shape = (None, None, 3)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
    else:
        bn_axis = 1

    # Custom input, kept here (commented out) for shape inspection:
    # img_numpy = np.full((1,38,166,3),2)
    # x = tf.convert_to_tensor(img_numpy,dtype=tf.float32)
    x = ZeroPadding2D((3, 3))(img_input)

    # stage1
    # conv1 7*7,64,stride 2
    conv1 = Convolution2D(64, (6, 6),
                          strides=(2, 2),
                          name='conv1',
                          trainable=trainable)(x)
    bn_conv1 = FixedBatchNormalization(axis=bn_axis, name='bn_conv1')(conv1)
    conv1_relu = Activation('relu')(bn_conv1)
    conv1_pool = MaxPooling2D((3, 3), strides=(2, 2))(conv1_relu)
    print("stage1:{}".format(conv1_pool.get_shape()))  # (1, 8, 40, 64)
    # print("stage1:{}".format(x.shape))
    # stage2
    # conv2_x [ [1*1,64], [3*3,64], [1*1,256] ]*3
    conv2_a = conv_block(conv1_pool,
                         3, [64, 64, 256],
                         stage=2,
                         block='a',
                         strides=(1, 1),
                         trainable=trainable)
    conv2_b = identity_block(conv2_a,
                             3, [64, 64, 256],
                             stage=2,
                             block='b',
                             trainable=trainable)
    conv2_c = identity_block(conv2_b,
                             3, [64, 64, 256],
                             stage=2,
                             block='c',
                             trainable=trainable)
    # Used for the final element-wise addition
    x2 = UpSampling2D((2, 2))(conv2_c)
    # conv2_output (1, 16, 80, 1024)
    x2_output = Conv2D(filters=1024,
                       kernel_size=3,
                       strides=(1, 1),
                       padding='same')(x2)
    # x2_output = conv_block(x2, 3, [64, 64, 1024], stage=2, block='diy', strides=(1, 1), trainable=trainable)

    # tmp_conv2_1024 = identity_block(conv2_b, 3, [128, 128, 1024], stage=2, block='c', trainable=trainable)
    # ValueError: Operands could not be broadcast together with shapes (8, 40, 1024) (8, 40, 256): the shortcut must have the same shape as the main path to be added
    # x2 = UpSampling2D((2, 2))(conv2_c)
    print("stage2:{}".format(x2.get_shape()))
    print("x2_output:{}".format(x2_output.shape))
    # stage3
    # conv3_x [ [1*1,128], [3*3,128], [1*1,512] ]*4
    conv3_a = conv_block(x2,
                         3, [128, 128, 512],
                         stage=3,
                         block='a',
                         trainable=trainable)
    conv3_b = identity_block(conv3_a,
                             3, [128, 128, 512],
                             stage=3,
                             block='b',
                             trainable=trainable)
    conv3_c = identity_block(conv3_b,
                             3, [128, 128, 512],
                             stage=3,
                             block='c',
                             trainable=trainable)
    conv3_d = identity_block(conv3_c,
                             3, [128, 128, 512],
                             stage=3,
                             block='d',
                             trainable=trainable)
    x3 = UpSampling2D((2, 2))(conv3_d)
    print("stage3:{}".format(x3.get_shape()))
    # stage4
    # conv4_x [ [1*1,256], [3*3,256], [1*1,1024] ]*6
    conv4_a = conv_block(x3,
                         3, [256, 256, 1024],
                         stage=4,
                         block='a',
                         trainable=trainable)
    conv4_b = identity_block(conv4_a,
                             3, [256, 256, 1024],
                             stage=4,
                             block='b',
                             trainable=trainable)
    conv4_c = identity_block(conv4_b,
                             3, [256, 256, 1024],
                             stage=4,
                             block='c',
                             trainable=trainable)
    conv4_d = identity_block(conv4_c,
                             3, [256, 256, 1024],
                             stage=4,
                             block='d',
                             trainable=trainable)
    conv4_e = identity_block(conv4_d,
                             3, [256, 256, 1024],
                             stage=4,
                             block='e',
                             trainable=trainable)
    conv4_f = identity_block(conv4_e,
                             3, [256, 256, 1024],
                             stage=4,
                             block='f',
                             trainable=trainable)
    x4 = UpSampling2D((2, 2))(conv4_f)
    x = Add()([x4, x2_output])

    print("stage4:{}".format(x4.get_shape()))
    print("return x_shape:{}".format(x.shape))
    return x
def nn_base(input_tensor=None, trainable=False):
    # Determine proper input shape
    if K.image_dim_ordering() == 'th':
        input_shape = (3, None, None)  # channels first (RGB channels leading)
    else:
        input_shape = (None, None, 3)  # channels last (RGB channels trailing)

    if input_tensor is None:
        img_input = Input(shape=input_shape)  # the spatial input size is left unconstrained
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    if K.image_dim_ordering() == 'tf':
        bn_axis = 3
    else:
        bn_axis = 1

    # ZeroPadding2D pads the border of a 2D input (e.g. an image) with zeros to control the feature-map size after convolution; (3, 3) pads three zeros on each edge
    x = ZeroPadding2D((3, 3))(img_input)

    x = Convolution2D(64, (7, 7),
                      strides=(2, 2),
                      name='conv1',
                      trainable=trainable)(x)
    print("Convolution2D shape:", x.shape)
    x = FixedBatchNormalization(axis=bn_axis, name='bn_conv1')(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)
    print("MaxPooling2D shape:", x.shape)

    x = conv_block(x,
                   3, [64, 64, 256],
                   stage=2,
                   block='a',
                   strides=(1, 1),
                   trainable=trainable)  # conv_block's default stride is 2
    print("x stage2 block a shape:", x.shape)
    x = identity_block(x,
                       3, [64, 64, 256],
                       stage=2,
                       block='b',
                       trainable=trainable)
    print("x stage2 block b shape:", x.shape)
    x = identity_block(x,
                       3, [64, 64, 256],
                       stage=2,
                       block='c',
                       trainable=trainable)
    print("x stage2 block c shape:", x.shape)

    x = conv_block(x,
                   3, [128, 128, 512],
                   stage=3,
                   block='a',
                   trainable=trainable)  # conv_block's default stride is 2
    print("x stage3 block a shape:", x.shape)
    x = identity_block(x,
                       3, [128, 128, 512],
                       stage=3,
                       block='b',
                       trainable=trainable)  # identity_block does not change the input/output shape
    print("x stage3 block b shape:", x.shape)
    x = identity_block(x,
                       3, [128, 128, 512],
                       stage=3,
                       block='c',
                       trainable=trainable)
    print("x stage3 block c shape:", x.shape)
    xstage3 = identity_block(x,
                             3, [128, 128, 512],
                             stage=3,
                             block='d',
                             trainable=trainable)
    print("x stage3 block d shape:", xstage3.shape)

    x = conv_block(x,
                   3, [256, 256, 1024],
                   stage=4,
                   block='a',
                   trainable=trainable)
    print("x stage4 block a shape:", x.shape)
    x = identity_block(x,
                       3, [256, 256, 1024],
                       stage=4,
                       block='b',
                       trainable=trainable)
    print("x stage4 block b shape:", x.shape)
    x = identity_block(x,
                       3, [256, 256, 1024],
                       stage=4,
                       block='c',
                       trainable=trainable)
    print("x stage4 block c shape:", x.shape)
    x = identity_block(x,
                       3, [256, 256, 1024],
                       stage=4,
                       block='d',
                       trainable=trainable)
    print("x stage4 block d shape:", x.shape)
    x = identity_block(x,
                       3, [256, 256, 1024],
                       stage=4,
                       block='e',
                       trainable=trainable)
    print("x stage4 block e shape:", x.shape)
    xstage4 = identity_block(x,
                             3, [256, 256, 1024],
                             stage=4,
                             block='f',
                             trainable=trainable)
    print("x stage4 block f shape:", xstage4.shape)
    #identity_block(input_tensor, kernel_size, filters, stage, block, trainable=True)

    # For a walkthrough of the ResNet structure see https://blog.csdn.net/lanran2/article/details/79057994
    # The base net only uses ResNet50's conv1 through conv4_x; conv5_x is cut off

    # Merge the two convolutional feature maps
    stage3_lateral = Convolution2D(1024, (1, 1),
                                   strides=(1, 1),
                                   name='stage3_lateral',
                                   trainable=trainable)(xstage3)
    up4 = UpSampling2D(size=(2, 2))(xstage4)
    print("up4 shape:", up4.shape)
    print("up4.get_shape()=", up4.get_shape())

    #up4 = Cropping2D(cropping=((up4.get_shape()[1]-stage3_lateral.get_shape()[1], 0), (up4.get_shape()[2]-stage3_lateral.get_shape()[2], 0)), data_format=None)(up4)

    # A crop layer is still missing, so stage3_lateral and up4 do not always have the same shape, which can make the Add fail (a sketch of the crop follows this function)
    share_layer = Add()([stage3_lateral, up4])
    print("share_layer shape:", share_layer.shape)

    return share_layer, xstage3, xstage4
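The commented-out Cropping2D call above points at the missing alignment step: when the stage-3 height or width is odd, UpSampling2D((2, 2)) makes up4 one row or column larger than stage3_lateral, and it must be cropped before Add(). A hedged sketch of that idea with illustrative fixed shapes (not the original code):

from keras.layers import Input, UpSampling2D, Cropping2D, Add

stage3 = Input(shape=(25, 38, 1024))               # hypothetical stage-3 map (odd height)
stage4 = Input(shape=(13, 19, 1024))               # corresponding stage-4 map

up4 = UpSampling2D((2, 2))(stage4)                 # 26x38: one row too tall
up4 = Cropping2D(cropping=((1, 0), (0, 0)))(up4)   # drop the extra row -> 25x38
merged = Add()([stage3, up4])                      # shapes now match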