Example No. 1
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.applications import VGG16


def new_Unet(model_flag='vUnet'):
    # L_SIZE (the square input resolution) is a module-level constant
    # defined elsewhere in the original source.
    MARGIN = 30
    # Pretrained VGG16 encoder without the classification head.
    model = VGG16(include_top=False,
                  input_shape=(L_SIZE, L_SIZE, 3),
                  weights='imagenet')
    # Skip connections taken from each VGG16 block.
    conv5 = model.get_layer('block5_conv3').output
    conv4 = model.get_layer('block4_conv3').output
    conv3 = model.get_layer('block3_conv3').output
    conv2 = model.get_layer('block2_conv2').output
    conv1 = model.get_layer('block1_conv2').output

    # Decoder path: each block upsamples and fuses with the matching encoder map.
    conv6 = decoder_block(512, conv5, conv4)
    conv7 = decoder_block(256, conv6, conv3)
    conv8 = decoder_block(128, conv7, conv2)

    # Two parallel heads at full resolution: one for the mask, one for the edge map.
    conv91 = decoder_block(64, conv8, conv1)
    conv92 = decoder_block(64, conv8, conv1)

    out1 = layers.Cropping2D(cropping=((MARGIN, MARGIN),
                                       (MARGIN, MARGIN)))(conv91)  # for mask
    out1 = layers.Conv2D(1, (1, 1), activation='sigmoid', name='out1')(out1)

    if model_flag == 'vUnet':
        out2 = layers.Cropping2D(cropping=((MARGIN, MARGIN),
                                           (MARGIN,
                                            MARGIN)))(conv92)  # for edge
        out2 = layers.Conv2D(1, (1, 1), activation='sigmoid',
                             name='out2')(out2)

        model = tf.keras.Model(inputs=model.inputs, outputs=[out1, out2])
    else:
        model = tf.keras.Model(inputs=model.inputs, outputs=out1)

    return model
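A minimal usage sketch, assuming the companion decoder_block helper from the same source module is importable; the L_SIZE value and the zero input below are placeholders for illustration only (the original defines its own constant):

import numpy as np

L_SIZE = 256  # assumed placeholder; the original source defines its own value
model = new_Unet(model_flag='vUnet')
dummy = np.zeros((1, L_SIZE, L_SIZE, 3), dtype='float32')
mask_pred, edge_pred = model.predict(dummy)  # two heads: mask and edge map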
Example No. 2
from tensorflow.keras import layers


def decoder_block(input_tensor,
                  concat_tensor,
                  num_filters,
                  up_scale=2,
                  cropping=None):
    # Upsample the incoming feature map with a transposed convolution.
    decoder = layers.Conv2DTranspose(num_filters, (up_scale, up_scale),
                                     strides=(up_scale, up_scale),
                                     padding='same')(input_tensor)
    decoder = layers.BatchNormalization()(decoder)
    decoder = layers.Activation("relu")(decoder)
    decoder = layers.Conv2D(num_filters * 2, (3, 3),
                            padding='same')(decoder)
    decoder = layers.BatchNormalization()(decoder)
    decoder = layers.Activation("relu")(decoder)
    decoder = layers.Conv2D(num_filters, (3, 3), padding='same')(decoder)
    # Optionally trim the top/left rows so the size matches the skip tensor.
    if cropping is not None:
        decoder = layers.Cropping2D(
            ((cropping, 0), (cropping, 0)))(decoder)
    # Fuse with the encoder feature map along the channel axis.
    decoder = layers.concatenate([decoder, concat_tensor], axis=-1)
    decoder = layers.BatchNormalization()(decoder)
    decoder = layers.Activation("relu")(decoder)
    return decoder
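A small sketch of how the cropping argument can line the upsampled map up with an odd-sized skip tensor; the shapes and tensor names below are illustrative, not from the original code:

from tensorflow.keras import Input, Model

# Hypothetical shapes: upsampling 32x32 by 2 gives 64x64, one pixel larger
# than the 63x63 skip tensor, so cropping=1 trims the extra row and column.
skip = Input(shape=(63, 63, 64))
deep = Input(shape=(32, 32, 128))
out = decoder_block(deep, skip, num_filters=64, up_scale=2, cropping=1)
print(Model(inputs=[deep, skip], outputs=out).output_shape)  # (None, 63, 63, 128)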
Example No. 3
from tensorflow.keras import backend, layers


def _adjust_block(p, ip, filters, block_id=None):
    """Adjusts the input `previous path` to match the shape of the `input`.

    Used in situations where the output number of filters needs to be changed.

    Arguments:
        p: Input tensor which needs to be modified
        ip: Input tensor whose shape needs to be matched
        filters: Number of output filters to be matched
        block_id: String block_id

    Returns:
        Adjusted Keras tensor
    """
    # Channel and spatial (width) axes depend on the image data format.
    channel_dim = 1 if backend.image_data_format() == 'channels_first' else -1
    img_dim = 2 if backend.image_data_format() == 'channels_first' else -2

    ip_shape = backend.int_shape(ip)

    if p is not None:
        p_shape = backend.int_shape(p)

    with backend.name_scope('adjust_block'):
        if p is None:
            p = ip

        # Spatial sizes differ: downsample `p` with two offset stride-2 pooling paths.
        elif p_shape[img_dim] != ip_shape[img_dim]:
            with backend.name_scope('adjust_reduction_block_%s' % block_id):
                p = layers.Activation('relu',
                                      name='adjust_relu_1_%s' % block_id)(p)
                p1 = layers.AveragePooling2D(
                    (1, 1),
                    strides=(2, 2),
                    padding='valid',
                    name='adjust_avg_pool_1_%s' % block_id)(p)
                p1 = layers.Conv2D(filters // 2, (1, 1),
                                   padding='same',
                                   use_bias=False,
                                   name='adjust_conv_1_%s' % block_id,
                                   kernel_initializer='he_normal')(p1)

                p2 = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
                p2 = layers.Cropping2D(cropping=((1, 0), (1, 0)))(p2)
                p2 = layers.AveragePooling2D(
                    (1, 1),
                    strides=(2, 2),
                    padding='valid',
                    name='adjust_avg_pool_2_%s' % block_id)(p2)
                p2 = layers.Conv2D(filters // 2, (1, 1),
                                   padding='same',
                                   use_bias=False,
                                   name='adjust_conv_2_%s' % block_id,
                                   kernel_initializer='he_normal')(p2)

                p = layers.concatenate([p1, p2], axis=channel_dim)
                p = layers.BatchNormalization(axis=channel_dim,
                                              momentum=0.9997,
                                              epsilon=1e-3,
                                              name='adjust_bn_%s' %
                                              block_id)(p)

        # Same spatial size but wrong channel count: project with a 1x1 convolution.
        elif p_shape[channel_dim] != filters:
            with backend.name_scope('adjust_projection_block_%s' % block_id):
                p = layers.Activation('relu')(p)
                p = layers.Conv2D(filters, (1, 1),
                                  strides=(1, 1),
                                  padding='same',
                                  name='adjust_conv_projection_%s' % block_id,
                                  use_bias=False,
                                  kernel_initializer='he_normal')(p)
                p = layers.BatchNormalization(axis=channel_dim,
                                              momentum=0.9997,
                                              epsilon=1e-3,
                                              name='adjust_bn_%s' %
                                              block_id)(p)
    return p
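The ZeroPadding2D/Cropping2D pair in the second branch offsets the feature map by one pixel before the strided pooling, so p1 and p2 sample complementary grid positions. A minimal sketch of just that shift (the values are illustrative):

import tensorflow as tf
from tensorflow.keras import layers

x = tf.reshape(tf.range(16, dtype=tf.float32), (1, 4, 4, 1))
shifted = layers.Cropping2D(((1, 0), (1, 0)))(
    layers.ZeroPadding2D(((0, 1), (0, 1)))(x))
# Each position now holds the value one step down and to the right,
# with zeros filling the new bottom/right border.
print(shifted.numpy().squeeze())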
Example No. 4
from tensorflow.keras import backend, layers


def _adjust_block(p, ip, filters, block_id=None):
    # Variant of the block above specialized for 'channels_last' data:
    # the channel axis is -1 and the spatial (width) axis is -2.
    channel_dim = -1
    img_dim = -2

    ip_shape = backend.int_shape(ip)

    if p is not None:
        p_shape = backend.int_shape(p)

    with backend.name_scope('adjust_block'):
        if p is None:
            p = ip

        elif p_shape[img_dim] != ip_shape[img_dim]:
            with backend.name_scope('adjust_reduction_block_%s' % block_id):
                p = layers.Activation('relu',
                                      name='adjust_relu_1_%s' % block_id)(p)
                p1 = layers.AveragePooling2D(
                    (1, 1),
                    strides=(2, 2),
                    padding='valid',
                    name='adjust_avg_pool_1_%s' % block_id)(p)
                p1 = layers.Conv2D(filters // 2, (1, 1),
                                   padding='same',
                                   use_bias=False,
                                   name='adjust_conv_1_%s' % block_id,
                                   kernel_initializer='he_normal')(p1)

                p2 = layers.ZeroPadding2D(padding=((0, 1), (0, 1)))(p)
                p2 = layers.Cropping2D(cropping=((1, 0), (1, 0)))(p2)
                p2 = layers.AveragePooling2D(
                    (1, 1),
                    strides=(2, 2),
                    padding='valid',
                    name='adjust_avg_pool_2_%s' % block_id)(p2)
                p2 = layers.Conv2D(filters // 2, (1, 1),
                                   padding='same',
                                   use_bias=False,
                                   name='adjust_conv_2_%s' % block_id,
                                   kernel_initializer='he_normal')(p2)

                p = layers.concatenate([p1, p2], axis=channel_dim)
                p = layers.BatchNormalization(axis=channel_dim,
                                              momentum=0.9997,
                                              epsilon=1e-3,
                                              name='adjust_bn_%s' %
                                              block_id)(p)

        elif p_shape[channel_dim] != filters:
            with backend.name_scope('adjust_projection_block_%s' % block_id):
                p = layers.Activation('relu')(p)
                p = layers.Conv2D(filters, (1, 1),
                                  strides=(1, 1),
                                  padding='same',
                                  name='adjust_conv_projection_%s' % block_id,
                                  use_bias=False,
                                  kernel_initializer='he_normal')(p)
                p = layers.BatchNormalization(axis=channel_dim,
                                              momentum=0.9997,
                                              epsilon=1e-3,
                                              name='adjust_bn_%s' %
                                              block_id)(p)
    return p
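A minimal sketch of calling the helper on two hypothetical feature maps whose spatial sizes differ by a factor of two; the tensor names and shapes are illustrative only:

from tensorflow.keras import Input

ip = Input(shape=(16, 16, 64))   # current feature map
p = Input(shape=(32, 32, 32))    # previous feature map at twice the resolution
adjusted = _adjust_block(p, ip, filters=64, block_id='demo')
print(adjusted.shape)  # (None, 16, 16, 64)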