示例#1
0
def CreateModelCNNV2(num_classes=1, drop=0.25, isBN=True, ad_batch_size=1):
    """Build a small separable-CNN for the smartcar AD task.

    Parameters:
        num_classes   - number of output units of the final Dense layer
        drop          - dropout rate for the dense head (0 disables dropout)
        isBN          - if True, insert BatchNormalization after each layer
        ad_batch_size - first dimension of the input shape (ad window size)

    Returns:
        (model_name, model) - a descriptive name string and the Sequential model.
    """
    model = Sequential()

    # Convolutional trunk: (filters, kernel, strides) per layer; each layer is
    # followed by optional BN and a clipped ReLU.
    conv_plan = [
        (14, (7, 7), (2, 2)),
        (28, (3, 3), (2, 2)),
        (56, (3, 3), (2, 2)),
        (56, (3, 3), (1, 1)),
    ]
    for layer_idx, (n_filters, ksize, stride) in enumerate(conv_plan):
        if layer_idx == 0:
            # First layer carries the input shape.
            model.add(
                SeparableConv2D(filters=n_filters,
                                kernel_size=ksize,
                                padding='same',
                                strides=stride,
                                input_shape=(ad_batch_size, 7, 1)))
        else:
            model.add(
                SeparableConv2D(filters=n_filters,
                                kernel_size=ksize,
                                padding='same',
                                strides=stride))
        if isBN:
            model.add(BatchNormalization())
        model.add(ReLU(max_value=8))

    # -----------------
    model.add(Flatten())
    # -----------

    # Dense head: tanh-activated layers with optional BN and dropout.
    for dense_width in (56, 28, 28):
        model.add(Dense(dense_width))
        if isBN:
            model.add(BatchNormalization())
        model.add(Activation('tanh'))
        if drop > 0:
            model.add(Dropout(drop))

    # Output layer (no activation here; optional BN as in the hidden layers).
    model.add(Dense(num_classes))
    if isBN:
        model.add(BatchNormalization())

    model.summary()
    sModelName = 'smartcar_ad_CNNV2_drop_0%d_adSize_%d' % (int(
        drop * 100), ad_batch_size)
    if not isBN:
        sModelName += '_nobn'
    return sModelName, model
def u_net(Base, img_height, img_width, img_ch, batchNormalization, SDRate, spatial_dropout, final_neurons, final_afun, weighted):
    """Build a 4-level U-Net.

    Parameters:
        Base               - filter count of the first level (doubled per level)
        img_height         - input image height
        img_width          - input image width
        img_ch             - number of input channels
        batchNormalization - if True, add BatchNormalization after every conv
        SDRate             - spatial dropout rate
        spatial_dropout    - if True, add SpatialDropout2D after activations
        final_neurons      - number of output channels of the final 3x3 conv
        final_afun         - activation function of the output layer
        weighted           - if True, create a second input (weight maps) and
                             return it together with the model so it can be
                             used in a weighted loss

    Returns:
        model                when weighted is False
        (model, inputs2)     when weighted is True
    """

    def conv_unit(x, filters, dropout=True):
        # One 3x3 conv -> optional BN -> ReLU -> optional spatial dropout.
        c = Conv2D(filters=filters,
                   kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
        if batchNormalization:
            c = BatchNormalization(axis=-1)(c)
        a = Activation('relu')(c)
        if spatial_dropout and dropout:
            a = SpatialDropout2D(SDRate)(a)
        return a

    inputs = Input((img_height, img_width, img_ch))
    # In case we use weight maps we need two inputs and two outputs.
    if weighted:
        inputs2 = Input((img_height, img_width, img_ch))

    ## Contraction: two conv units per level, keep the skip tensor, then pool.
    x = inputs
    skips = []
    for depth in range(4):
        filters = Base * (2 ** depth)
        x = conv_unit(x, filters)
        x = conv_unit(x, filters)
        skips.append(x)
        x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)

    ## Bottleneck: a single conv unit.
    x = conv_unit(x, Base * 16)

    ## Expansion: transpose conv, concatenate the skip, two conv units.
    for depth in reversed(range(4)):
        filters = Base * (2 ** depth)
        x = Conv2DTranspose(filters=filters,
                            kernel_size=(2, 2), strides=(2, 2), padding='same')(x)
        x = concatenate([skips[depth], x])
        x = conv_unit(x, filters)
        # The very last conv unit of the original network applies no spatial
        # dropout, so it is disabled at depth 0.
        x = conv_unit(x, filters, dropout=(depth != 0))

    # Final layer: channel projection plus the configured output activation.
    c_final = Conv2D(final_neurons, kernel_size=(3, 3), strides=(1, 1), padding='same')(x)
    out = Activation(final_afun)(c_final)

    if weighted:
        model = Model([inputs, inputs2], out)
        model.summary()
        return model, inputs2
    else:
        model = Model(inputs, out)
        model.summary()
        return model
# Convert the list of test images into a tensor.
# NOTE(review): Testbilder / Testlabels are defined earlier in the file
# (outside this excerpt); presumably lists of 32x32 RGB images and labels.
Testlabels.pop(0)    
Testlabels = np.asarray(Testlabels)
Testbilder = np.asarray([Testbilder])
# Reshape to (num_images, height, width, channels).
Testbilder = Testbilder.reshape(-1, 32, 32, 3)
# Convert the color values to floating point numbers between 0 and 1.
Testbilder = Testbilder/255
Testbilder = np.asarray(Testbilder, dtype = "float32")
Testlabels = np.asarray(Testlabels, dtype= "float32")

# Assemble the neural network.
# First assemble the filters with batch normalization
# (3 convolutional filters, 2 pooling filters).

model = Sequential(name='CNN')
model.add(Conv2D(32, (3, 3), activation='selu', padding='same',input_shape=(32,32,3)))
model.add(BatchNormalization())
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Conv2D(64, (2, 2)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(BatchNormalization())
# Flatten the transformed tensor into one long vector.
model.add(Flatten())

# Set up the 3 dense layers with 750, 256 and 43 neurons; set the dropout rates.
# NOTE(review): the dense layers themselves are added after this excerpt.
示例#4
0
    def Build(input_shape=(66, 64, 3), classes=2):
        """Assemble a ResNet-50-style classifier: a 7x7 stem followed by four
        residual stages (one convolutional block plus identity blocks each)
        and a softmax head."""
        img_input = Input(input_shape)
        x = ZeroPadding2D((3, 3))(img_input)

        # stage1: stem convolution
        x = Conv2D(filters=64,
                   kernel_size=(7, 7),
                   strides=(2, 2),
                   name="conv1",
                   kernel_initializer=glorot_uniform(seed=0))(x)
        x = BatchNormalization(axis=3, name="bn_conv1")(x)
        x = Activation("relu")(x)
        x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)

        # stages 2-5: each stage starts with a convolutional block and is
        # followed by the listed identity blocks
        stage_plan = [
            (2, [64, 64, 256], 1, "bc"),
            (3, [128, 128, 512], 2, "bcd"),
            (4, [256, 256, 1024], 2, "bcdef"),
            (5, [512, 512, 2048], 2, "bc"),
        ]
        for stage, filters, stride, ident_blocks in stage_plan:
            x = convolutional_block(x,
                                    f=3,
                                    filters=filters,
                                    stage=stage,
                                    block="a",
                                    s=stride)
            for block_id in ident_blocks:
                x = identity_block(x,
                                   f=3,
                                   filters=filters,
                                   stage=stage,
                                   block=block_id)

        # average pooling layer
        x = AveragePooling2D(pool_size=(2, 2), padding="same")(x)

        # output layer
        x = Flatten()(x)
        x = Dense(classes,
                  activation="softmax",
                  name="fc" + str(classes),
                  kernel_initializer=glorot_uniform(seed=0))(x)

        # create the model
        model = Model(inputs=img_input, outputs=x)

        return model
示例#5
0
def convolutional_block(X, f, filters, stage, block, s=2):
    """Residual convolutional block: a three-conv main path plus a projection
    shortcut, joined by addition and a final ReLU.

    Parameters:
        X       - input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
        f       - kernel size of the middle conv of the main path
        filters - list of three ints, filter counts of the main-path convs
        stage   - integer used (with block) to name the layers
        block   - string used (with stage) to name the layers
        s       - stride of the first main-path conv and of the shortcut conv

    Returns:
        Output tensor of shape (n_H, n_W, n_C).
    """

    # Naming scheme shared by all layers of this block.
    conv_prefix = "res" + str(stage) + block + "_branch"
    bn_prefix = "bn" + str(stage) + block + "_branch"

    # Filter counts of the three main-path convolutions.
    F1, F2, F3 = filters

    # Keep the block input for the shortcut path.
    shortcut = X

    # Main path, part 1: 1x1 strided conv.
    out = Conv2D(filters=F1,
                 kernel_size=(1, 1),
                 strides=(s, s),
                 padding="valid",
                 name=conv_prefix + "2a",
                 kernel_initializer=glorot_uniform(seed=0))(X)
    out = BatchNormalization(axis=3, name=bn_prefix + "2a")(out)
    out = Activation("relu")(out)

    # Main path, part 2: fxf conv, 'same' padding.
    out = Conv2D(filters=F2,
                 kernel_size=(f, f),
                 strides=(1, 1),
                 padding="same",
                 name=conv_prefix + "2b",
                 kernel_initializer=glorot_uniform(seed=0))(out)
    out = BatchNormalization(axis=3, name=bn_prefix + "2b")(out)
    out = Activation("relu")(out)

    # Main path, part 3: 1x1 conv, no activation before the merge.
    out = Conv2D(filters=F3,
                 kernel_size=(1, 1),
                 strides=(1, 1),
                 padding="valid",
                 name=conv_prefix + "2c",
                 kernel_initializer=glorot_uniform(seed=0))(out)
    out = BatchNormalization(axis=3, name=bn_prefix + "2c")(out)

    # Shortcut path: 1x1 strided projection so the shapes match.
    shortcut = Conv2D(filters=F3,
                      kernel_size=(1, 1),
                      strides=(s, s),
                      padding="valid",
                      name=conv_prefix + "1",
                      kernel_initializer=glorot_uniform(seed=0))(shortcut)
    shortcut = BatchNormalization(axis=3,
                                  name=bn_prefix + "1")(shortcut)

    # Merge: add the two paths, then apply ReLU.
    out = Add()([out, shortcut])
    out = Activation("relu")(out)

    return out
def CNNResBlockModel(config):
    """Build a CNN of ResBlock groups (optionally gated with Highway or Skip
    connections), followed by optional Conv1D layers and a Dense head.

    Parameters:
        config - configuration object; fields read include
                 regularization_method, alpha, hidden_size, Filter_shape_dim1/2,
                 Pool_shape_dim1/2, CNN_ResBlock_* lists, Conv1D_* lists,
                 Dense_size, Dense_dropout, dropout_after_all_conv2d,
                 kernel_initalizer, Regularization_term, activation,
                 model_input_dim, use_batch_norm, learn_background,
                 ResBlockDouble, background_implicit_inference.

    Returns:
        A keras Model (or BlockBackgroundModel when config.learn_background
        is set).
    """
    def regularization(lamda):
        # Map the configured method name to a keras regularizer instance.
        if config.regularization_method == 'L2':
            return keras.regularizers.l2(lamda)
        elif config.regularization_method == 'L1':
            return keras.regularizers.l1(lamda)
        else:
            raise Exception('Use Only L2 / L1 regularization')

    def activation(activation_name, x):
        # LeakyReLU needs its own layer class; everything else goes through
        # the generic Activation layer.
        if activation_name == 'leaky_relu':
            return LeakyReLU(alpha=config.alpha)(x)
        else:
            return Activation(activation_name)(x)

    def highway_layer(value, gate_bias=-3):
        # https://towardsdatascience.com/review-highway-networks-gating-function-to-highway-image-classification-5a33833797b5
        nonlocal i_hidden  # to keep i_hidden "global" to all functions under CNNResBlockModel()
        dim = K.int_shape(value)[-1]
        # gate_bias_initializer = tensorflow.keras.initializers.Constant(gate_bias)
        # gate = Dense(units=dim, bias_initializer=gate_bias_initializer)(value)
        # gate = Activation("sigmoid")(gate)
        # TODO (just for yellow color...) NOTE: to keep dimensions matched, convolution gate instead of regular sigmoid
        # gate (T in paper)
        gate = Conv2D(size_list[i_hidden + config.CNN_ResBlock_conv_per_block - 1], kernel_size=filt_list[-1],
                      padding='same', activation='sigmoid',
                      bias_initializer=tensorflow.keras.initializers.Constant(gate_bias))(value)
        # negated (C in paper)
        negated_gate = Lambda(lambda x: 1.0 - x, output_shape=(size_list[-1],))(gate)
        # use ResBlock as the Transformation
        transformed = ResBlock(x=value)
        transformed_gated = Multiply()([gate, transformed])
        # UpSample value if needed so the channel counts match
        if value.shape.as_list()[-1] != negated_gate.shape.as_list()[-1]:
            r = negated_gate.shape.as_list()[-1] / value.shape.as_list()[-1]
            assert not (bool(r % 1))
            value = tf.keras.layers.UpSampling3D(size=(1, 1, int(r)))(value)
        identity_gated = Multiply()([negated_gate, value])
        value = Add()([transformed_gated, identity_gated])
        return value

    def skip_connection_layer(value):
        nonlocal i_hidden
        # use ResBlock as the Transformation
        transformed = ResBlock(x=value)
        if value.shape.as_list()[-1] != transformed.shape.as_list()[-1]:
            r = transformed.shape.as_list()[-1] / value.shape.as_list()[-1]
            assert not (bool(r % 1))
            # apply convolution as transformation so the shapes match
            value = Conv2D(size_list[i_hidden - 1], kernel_size=filt_list[i_hidden - 1], padding='same')(value)
        value = Add()([value, transformed])
        return value

    def ResBlock(x):
        # A stack of conv->activation(->BN) layers; i_hidden indexes into the
        # flat size/filter lists across all blocks.
        for i in range(config.CNN_ResBlock_conv_per_block):
            nonlocal i_hidden  # to keep i_hidden "global" to all functions under CNNResBlockModel()
            lamda_cnn = 0.0 if config.use_l2_in_cnn is False else lamda
            x = Conv2D(size_list[i_hidden], kernel_size=filt_list[i_hidden], padding='same',
                       bias_regularizer=regularization(lamda_cnn),
                       kernel_regularizer=regularization(lamda_cnn),
                       kernel_initializer=kernel_initalizer)(x)
            x = activation(activation_name, x)
            if config.use_batch_norm is True:
                x = BatchNormalization()(x)
            i_hidden = i_hidden + 1
        return x

    def ResBlockLane(x):
        nonlocal i_hidden
        # ResBlocks
        for i in range(len(config.CNN_ResBlock_highway)):
            if config.CNN_ResBlock_highway[i] == "Highway":
                x = highway_layer(value=x)
            elif config.CNN_ResBlock_highway[i] == "Skip":
                x = skip_connection_layer(value=x)
            elif config.CNN_ResBlock_highway[i] == "None":
                x = ResBlock(x=x)
            else:
                raise Exception('only Highway/Skip/None is allowed !')
            # MaxPool and Dropout
            if config.CNN_ResBlock_dropout[i] != 0:
                x = Dropout(rate=config.CNN_ResBlock_dropout[i])(x)
            x = MaxPooling2D(pool_size=pool_list[i])(x)
        return x

    global background_implicit_inference
    # parameters
    kernel_initalizer = config.kernel_initalizer  # default is 'glorot_uniform'
    lamda = config.Regularization_term
    p_dropout = config.dropout
    activation_name = config.activation
    filt_dim2_list = config.Filter_shape_dim1 if config.Filter_shape_symmetric else config.Filter_shape_dim2
    filt_list = [(x, y) for x, y in zip(config.Filter_shape_dim1, filt_dim2_list)]
    pool_list = [(x, y) for x, y in zip(config.Pool_shape_dim1, config.Pool_shape_dim2)]
    size_list = config.hidden_size
    dense_list = config.Dense_size
    input_shape = config.model_input_dim
    p_dropout_conv1d = config.CNN_ResBlock_dropout_conv1d
    p_dropout_after_all_conv2d = config.dropout_after_all_conv2d
    p_dropout_dense = config.Dense_dropout
    i_hidden = 0

    # Input Layer
    input_layer = Input(shape=input_shape)
    # sanity checks: the flat per-layer lists must agree with the block layout
    assert len(size_list) == len(filt_list)
    assert len(pool_list) == len(config.CNN_ResBlock_highway) == len(config.CNN_ResBlock_dropout)
    assert config.CNN_ResBlock_conv_per_block * len(config.CNN_ResBlock_highway) == len(size_list)
    assert len(config.Conv1D_size) == len(config.Conv1D_kernel)

    if config.ResBlockDouble:
        # two parallel ResBlock lanes, merged by addition
        x1 = input_layer
        x1 = ResBlockLane(x1)
        i_hidden = 0  # zero the hidden sizes counter
        x2 = input_layer
        x2 = ResBlockLane(x2)
        x = Add()([x1, x2])
    else:
        x = input_layer
        # ResBlocks
        x = ResBlockLane(x)
    # Flatten
    x = Flatten()(x)
    if p_dropout_after_all_conv2d != 0:
        x = Dropout(rate=p_dropout_after_all_conv2d)(x)
    # Conv1D
    if len(config.Conv1D_size) != 0:
        x = tf.expand_dims(x, axis=-1)
    for i in range(len(config.Conv1D_size)):
        x = Conv1D(filters=config.Conv1D_size[i], kernel_size=config.Conv1D_kernel[i],
                   kernel_initializer=kernel_initalizer)(x)
        x = activation(activation_name, x)
        if config.use_batch_norm is True:
            x = BatchNormalization()(x)
        if p_dropout_conv1d[i] != 0.0:
            # BUG FIX: was p_dropout_conv1d[1], which applied the second
            # configured rate to every Conv1D layer instead of the i-th rate.
            x = Dropout(rate=p_dropout_conv1d[i])(x)
    # post-Conv1D
    if len(config.Conv1D_size) != 0:
        x = MaxPooling1D(pool_size=config.Conv1D_pool)(x)
        # x = BatchNormalization()(x)
        x = Flatten()(x)

    # Dense
    for i in range(len(dense_list)):
        x = Dense(dense_list[i], kernel_regularizer=regularization(lamda), kernel_initializer=kernel_initalizer)(x)
        x = activation(activation_name, x)
        if config.use_batch_norm is True:
            x = BatchNormalization()(x)
        if p_dropout_dense[i] != 0.0:
            x = Dropout(rate=p_dropout_dense[i])(x)
    # x = Dropout(rate=p_dropout)(x)
    # x = BatchNormalization()(x)
    if config.learn_background:
        x = Dense(3, activation='softmax')(x)
    else:
        x = Dense(1, activation='sigmoid')(x)
    output_layer = x
    model = Model(input_layer, output_layer)
    if config.learn_background:
        if config.background_implicit_inference:
            background_implicit_inference = True
        model = BlockBackgroundModel(input_layer, output_layer)
    # else:
    #     model = Model(input_layer, output_layer)
    # model.summary()
    return model
示例#7
0
def u_net(img):
    """Build a 4-level U-Net for single-channel segmentation.

    The input shape is taken from the last three dimensions of *img*;
    the output is a 1-channel sigmoid map.
    """
    inputs = Input(shape=img.shape[-3:])

    def double_conv(x, filters):
        # Two (conv 3x3 -> BN -> ReLU) units.
        for _ in range(2):
            x = Conv2D(filters, (3, 3), padding='same')(x)
            x = BatchNormalization(axis=3)(x)
            x = Activation('relu')(x)
        return x

    # Encoder: keep each level's activation for the skip connection.
    skips = []
    x = inputs
    for filters in (16, 32, 64, 128):
        x = double_conv(x, filters)
        skips.append(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)

    # Bottleneck.
    x = double_conv(x, 256)

    # Decoder: upsample, concatenate the matching skip, then double conv.
    for filters, skip in zip((128, 64, 32, 16), reversed(skips)):
        upsampled = Conv2DTranspose(filters, (2, 2), strides=(2, 2),
                                    padding='same')(x)
        x = Concatenate()([upsampled, skip])
        x = double_conv(x, filters)

    # 1x1 projection to a single sigmoid output channel.
    seg_map = Conv2D(1, (1, 1), activation='sigmoid')(x)

    model = Model(inputs=[inputs], outputs=[seg_map])
    print(model.summary())

    return model
示例#8
0
def unet():
    """Build a U-Net variant for 128x128x1 inputs.

    Downsampling is done with strided convolutions instead of pooling;
    upsampling uses UpSampling2D followed by a 2x2 conv. The output is a
    single tanh-activated channel.
    """

    def conv(x, filters, ksize, stride=1):
        # Standard conv unit used throughout: ReLU, 'same' padding,
        # lecun_normal initialization.
        return Conv2D(filters,
                      ksize,
                      activation='relu',
                      strides=stride,
                      padding='same',
                      kernel_initializer='lecun_normal')(x)

    def up_merge(x, skip, filters):
        # Upsample, 2x2 conv, concat with the encoder skip, two 3x3 convs, BN.
        upsampled = conv(UpSampling2D(size=(2, 2))(x), filters, 2)
        merged = Concatenate(axis=-1)([skip, upsampled])
        merged = conv(merged, filters, 3)
        merged = conv(merged, filters, 3)
        return BatchNormalization()(merged)

    inputs = Input(shape=(128, 128, 1))

    # Stem: 1x1 convs then two strided stages (each followed by BN).
    x = conv(inputs, 32, 1)
    x = conv(x, 32, 1)
    x = BatchNormalization()(x)

    x = conv(x, 64, 3)
    x = conv(x, 64, 3, stride=2)
    x = BatchNormalization()(x)

    x = conv(x, 64, 3)
    x = conv(x, 64, 3, stride=2)
    x = BatchNormalization()(x)

    # Encoder levels; conv1..conv3 feed the decoder skip connections.
    conv1 = conv(x, 64, 3)
    conv1 = conv(conv1, 64, 3)
    conv1 = BatchNormalization()(conv1)

    conv2 = conv(conv1, 128, 3)
    conv2 = conv(conv2, 128, 3, stride=2)
    conv2 = BatchNormalization()(conv2)

    conv3 = conv(conv2, 256, 3)
    conv3 = conv(conv3, 256, 3, stride=2)
    conv3 = BatchNormalization()(conv3)

    conv4 = conv(conv3, 512, 3)
    conv4 = conv(conv4, 512, 3, stride=2)
    conv4 = BatchNormalization()(conv4)

    # Decoder with skip connections.
    x = up_merge(conv4, conv3, 256)
    x = up_merge(x, conv2, 128)
    x = up_merge(x, conv1, 64)

    # Head: 1x1 relu conv then 1x1 tanh output.
    x = conv(x, 1, 1)
    outputs = Conv2D(1,
                     1,
                     activation='tanh',
                     padding='same',
                     kernel_initializer='lecun_normal')(x)

    model = Model(inputs=inputs, outputs=outputs)
    return model
示例#9
0
def encoder(code_length=128):
    """Build a convolutional encoder mapping a 128x128x1 image to a
    `code_length`-dimensional latent code.

    Architecture: a 1x1 stem (32 filters), two downsampling 64-filter
    stages, one non-strided 64-filter stage, then downsampling
    128/256/512 stages, followed by a Dense projection with batch norm.

    Returns:
        A Keras Model named 'encoder'.
    """
    def _double_conv(tensor, n_filters, k, downsample=False):
        # Two conv layers (second optionally strided by 2) followed by BN.
        tensor = Conv2D(n_filters,
                        k,
                        activation='relu',
                        padding='same',
                        kernel_initializer='lecun_normal')(tensor)
        tensor = Conv2D(n_filters,
                        k,
                        activation='relu',
                        strides=2 if downsample else 1,
                        padding='same',
                        kernel_initializer='lecun_normal')(tensor)
        return BatchNormalization()(tensor)

    inputs = Input(shape=(128, 128, 1))

    net = _double_conv(inputs, 32, 1)                  # 1x1 stem
    net = _double_conv(net, 64, 3, downsample=True)    # 128 -> 64
    net = _double_conv(net, 64, 3, downsample=True)    # 64 -> 32
    net = _double_conv(net, 64, 3)                     # keep resolution
    net = _double_conv(net, 128, 3, downsample=True)   # 32 -> 16
    net = _double_conv(net, 256, 3, downsample=True)   # 16 -> 8
    net = _double_conv(net, 512, 3, downsample=True)   # 8 -> 4

    flat = Flatten()(net)
    code = Dense(code_length)(flat)
    outputs = BatchNormalization()(code)

    return Model(inputs=inputs, outputs=outputs, name='encoder')
示例#10
0
 def conv2d(x, filters, shape=(3, 3), **kwargs):
     """Downsampling block: stride-2 Conv2D -> BN -> LeakyReLU -> Dropout(0.3)."""
     out = Conv2D(filters, shape, strides=(2, 2), padding='same')(x)
     out = BatchNormalization()(out)
     out = LeakyReLU()(out)
     return Dropout(0.3)(out)
示例#11
0
 def deconv2d(x, filters, shape=(3, 3)):
     """Upsampling block: stride-2 Conv2DTranspose -> BN -> LeakyReLU -> Dropout(0.3)."""
     out = Conv2DTranspose(filters, shape, padding='same', strides=(2, 2))(x)
     out = BatchNormalization()(out)
     out = LeakyReLU()(out)
     return Dropout(0.3)(out)
    def __init__(self,
                 weights='pascal_voc',
                 input_tensor=None,
                 input_shape=(512, 512, 3),
                 classes=21,
                 OS=8):
        """Build the DeepLabv3+ layer stack (Xception backbone, OS=8 params).

        Args:
            weights: pretrained weight set identifier (e.g. 'pascal_voc').
            input_tensor: optional existing tensor to use as the model input.
            input_shape: (height, width, channels) of the input image.
            classes: number of output segmentation classes.
            OS: output stride; used to size the image-pooling branch.
        """
        super(Deeplabv3_plus, self).__init__()

        # Resolve the model input: build a fresh Input unless a usable
        # Keras tensor was supplied.
        if input_tensor is None:
            self.img_input = Input(shape=input_shape)
        else:
            if not K.is_keras_tensor(input_tensor):
                self.img_input = Input(tensor=input_tensor, shape=input_shape)
            else:
                self.img_input = input_tensor

        self.input_tensor = input_tensor
        # OS == 8 params
        self.entry_block3_stride = 1
        self.middle_block_rate = 2  # ! Not mentioned in paper, but required
        self.exit_block_rates = (2, 4)
        self.atrous_rates = (12, 24, 36)

        # ---- Entry flow ----
        self.entry_flow_conv1_1 = Conv2D(32, (3, 3),
                                         strides=(2, 2),
                                         use_bias=False,
                                         padding='same')
        self.entry_flow_conv1_1_BN = BatchNormalization()
        self.relu1 = Activation('relu')

        self.entry_flow_conv1_2 = conv2d_same(64,
                                              'entry_flow_conv1_2',
                                              kernel_size=3,
                                              stride=1)
        self.entry_flow_conv1_2_BN = BatchNormalization()
        self.relu2 = Activation('relu')

        self.entry_flow_block1 = xception_block([128, 128, 128],
                                                'entry_flow_block1',
                                                skip_connection_type='conv',
                                                stride=2,
                                                depth_activation=False)

        self.entry_flow_block2 = xception_block([256, 256, 256],
                                                'entry_flow_block2',
                                                skip_connection_type='conv',
                                                stride=2,
                                                depth_activation=False,
                                                return_skip=True)

        self.entry_flow_block3 = xception_block(
            [728, 728, 728],
            'entry_flow_block3',
            skip_connection_type='conv',
            stride=self.entry_block3_stride,
            depth_activation=False)

        # ---- Middle flow: 16 identical residual units ----
        self.middle_flow_unit = []
        for i in range(16):
            self.middle_flow_unit.append(
                xception_block([728, 728, 728],
                               'middle_flow_unit' + str(i),
                               skip_connection_type='sum',
                               stride=1,
                               rate=self.middle_block_rate,
                               depth_activation=False))

        # ---- Exit flow ----
        # BUG FIX: both exit blocks were previously named
        # 'middle_flow_unit' + str(i) using the leaked loop variable from
        # the middle-flow loop above, duplicating the name of the last
        # middle-flow unit. They now carry their own names.
        self.exit_flow_block1 = xception_block([728, 1024, 1024],
                                               'exit_flow_block1',
                                               skip_connection_type='conv',
                                               stride=1,
                                               rate=self.exit_block_rates[0],
                                               depth_activation=False)
        self.exit_flow_block2 = xception_block([1536, 1536, 2048],
                                               'exit_flow_block2',
                                               skip_connection_type='none',
                                               stride=1,
                                               rate=self.exit_block_rates[1],
                                               depth_activation=True)
        # end of feature extractor

        # ---- Atrous Spatial Pyramid Pooling branches ----
        # simple 1x1
        self.aspp0 = Conv2D(256, (1, 1), padding='same', use_bias=False)
        self.aspp0_BN = BatchNormalization(epsilon=1e-5)
        self.aspp0_activation = Activation('relu')

        # rate = 6 (12)
        self.aspp1 = SepConv_BN(256,
                                'aspp1',
                                rate=self.atrous_rates[0],
                                depth_activation=True,
                                epsilon=1e-5)
        # rate = 12 (24)
        self.aspp2 = SepConv_BN(256,
                                'aspp2',
                                rate=self.atrous_rates[1],
                                depth_activation=True,
                                epsilon=1e-5)
        # rate = 18 (36)
        self.aspp3 = SepConv_BN(256,
                                'aspp3',
                                rate=self.atrous_rates[2],
                                depth_activation=True,
                                epsilon=1e-5)

        # Image Feature branch: global pooling at the feature-map scale,
        # project, then bilinearly upsample back.
        self.out_shape = int(np.ceil(input_shape[0] / OS))
        self.b4_ap = AveragePooling2D(pool_size=(self.out_shape,
                                                 self.out_shape))
        self.image_pooling = Conv2D(256, (1, 1),
                                    padding='same',
                                    use_bias=False)
        self.image_pooling_BN = BatchNormalization(epsilon=1e-5)
        self.relu3 = Activation('relu')
        self.b4_bu = BilinearUpsampling((self.out_shape, self.out_shape))

        # concatenate ASPP branches & project
        self.concat1 = Concatenate()
        self.concat_projection = Conv2D(256, (1, 1),
                                        padding='same',
                                        use_bias=False)
        self.concat_projection_BN = BatchNormalization(epsilon=1e-5)
        self.relu4 = Activation('relu')
        # NOTE(review): attribute name typo ('droput') kept intentionally —
        # renaming it would break the call() method that references it.
        self.droput = Dropout(0.1)
        self.activation = Activation("softmax")

        # ---- DeepLab v.3+ decoder ----

        # Feature projection, x4 (x2) block
        self.bu = BilinearUpsampling(
            output_size=(int(np.ceil(input_shape[0] / 4)),
                         int(np.ceil(input_shape[1] / 4))))
        self.feature_projection0 = Conv2D(48, (1, 1),
                                          padding='same',
                                          use_bias=False)
        self.feature_projection0_BN = BatchNormalization(epsilon=1e-5)
        self.relu5 = Activation('relu')
        self.concat2 = Concatenate()  # move to call
        self.decoder_conv0 = SepConv_BN(256,
                                        'decoder_conv0',
                                        depth_activation=True,
                                        epsilon=1e-5)
        self.decoder_conv1 = SepConv_BN(256,
                                        'decoder_conv1',
                                        depth_activation=True,
                                        epsilon=1e-5)

        # Per-class logits, upsampled to the input resolution.
        self.logits1 = Conv2D(classes, (1, 1), padding='same')
        self.logits2 = BilinearUpsampling(output_size=(input_shape[0],
                                                       input_shape[1]))
示例#13
0
def stem_split_3k(x,
                  filters,
                  mode="conc",
                  kernel_size_1=(3, 3),
                  kernel_size_2=(5, 5),
                  kernel_size_3=(7, 7),
                  dilation_1=(1, 1),
                  dilation_2=(1, 1),
                  dilation_3=(1, 1),
                  padding="same",
                  strides=1):
    """Stem block splitting the input into three separable-conv branches
    with different kernel sizes, merged with a 1x1 projected shortcut.

    Args:
        x: input tensor.
        filters: base number of filters per branch.
        mode: 'conc' — concatenate the three branch outputs (shortcut is
            projected to 3*filters); 'add' — sum the branches with the
            shortcut (all at `filters` channels).
        kernel_size_*/dilation_*: per-branch kernel sizes / dilation rates.
        padding: padding mode for all convolutions.
        strides: unused; all layers use stride 1 (kept for interface
            compatibility).

    Returns:
        The merged output tensor.

    Raises:
        ValueError: if `mode` is neither 'conc' nor 'add' (previously this
            surfaced later as a confusing UnboundLocalError).
    """
    if mode == "conc":
        res_filters = filters
        skip_filters = np.uint64(filters * 3)
    elif mode == "add":
        res_filters = filters
        skip_filters = filters
    else:
        raise ValueError(
            "mode must be 'conc' or 'add', got {!r}".format(mode))

    def _branch(kernel_size, dilation_rate):
        # SeparableConv -> BN -> LeakyReLU -> SeparableConv (residual out).
        t = SeparableConv2D(res_filters,
                            kernel_size=kernel_size,
                            dilation_rate=dilation_rate,
                            padding=padding,
                            strides=1,
                            depthwise_initializer=he_normal(seed=5),
                            pointwise_initializer=he_normal(seed=5),
                            bias_initializer='zeros')(x)
        t = BatchNormalization()(t)
        t = LeakyReLU(alpha=0.1)(t)
        return SeparableConv2D(res_filters,
                               kernel_size=kernel_size,
                               dilation_rate=dilation_rate,
                               padding=padding,
                               strides=1,
                               depthwise_initializer=he_normal(seed=5),
                               pointwise_initializer=he_normal(seed=5),
                               bias_initializer='zeros')(t)

    res1 = _branch(kernel_size_1, dilation_1)
    res2 = _branch(kernel_size_2, dilation_2)
    res3 = _branch(kernel_size_3, dilation_3)

    # 1x1 projected shortcut from the block input.
    shortcut = Conv2D(skip_filters,
                      kernel_size=(1, 1),
                      padding=padding,
                      strides=1,
                      kernel_initializer=he_normal(seed=5),
                      bias_initializer='zeros')(x)
    shortcut = BatchNormalization()(shortcut)

    if mode == "conc":
        res = Concatenate()([res1, res2, res3])
        output = Add()([shortcut, res])
    else:  # mode == "add"
        output = Add()([shortcut, res1, res2, res3])

    return output
示例#14
0
def smallpureCNNModelV2(num_classes=1, drop=0.25, isBN=True, ad_batch_size=1):
    """Build a small pure-CNN model over (ad_batch_size, 7, 1) inputs.

    Args:
        num_classes: number of units in the final Dense layer.
        drop: dropout rate; only encoded into the model name (no Dropout
            layers are added in this variant).
        isBN: if True, add BatchNormalization after each conv/dense layer.
        ad_batch_size: first spatial dimension of the input.

    Returns:
        (sModelName, model): a descriptive model name string and the
        (uncompiled) Keras Sequential model.
    """
    model = Sequential()
    # Only the first layer of a Sequential model needs input_shape; the
    # redundant (silently ignored) input_shape arguments previously passed
    # to the later layers were removed.
    model.add(
        Conv2D(filters=64,
               kernel_size=(1, 7),
               padding='same',
               strides=(1, 1),
               input_shape=(ad_batch_size, 7, 1)))
    if isBN:
        model.add(BatchNormalization())
    model.add(ReLU(max_value=8))

    # Three separable-conv stages (64, 64, 512 filters), each followed by
    # optional BN and a clipped ReLU.
    for n_filters in (64, 64, 512):
        model.add(
            SeparableConv2D(filters=n_filters,
                            kernel_size=(1, 7),
                            padding='same',
                            strides=(1, 1)))
        if isBN:
            model.add(BatchNormalization())
        model.add(ReLU(max_value=8))

    model.add(GlobalAveragePooling2D())
    # Flatten is a no-op after global pooling; kept so the layer sequence
    # (and any by-order weight files) matches the original model.
    model.add(Flatten())

    model.add(Dense(num_classes))
    if isBN:
        model.add(BatchNormalization())

    model.summary()
    sModelName = 'smartcar_ad_pureCNN_drop_0%d_adSize_%d' % (int(
        drop * 100), ad_batch_size)
    if not isBN:
        sModelName += '_nobn'
    return sModelName, model
示例#15
0
def modular2d3dims(filters,
                   latentDim,
                   path,
                   batch=False,
                   dropout=False,
                   filter_size=3):
    """Build a 2-D convolutional autoencoder (channels-first) and a
    checkpoint callback, restoring the latest checkpoint if one exists.

    Args:
        filters: iterable of filter counts, one per encoder/decoder stage.
        latentDim: optional channel count of a 1x1 bottleneck; None skips it.
        path: checkpoint file path (its directory is scanned for restores).
        batch: add BatchNormalization after each stage if True.
        dropout: add Dropout(0.2) after each stage if True.
        filter_size: kernel size for all conv stages.

    Returns:
        (model, cp_callback): compiled model and ModelCheckpoint callback.
    """
    ckpt_dir = os.path.dirname(path)

    autoencoder = Sequential()
    autoencoder.add(
        InputLayer(input_shape=(config.NUM_CHANNELS, config.IMG_HEIGHT,
                                config.IMG_WIDTH)))

    # Encoder: stride-2 convolutions halve the spatial size at each stage.
    for n_filters in filters:
        autoencoder.add(
            Conv2D(n_filters,
                   filter_size,
                   strides=2,
                   activation='relu',
                   padding="same",
                   data_format="channels_first"))
        if dropout:
            autoencoder.add(Dropout(0.2))
        if batch:
            autoencoder.add(BatchNormalization())

    # Optional 1x1 bottleneck projecting to `latentDim` channels.
    if latentDim is not None:
        autoencoder.add(
            Conv2D(latentDim, (1, 1),
                   strides=1,
                   activation='relu',
                   padding="same",
                   data_format="channels_first"))
        if batch:
            autoencoder.add(BatchNormalization())

    # Decoder: transposed convolutions mirror the encoder stages.
    for n_filters in reversed(filters):
        autoencoder.add(
            Conv2DTranspose(n_filters,
                            filter_size,
                            activation='relu',
                            strides=2,
                            padding="same",
                            data_format="channels_first"))
        if dropout:
            autoencoder.add(Dropout(0.2))
        if batch:
            autoencoder.add(BatchNormalization())

    # Reconstruction head back to the original channel count.
    autoencoder.add(
        Conv2D(config.NUM_CHANNELS,
               filter_size,
               activation='sigmoid',
               padding='same',
               data_format="channels_first"))

    autoencoder.summary()
    autoencoder.compile(loss='mean_squared_error', optimizer=Adam())

    cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=path,
                                                     save_weights_only=True,
                                                     verbose=1,
                                                     save_freq='epoch')

    # Restore the most recent checkpoint, if any, before returning.
    latest = tf.train.latest_checkpoint(ckpt_dir)
    print(latest)
    if latest is not None:
        autoencoder.load_weights(latest)
        print('weights loaded')

    return autoencoder, cp_callback
示例#16
0
def __create_semantic_head(pyramid_dict,
                           input_target=None,
                           n_classes=3,
                           n_filters=128,
                           n_dense=128,
                           semantic_id=0,
                           ndim=2,
                           include_top=True,
                           target_level=2,
                           upsample_type='upsamplelike',
                           interpolation='bilinear',
                           **kwargs):
    """Build a semantic segmentation head from a feature pyramid network.

    Args:
        pyramid_dict (dict): Maps pyramid level names to feature tensors.
        input_target (tensor): Optional input-image tensor; required when
            ``upsample_type`` is ``'upsamplelike'``.
        n_classes (int): Number of classes to predict.
        n_filters (int): Number of convolutional filters (currently not
            forwarded to the upsampling call; see the TODO below).
        n_dense (int): Filter count of the dense-equivalent convolution.
        semantic_id (int): Identifier appended to every layer name.
        ndim (int): Spatial dimensionality of the data; must be 2 or 3.
        include_top (bool): Whether to finish with a softmax layer.
        target_level (int): Number of 2x upsampling steps to perform.
        upsample_type (str): One of ``['upsamplelike', 'upsampling2d',
            'upsampling3d']``.
        interpolation (str): One of ``['bilinear', 'nearest']``.

    Raises:
        ValueError: For an invalid ``ndim``, ``interpolation`` or
            ``upsample_type``, or when ``'upsamplelike'`` is requested
            without an ``input_target``.

    Returns:
        tensorflow.keras.Layer: The semantic segmentation head.
    """
    # ---- argument validation (guard clauses) ----
    if ndim not in {2, 3}:
        raise ValueError('ndim must be either 2 or 3. '
                         'Received ndim = {}'.format(ndim))

    acceptable_interpolation = {'bilinear', 'nearest'}
    if interpolation not in acceptable_interpolation:
        raise ValueError('Interpolation mode "{}" not supported. '
                         'Choose from {}.'.format(
                             interpolation, list(acceptable_interpolation)))

    acceptable_upsample = {'upsamplelike', 'upsampling2d', 'upsampling3d'}
    if upsample_type not in acceptable_upsample:
        raise ValueError('Upsample method "{}" not supported. '
                         'Choose from {}.'.format(upsample_type,
                                                  list(acceptable_upsample)))

    if upsample_type == 'upsamplelike' and input_target is None:
        raise ValueError('upsamplelike requires an input_target.')

    # Pick 2-D or 3-D building blocks once, up front.
    conv = Conv2D if ndim == 2 else Conv3D
    conv_kernel = (1,) * ndim
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    # A single-class head has no use for a softmax top.
    if n_classes == 1:
        include_top = False

    # NOTE: earlier revisions upsampled and summed every pyramid level;
    # the current implementation uses only the feature at the first sorted
    # pyramid key (the variable name is kept to ease re-integration of the
    # multi-level version).
    sorted_names = get_sorted_keys(pyramid_dict)
    semantic_sum = pyramid_dict[sorted_names[0]]

    # Upsample by 2x, `target_level` times.
    n_upsample = target_level
    x = semantic_upsample(
        semantic_sum,
        n_upsample,
        # n_filters=n_filters,  # TODO: uncomment and retrain
        target=input_target,
        ndim=ndim,
        upsample_type=upsample_type,
        semantic_id=semantic_id,
        interpolation=interpolation)

    # Conv + BN + ReLU in place of the previous tensor product.
    x = conv(n_dense,
             conv_kernel,
             strides=1,
             padding='same',
             name='conv_0_semantic_{}'.format(semantic_id))(x)
    x = BatchNormalization(
        axis=channel_axis,
        name='batch_normalization_0_semantic_{}'.format(semantic_id))(x)
    x = Activation('relu', name='relu_0_semantic_{}'.format(semantic_id))(x)

    # Per-class score convolution.
    x = conv(n_classes,
             conv_kernel,
             strides=1,
             padding='same',
             name='conv_1_semantic_{}'.format(semantic_id))(x)

    # Top: softmax for multi-class output, plain relu otherwise.
    if include_top:
        x = Softmax(axis=channel_axis,
                    dtype=K.floatx(),
                    name='semantic_{}'.format(semantic_id))(x)
    else:
        x = Activation('relu',
                       dtype=K.floatx(),
                       name='semantic_{}'.format(semantic_id))(x)

    return x
示例#17
0
def conv3d(filters,
           latentDim,
           path,
           batch=False,
           dropout=False,
           filter_size=3):
    """Build a 3-D convolutional autoencoder and a checkpoint callback,
    restoring the latest checkpoint if one exists.

    Args:
        filters: iterable of filter counts, one per encoder/decoder stage.
        latentDim: optional channel count of a 1x1x1 bottleneck; None skips it.
        path: checkpoint file path (its directory is scanned for restores).
        batch: add BatchNormalization after each stage if True.
        dropout: add Dropout(0.2) after each stage if True.
        filter_size: kernel size for all conv stages.

    Returns:
        (model, cp_callback): compiled model and ModelCheckpoint callback.
    """
    ckpt_dir = os.path.dirname(path)

    net = Sequential()
    net.add(
        InputLayer(input_shape=(config.NUM_CHANNELS, config.IMG_HEIGHT,
                                config.IMG_WIDTH, 1)))

    # Encoder: stride-2 3-D convolutions halve every dimension per stage.
    for n_filters in filters:
        net.add(
            Conv3D(n_filters,
                   filter_size,
                   strides=2,
                   activation='relu',
                   padding="same"))
        if dropout:
            net.add(Dropout(0.2))
        if batch:
            net.add(BatchNormalization())

    # Optional 1x1x1 bottleneck projecting to `latentDim` channels.
    if latentDim is not None:
        net.add(
            Conv3D(latentDim, 1, strides=1, activation='relu',
                   padding="same"))
        if batch:
            net.add(BatchNormalization())

    # Decoder: transposed convolutions mirror the encoder stages.
    for n_filters in reversed(filters):
        net.add(
            Conv3DTranspose(n_filters,
                            filter_size,
                            activation='relu',
                            strides=2,
                            padding="same"))
        if dropout:
            net.add(Dropout(0.2))
        if batch:
            net.add(BatchNormalization())

    net.add(Conv3D(1, filter_size, activation='sigmoid', padding='same'))

    # When the channel depth is not divisible by 2**len(filters), the
    # stride-2 stages round it up on the way down, so the decoder overshoots
    # on the way back; crop the surplus frames symmetrically.
    n_stages = len(filters)
    if config.NUM_CHANNELS % (2**n_stages) != 0:
        dim = config.NUM_CHANNELS
        for _ in range(n_stages):
            dim = (dim + 1) // 2  # ceil-halving, mirroring a stride-2 conv
        print(dim)
        surplus = int((dim * (2**n_stages) - config.NUM_CHANNELS) / 2)
        net.add(
            Cropping3D(cropping=((surplus, surplus), (0, 0), (0, 0))))

    net.summary()
    net.compile(loss='mean_squared_error', optimizer=Adam())

    cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=path,
                                                     save_weights_only=True,
                                                     verbose=1,
                                                     save_freq='epoch')

    # Restore the most recent checkpoint, if any, before returning.
    latest = tf.train.latest_checkpoint(ckpt_dir)
    print(latest)
    if latest is not None:
        net.load_weights(latest)
        print('weights loaded')

    return net, cp_callback
def DR_Unet104(image_width, image_height, num_classes):
    """Build the DR-Unet104 segmentation model.

    A ResNet-style bottleneck encoder (levels 1-5 plus a bridge) over a
    4-channel input, with a decoder that upsamples and concatenates the
    matching encoder level, finishing in a 1x1 per-class conv.

    Args:
        image_width: input width in pixels.
        image_height: input height in pixels.
        num_classes: number of output channels of the final 1x1 conv.

    Returns:
        A Keras Model named 'DR_Unet104'.
    """
    feat = [16, 32, 64, 128, 256, 512]
    drop_rate = 0.5

    inputs = tf.keras.layers.Input((image_width, image_height, 4))

    # ---- Encoder ----
    # Level 1: input bottleneck + one plain bottleneck.
    e0 = input_bottleneck_block(inputs, filters=[16, 16, 64])
    e0 = bottleneck_block(e0, filters=[16, 16, 64])

    # Level 2: downsample + 2 bottlenecks.
    e1 = bottleneck_downsample_block(e0, filters=[32, 32, 128])
    for _ in range(2):
        e1 = bottleneck_block(e1, filters=[32, 32, 128])
    e1 = Dropout(drop_rate)(e1)

    # Level 3: downsample + 2 bottlenecks.
    e2 = bottleneck_downsample_block(e1, filters=[64, 64, 256])
    for _ in range(2):
        e2 = bottleneck_block(e2, filters=[64, 64, 256])
    e2 = Dropout(drop_rate)(e2)

    # Level 4: downsample + 4 bottlenecks, then pad for the later crop.
    e3 = bottleneck_downsample_block(e2, filters=[128, 128, 512])
    for _ in range(4):
        e3 = bottleneck_block(e3, filters=[128, 128, 512])
    e3 = Dropout(drop_rate)(e3)
    e3 = ZeroPadding2D((1, 1))(e3)

    # Level 5: downsample + 13 bottlenecks.
    e4 = bottleneck_downsample_block(e3, filters=[256, 256, 1024])
    for _ in range(13):
        e4 = bottleneck_block(e4, filters=[256, 256, 1024])
    e4 = Dropout(drop_rate)(e4)

    # Bridge: downsample + 3 bottlenecks.
    e5 = bottleneck_downsample_block(e4, filters=[512, 512, 2048])
    for _ in range(3):
        e5 = bottleneck_block(e5, filters=[512, 512, 2048])
    e5 = Dropout(drop_rate)(e5)

    # ---- Decoder ----
    # Level 5
    u0 = upsample_and_concatenation(e5, e4, 1024)
    d0 = Dropout(drop_rate)(residual_block(u0, feat[5]))

    # Level 4 (crop undoes the encoder-side zero padding)
    u1 = Cropping2D((1, 1))(upsample_and_concatenation(d0, e3, 512))
    d1 = Dropout(drop_rate)(residual_block(u1, feat[4]))

    # Level 3
    u2 = upsample_and_concatenation(d1, e2, 256)
    d2 = Dropout(drop_rate)(residual_block(u2, feat[3]))

    # Level 2
    u3 = upsample_and_concatenation(d2, e1, 128)
    d3 = Dropout(drop_rate)(residual_block(u3, feat[2]))

    # Level 1
    u4 = upsample_and_concatenation(d3, e0, 64)
    d4 = residual_block(u4, feat[1])
    head = Activation('relu')(BatchNormalization(axis=3)(d4))

    # Output layer
    outputs = Conv2D(num_classes, (1, 1), name='output_layer')(head)

    return Model(inputs=[inputs], outputs=[outputs], name='DR_Unet104')
def CNNResBlockModelComplex(config):
    """Build a CNN classifier with residual/highway blocks over complex (I/Q) input.

    The input tensor is expected to carry the real part in channel 0 and the
    imaginary part in channel 1; each part is filtered by its own Conv2D before
    being recombined through ``complex_activation``.

    Parameters
    ----------
    config : object
        Experiment configuration. Relevant fields: the ResBlock layout
        (``CNN_ResBlock_*``), filter/pool shapes, ``hidden_size``,
        ``Dense_size``, dropout rates, regularization and activation settings,
        ``with_iq_matrices`` and ``learn_background``.

    Returns
    -------
    keras Model
        Sigmoid classifier, or a 3-way softmax wrapped in
        ``BlockBackgroundModel`` when ``config.learn_background`` is set.

    Raises
    ------
    Exception
        If ``config.with_iq_matrices`` is False — this model only supports the
        IQ representation.
    """

    def activation(activation_name, x):
        # LeakyReLU cannot be created via Activation(name), so special-case it.
        if activation_name == 'leaky_relu':
            return LeakyReLU(alpha=config.alpha)(x)
        else:
            return Activation(activation_name)(x)

    def highway_layer(value, gate_bias=-3):
        # Highway network: y = T*H(x) + C*x with C = 1 - T.
        # https://towardsdatascience.com/review-highway-networks-gating-function-to-highway-image-classification-5a33833797b5
        nonlocal i_hidden  # shared hidden-layer counter across helpers
        # NOTE: to keep dimensions matched, the gate T is a sigmoid convolution
        # (sized like the last conv of the upcoming ResBlock) rather than the
        # paper's Dense gate.
        gate = Conv2D(size_list[i_hidden + config.CNN_ResBlock_conv_per_block - 1], kernel_size=filt_list[-1],
                      padding='same', activation='sigmoid',
                      bias_initializer=tensorflow.keras.initializers.Constant(gate_bias))(value)
        # negated gate (C in the paper)
        negated_gate = Lambda(lambda x: 1.0 - x, output_shape=(size_list[-1],))(gate)
        # use a ResBlock as the transformation H(x)
        transformed = ResBlock(x=value)
        transformed_gated = Multiply()([gate, transformed])
        # upsample `value` along the channel axis if the gate widened it
        if value.shape.as_list()[-1] != negated_gate.shape.as_list()[-1]:
            r = negated_gate.shape.as_list()[-1] / value.shape.as_list()[-1]
            assert not (bool(r % 1))  # channel ratio must be an integer
            value = tf.keras.layers.UpSampling3D(size=(1, 1, int(r)))(value)
        identity_gated = Multiply()([negated_gate, value])
        value = Add()([transformed_gated, identity_gated])
        return value

    def ResBlock(x):
        # Stack of conv -> activation -> BN; each conv consumes one entry of
        # size_list/filt_list via the shared i_hidden counter.
        nonlocal i_hidden  # to keep i_hidden "global" to all helpers
        for i in range(config.CNN_ResBlock_conv_per_block):
            lamda_cnn = 0.0 if config.use_l2_in_cnn is False else lamda
            x = Conv2D(size_list[i_hidden], kernel_size=filt_list[i_hidden], padding='same',
                       bias_regularizer=keras.regularizers.l2(lamda_cnn),
                       kernel_regularizer=keras.regularizers.l2(lamda_cnn))(x)
            x = activation(activation_name, x)
            x = BatchNormalization()(x)
            i_hidden = i_hidden + 1
        return x

    if config.with_iq_matrices is False:
        raise Exception('This model support only operation for IQ representation')
    global background_implicit_inference
    # parameters
    lamda = config.Regularization_term
    p_dropout = config.dropout
    activation_name = config.activation
    filt_dim2_list = config.Filter_shape_dim1 if config.Filter_shape_symmetric else config.Filter_shape_dim2
    filt_list = [(x, y) for x, y in zip(config.Filter_shape_dim1, filt_dim2_list)]
    pool_list = [(x, y) for x, y in zip(config.Pool_shape_dim1, config.Pool_shape_dim2)]
    size_list = config.hidden_size
    dense_list = config.Dense_size
    input_shape = config.model_input_dim
    p_dropout_conv1d = config.CNN_ResBlock_dropout_conv1d
    p_dropout_after_all_conv2d = config.dropout_after_all_conv2d
    i_hidden = 0  # index of the next hidden conv layer to consume

    # Input layer and configuration sanity checks
    input_layer = Input(shape=input_shape)
    assert len(size_list) == len(filt_list)
    assert len(pool_list) == len(config.CNN_ResBlock_highway) == len(config.CNN_ResBlock_dropout)
    assert config.CNN_ResBlock_conv_per_block * len(config.CNN_ResBlock_highway) == len(size_list)
    assert len(config.Conv1D_size) == len(config.Conv1D_kernel)

    # Split the complex input into real/imag channels and filter each separately
    x = input_layer
    real_part = tf.expand_dims(x[:, :, :, 0], axis=-1)
    imag_part = tf.expand_dims(x[:, :, :, 1], axis=-1)

    real_part_output = Conv2D(size_list[0], kernel_size=filt_list[0], padding='same')(real_part)
    imag_part_output = Conv2D(size_list[0], kernel_size=filt_list[0], padding='same')(imag_part)

    real = tf.expand_dims(real_part_output, axis=-1)
    imag = tf.expand_dims(imag_part_output, axis=-1)
    filter_output = tf.concat([real, imag], axis=-1)
    x = complex_activation()(filter_output)
    # ResBlocks, optionally wrapped in a highway gate
    for i in range(len(config.CNN_ResBlock_highway)):
        if config.CNN_ResBlock_highway[i]:
            # True = use Highway
            x = highway_layer(value=x)
        else:
            # False = don't use Highway
            x = ResBlock(x=x)
        # Dropout (if configured) then MaxPool
        if config.CNN_ResBlock_dropout[i] != 0:
            x = Dropout(rate=config.CNN_ResBlock_dropout[i])(x)
        x = MaxPooling2D(pool_size=pool_list[i])(x)
    # Flatten
    x = Flatten()(x)

    # Optional Conv1D head
    if len(config.Conv1D_size) != 0:
        x = tf.expand_dims(x, axis=-1)
    for i in range(len(config.Conv1D_size)):
        x = Conv1D(filters=config.Conv1D_size[i], kernel_size=config.Conv1D_kernel[i])(x)
        x = activation(activation_name, x)
        x = BatchNormalization()(x)
        if p_dropout_conv1d[i] != 0.0:
            # BUGFIX: was p_dropout_conv1d[1], which applied the second layer's
            # dropout rate to every Conv1D layer instead of each layer's own.
            x = Dropout(rate=p_dropout_conv1d[i])(x)
    # post-Conv1D
    if len(config.Conv1D_size) != 0:
        x = MaxPooling1D(pool_size=config.Conv1D_pool)(x)
        x = Flatten()(x)

    # Dense head
    for i in range(len(dense_list)):
        x = Dense(dense_list[i], kernel_regularizer=keras.regularizers.l2(lamda))(x)
        x = activation(activation_name, x)
        if p_dropout_after_all_conv2d != 0 and len(config.Conv1D_size) == 0:
            x = Dropout(rate=p_dropout_after_all_conv2d)(x)
        x = BatchNormalization()(x)
    x = Dropout(rate=p_dropout)(x)
    if config.learn_background:
        x = Dense(3, activation='softmax')(x)
    else:
        x = Dense(1, activation='sigmoid')(x)
    output_layer = x
    model = Model(input_layer, output_layer)
    if config.learn_background:
        if config.background_implicit_inference:
            background_implicit_inference = True
        model = BlockBackgroundModel(input_layer, output_layer)
    return model
    def __init_model__(self):
        """Validate ``n_transitions`` and assemble the stacked train model.

        A negative ``n_transitions`` counts down from the maximum number of
        2x downsamples the input resolution supports (so -1 means "use the
        maximum"). Builds the front end (ImageNet-pretrained or plain), then
        ``n_stacks`` DenseNet stages, collecting one output head per stage
        into ``self.train_model``.

        Raises
        ------
        ValueError
            If ``n_transitions`` is zero or out of range, or the generator's
            ``downsample_factor`` is incompatible with this architecture.
        TypeError
            If ``n_transitions`` is not an integer.
        """
        max_transitions = np.min(
            [
                image_utils.n_downsample(self.train_generator.height),
                image_utils.n_downsample(self.train_generator.width),
            ]
        )

        n_transitions = self.n_transitions
        if isinstance(n_transitions, (int, np.integer)):
            if n_transitions == 0:
                raise ValueError("n_transitions cannot equal zero")
            if n_transitions < 0:
                # Negative values index back from the maximum: -1 -> max.
                n_transitions += 1
                n_transitions = max_transitions - np.abs(n_transitions)
                self.n_transitions = n_transitions
            elif 0 < n_transitions <= max_transitions:
                self.n_transitions = n_transitions
            else:
                raise ValueError(
                    "n_transitions must be in range {0} "
                    "< n_transitions <= "
                    "{1}".format(-max_transitions + 1, max_transitions)
                )
        else:
            raise TypeError(
                "n_transitions must be integer in range "
                "{0} < n_transitions <= "
                "{1}".format(-max_transitions + 1, max_transitions)
            )

        if self.train_generator.downsample_factor < 2:
            raise ValueError(
                "StackedDenseNet is only compatible with `downsample_factor` >= 2."
                "Adjust the TrainingGenerator or choose a different model."
            )
        if n_transitions <= self.train_generator.downsample_factor:
            raise ValueError(
                "`n_transitions` <= `downsample_factor`. Increase `n_transitions` or decrease `downsample_factor`."
                " If `n_transitions` is -1 (the default), check that your image resolutions can be repeatedly downsampled (are divisible by 2 repeatedly)."
            )
        if self.pretrained:
            # BUGFIX: was `is 1`, which tests object identity against an int
            # literal (SyntaxWarning on CPython >= 3.8); value equality is meant.
            if self.input_shape[-1] == 1:
                # Grayscale input: replicate to 3 channels for ImageNet weights.
                inputs = Concatenate()([self.inputs] * 3)
                input_shape = self.input_shape[:-1] + (3,)
            else:
                inputs = self.inputs
                input_shape = self.input_shape
            normalized = ImageNetPreprocess("densenet121")(inputs)
            front_outputs = ImageNetFrontEnd(
                input_shape=input_shape,
                n_downsample=self.train_generator.downsample_factor,
                compression_factor=self.compression_factor,
            )(normalized)
        else:
            normalized = ImageNormalization()(self.inputs)
            front_outputs = FrontEnd(
                growth_rate=self.growth_rate,
                n_downsample=self.train_generator.downsample_factor,
                compression_factor=self.compression_factor,
                bottleneck_factor=self.bottleneck_factor,
            )(normalized)
        n_downsample = self.n_transitions - self.train_generator.downsample_factor
        outputs = front_outputs
        model_outputs = OutputChannels(
            self.train_generator.n_output_channels, name="output_0"
        )(outputs)

        # One output head per stack; each stage sees the front-end features
        # plus the (batch-normalized) previous head.
        model_outputs_list = [model_outputs]
        outputs.append(BatchNormalization()(model_outputs))
        for idx in range(self.n_stacks):
            outputs = DenseNet(
                growth_rate=self.growth_rate,
                n_downsample=self.n_transitions
                - self.train_generator.downsample_factor,
                downsample_factor=self.train_generator.downsample_factor,
                compression_factor=self.compression_factor,
                bottleneck_factor=self.bottleneck_factor,
            )(outputs)
            outputs.append(Concatenate()(front_outputs))
            outputs.append(BatchNormalization()(model_outputs))
            model_outputs = OutputChannels(
                self.train_generator.n_output_channels, name="output_" + str(idx + 1)
            )(outputs)
            model_outputs_list.append(model_outputs)

        self.train_model = Model(
            self.inputs, model_outputs_list, name=self.__class__.__name__
        )
def Attention_U_Net_2D(image_shape,
                       activation='elu',
                       feature_maps=[16, 32, 64, 128, 256],
                       depth=4,
                       drop_values=[0.1, 0.1, 0.2, 0.2, 0.3],
                       spatial_dropout=False,
                       batch_norm=False,
                       k_init='he_normal',
                       loss_type="bce",
                       optimizer="sgd",
                       lr=0.002,
                       n_classes=1):
    """Create a 2D U-Net with Attention blocks.

       Based on `Attention U-Net: Learning Where to Look for the Pancreas
       <https://arxiv.org/abs/1804.03999>`_.

       Parameters
       ----------
       image_shape : 2D tuple
           Dimensions of the input image.

       activation : str, optional
           Keras available activation type.

       feature_maps : array of ints, optional
           Feature maps to use on each level. Must have length ``depth+1``.

       depth : int, optional
           Depth of the network.

       drop_values : float, optional
           Dropout value to be fixed. If no value is provided the default
           behaviour will be to select a pyramidal value starting from ``0.1``
           and reaching ``0.3``. Must have length ``depth+1``.

       spatial_dropout : bool, optional
           Use spatial dropout instead of the `normal` dropout.

       batch_norm : bool, optional
           Apply batch normalization after each convolution.

       k_init : string, optional
           Kernel initialization for convolutional layers.

       loss_type : str, optional
           Loss type to use, three types available: ``bce`` (Binary Cross
           Entropy), ``w_bce`` (Weighted BCE, based on weight maps) and
           ``w_bce_dice`` (Weighted loss: ``weight1*BCE + weight2*Dice``).

       optimizer : str, optional
           Optimizer used to minimize the loss function. Possible options:
           ``sgd`` or ``adam``.

       lr : float, optional
           Learning rate value.

       n_classes: int, optional
           Number of classes.

       Returns
       -------
       model : Keras model
           Compiled model containing the Attention U-Net.

       Raises
       ------
       ValueError
           If ``feature_maps``/``drop_values`` lengths don't match ``depth+1``,
           or ``optimizer``/``loss_type`` is not one of the options above.

       Example
       -------

       Calling this function with its default parameters returns the following
       network:

       .. image:: img/unet.png
           :width: 100%
           :align: center

       Image created with `PlotNeuralNet <https://github.com/HarisIqbal88/PlotNeuralNet>`_.

       That network incorporates in skip connections Attention Gates (AG),
       which can be seen as follows:

       .. image:: img/attention_gate.png
           :width: 100%
           :align: center

       Image extracted from `Attention U-Net: Learning Where to Look for the Pancreas <https://arxiv.org/abs/1804.03999>`_.
    """

    if len(feature_maps) != depth + 1:
        raise ValueError("feature_maps dimension must be equal depth+1")
    if len(drop_values) != depth + 1:
        raise ValueError("'drop_values' dimension must be equal depth+1")

    # Dynamic spatial dims (None) so the network accepts variable image sizes;
    # only the channel count is fixed.
    dinamic_dim = (None, ) * (len(image_shape) - 1) + (image_shape[-1], )
    x = Input(dinamic_dim)
    #x = Input(image_shape)
    inputs = x

    # Weight maps are fed as a second input when using the weighted BCE loss.
    if loss_type == "w_bce":
        weights = Input(image_shape)

    # List used to access layers easily to make the skip connections of the U-Net
    l = []

    # ENCODER
    for i in range(depth):
        x = Conv2D(feature_maps[i], (3, 3),
                   activation=None,
                   kernel_initializer=k_init,
                   padding='same')(x)
        x = BatchNormalization()(x) if batch_norm else x
        x = Activation(activation)(x)
        if drop_values is not None:
            if spatial_dropout:
                x = SpatialDropout2D(drop_values[i])(x)
            else:
                x = Dropout(drop_values[i])(x)
        x = Conv2D(feature_maps[i], (3, 3),
                   activation=None,
                   kernel_initializer=k_init,
                   padding='same')(x)
        x = BatchNormalization()(x) if batch_norm else x
        x = Activation(activation)(x)

        l.append(x)

        x = MaxPooling2D((2, 2))(x)

    # BOTTLENECK
    x = Conv2D(feature_maps[depth], (3, 3),
               activation=None,
               kernel_initializer=k_init,
               padding='same')(x)
    x = BatchNormalization()(x) if batch_norm else x
    x = Activation(activation)(x)
    if drop_values is not None:
        if spatial_dropout:
            x = SpatialDropout2D(drop_values[depth])(x)
        else:
            x = Dropout(drop_values[depth])(x)
    x = Conv2D(feature_maps[depth], (3, 3),
               activation=None,
               kernel_initializer=k_init,
               padding='same')(x)
    x = BatchNormalization()(x) if batch_norm else x
    x = Activation(activation)(x)

    # DECODER
    for i in range(depth - 1, -1, -1):
        x = Conv2DTranspose(feature_maps[i], (2, 2),
                            strides=(2, 2),
                            padding='same')(x)
        # Attention gate on the skip connection before concatenation.
        attn = AttentionBlock(x, l[i], feature_maps[i], batch_norm)
        x = concatenate([x, attn])
        x = Conv2D(feature_maps[i], (3, 3),
                   activation=None,
                   kernel_initializer=k_init,
                   padding='same')(x)
        x = BatchNormalization()(x) if batch_norm else x
        x = Activation(activation)(x)
        if drop_values is not None:
            if spatial_dropout:
                x = SpatialDropout2D(drop_values[i])(x)
            else:
                x = Dropout(drop_values[i])(x)

        x = Conv2D(feature_maps[i], (3, 3),
                   activation=None,
                   kernel_initializer=k_init,
                   padding='same')(x)
        x = BatchNormalization()(x) if batch_norm else x
        x = Activation(activation)(x)

    outputs = Conv2D(n_classes, (1, 1), activation='sigmoid')(x)

    # Loss type
    if loss_type == "w_bce":
        model = Model(inputs=[inputs, weights], outputs=[outputs])
    else:
        model = Model(inputs=[inputs], outputs=[outputs])

    # Select the optimizer
    if optimizer == "sgd":
        opt = tf.keras.optimizers.SGD(lr=lr,
                                      momentum=0.99,
                                      decay=0.0,
                                      nesterov=False)
    elif optimizer == "adam":
        opt = tf.keras.optimizers.Adam(lr=lr,
                                       beta_1=0.9,
                                       beta_2=0.999,
                                       epsilon=None,
                                       decay=0.0,
                                       amsgrad=False)
    else:
        raise ValueError("Error: optimizer value must be 'sgd' or 'adam'")

    # Compile the model
    if loss_type == "bce":
        if n_classes > 1:
            model.compile(optimizer=opt,
                          loss='categorical_crossentropy',
                          metrics=[jaccard_index_softmax])
        else:
            model.compile(optimizer=opt,
                          loss='binary_crossentropy',
                          metrics=[jaccard_index])
    elif loss_type == "w_bce":
        model.compile(optimizer=opt,
                      loss=binary_crossentropy_weighted(weights),
                      metrics=[jaccard_index])
    elif loss_type == "w_bce_dice":
        model.compile(optimizer=opt,
                      loss=weighted_bce_dice_loss(w_dice=0.66, w_bce=0.33),
                      metrics=[jaccard_index])
    else:
        raise ValueError("'loss_type' must be 'bce', 'w_bce' or 'w_bce_dice'")

    return model
print(x_train.shape, y_train.shape) #(160, 150, 150, 3)
print(x_test.shape, y_test.shape) # (120, 150, 150, 3)


#1.1 Preprocessing: hold out a validation split from the training data
from sklearn.model_selection import train_test_split
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, train_size = 0.8, shuffle = True, random_state = 66)


#2. Model definition: small CNN -> dense head -> sigmoid binary classifier
from tensorflow.keras.models import Sequential, Model
# BUGFIX: BatchNormalization is used below but was missing from this import.
from tensorflow.keras.layers import Dense, Input, LSTM, Conv2D, MaxPooling2D, Dropout, Flatten, BatchNormalization
input1 = Input(shape=(x_train.shape[1], x_train.shape[2] ,x_train.shape[3]))
x = Conv2D(64, 2, activation='relu')(input1)
x = Conv2D(128, 2, activation='relu')(x)
x = BatchNormalization()(x)
x = Conv2D(64, 2, activation='relu')(x)
x = BatchNormalization()(x)
x = MaxPooling2D(2)(x)
x = Flatten()(x)

x = Dense(32, activation='relu')(x)
x = BatchNormalization()(x)
x = Dense(64, activation='relu')(x) 
x = BatchNormalization()(x)
x = Dense(40, activation='relu')(x)
x = BatchNormalization()(x)
x = Dense(40, activation='relu')(x)
outputs = Dense(1, activation= 'sigmoid')(x)
model = Model(inputs = input1, outputs = outputs)
model.summary()
示例#23
0
def identity_block(X, f, filters, stage, block):
    """
    Implements the ResNet identity block (main path plus shortcut).

    Arguments:
        X - input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
        f - integer, kernel size of the middle CONV on the main path
        filters - list of integers, number of filters in each conv layer of the main path
        stage - integer, used together with `block` to name each layer by its position in the network
        block - string, used together with `stage` to name each layer by its position in the network

    Returns:
        X - output of the identity block, tensor of shape (n_H, n_W, n_C)

    """

    # Naming convention for the layers
    conv_name_base = "res" + str(stage) + block + "_branch"
    bn_name_base = "bn" + str(stage) + block + "_branch"

    # Unpack the filters (three conv layers on the main path)
    F1, F2, F3 = filters  # each main path has three filters

    # Save the input value to add back as the shortcut
    X_shortcut = X

    # First component of the main path
    ## Convolution
    X = Conv2D(filters=F1,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding="valid",
               name=conv_name_base + "2a",
               kernel_initializer=glorot_uniform(seed=0))(X)
    ## Batch normalization
    X = BatchNormalization(axis=3, name=bn_name_base + "2a")(X)
    ## ReLU activation
    X = Activation("relu")(X)

    # Second component of the main path
    ## Convolution
    X = Conv2D(filters=F2,
               kernel_size=(f, f),
               strides=(1, 1),
               padding="same",
               name=conv_name_base + "2b",
               kernel_initializer=glorot_uniform(seed=0))(X)
    ## Batch normalization
    X = BatchNormalization(axis=3, name=bn_name_base + "2b")(X)
    ## ReLU activation
    X = Activation("relu")(X)

    # Third component of the main path
    ## Convolution
    X = Conv2D(filters=F3,
               kernel_size=(1, 1),
               strides=(1, 1),
               padding="valid",
               name=conv_name_base + "2c",
               kernel_initializer=glorot_uniform(seed=0))(X)
    ## Batch normalization
    X = BatchNormalization(axis=3, name=bn_name_base + "2c")(X)
    ## No ReLU here — activation comes after the shortcut addition

    # Final step:
    ## Add the shortcut back to the main path
    X = Add()([X, X_shortcut])
    ## ReLU activation
    X = Activation("relu")(X)

    return X
示例#24
0
def define_generator(input, depth=256, dim=4):
    """Build the DCGAN generator: 100-d latent vector -> 128x128x3 image.

    The latent vector is projected to a (dim, dim, 64) feature map, then
    upsampled five times (4 -> 8 -> 16 -> 32 -> 64 -> 128 pixels), and finally
    projected to 3 channels with a tanh activation (output in [-1, 1]).
    """
    init = RandomNormal(stddev=0.02)
    model = Sequential(name="Generator")

    # Project the latent vector and reshape it into a small feature map.
    model.add(Dense(64 * dim**2, input_dim=100, kernel_initializer=init))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Reshape((dim, dim, 64)))

    # Five identical upsampling stages; only the first uses 128 filters.
    for n_filters in (128, 256, 256, 256, 256):
        model.add(
            Conv2DTranspose(n_filters, (dim, dim),
                            strides=(2, 2),
                            padding='same',
                            kernel_initializer=init))
        model.add(BatchNormalization())
        model.add(LeakyReLU(alpha=0.2))
        model.add(Conv2D(n_filters, (3, 3), strides=(1, 1), padding='same'))
        model.add(BatchNormalization())
        model.add(LeakyReLU(alpha=0.2))

    # Final projection to a 128x128x3 image in [-1, 1].
    model.add(
        Conv2D(3, (3, 3),
               padding='same',
               activation='tanh',
               kernel_initializer=init))
    return model
示例#25
0
import matplotlib.pyplot as plt
from tqdm import tqdm


# Load the dataset and scale pixel values into [0, 1].
(x_train, y_train), (x_test, y_test) = load_data()
x_train = x_train / 255
x_test = x_test / 255
# Add a trailing channel axis: (N, 28, 28) -> (N, 28, 28, 1).
x_train = np.expand_dims(x_train, axis=-1)
x_test = np.expand_dims(x_test, axis=-1)


# Generator: 100-d noise vector -> 28x28x1 image.
# NOTE(review): `relu`, `tanh` and `leaky_relu` below are bare function
# references, presumably imported earlier in the file — verify.
generator = Sequential()

generator.add(Dense(128*7*7, input_dim=100, activation=relu))
generator.add(Reshape((7, 7, 128)))
generator.add(BatchNormalization())

# Two stride-2 transposed convolutions upsample 7x7 -> 14x14 -> 28x28.
generator.add(Conv2DTranspose(128, (3,3), strides=(2,2), padding="same", activation=relu))
generator.add(Conv2DTranspose(128, (3,3), strides=(2,2), padding="same", activation=relu))
generator.add(BatchNormalization())
generator.add(Dropout(0.3))

generator.add(Conv2D(64, (3,3), padding="same", activation=relu))
# tanh output: generated pixels in [-1, 1].
generator.add(Conv2D(1, (3,3), padding="same", activation=tanh))

generator.summary()


# Discriminator: 28x28x1 image -> real/fake score.
discriminator = Sequential()

discriminator.add(Conv2D(64, (3,3), padding="same", input_shape=(28,28,1), activation=leaky_relu))
示例#26
0
def final2DStacked(path):
    """Build a channels-first 2D convolutional autoencoder and its checkpoint callback.

    Restores the latest checkpoint from the directory containing ``path`` (if
    any), saves the assembled model to ``path + 'model.h5'``, and returns the
    compiled model together with a per-epoch weight-checkpoint callback.

    Parameters
    ----------
    path : str
        Checkpoint file path; its directory is scanned for existing checkpoints.

    Returns
    -------
    (model, cp_callback) : (Sequential, ModelCheckpoint)
    """
    checkpoint_dir = os.path.dirname(path)
    model = Sequential()
    model.add(
        InputLayer(input_shape=(config.NUM_CHANNELS, config.IMG_HEIGHT,
                                config.IMG_WIDTH)))

    # Encoder: 128 -> 64 filters with 2x2 max-pooling, then a 1x1 bottleneck.
    model.add(
        Conv2D(128, (3, 3),
               padding="same",
               kernel_regularizer=tf.keras.regularizers.l2(1e-10),
               data_format="channels_first"))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(
        Conv2D(64, (3, 3),
               padding="same",
               kernel_regularizer=tf.keras.regularizers.l2(1e-10),
               data_format="channels_first"))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(
        Conv2D(16, (1, 1),
               padding="same",
               kernel_regularizer=tf.keras.regularizers.l2(1e-10),
               data_format="channels_first"))

    # Decoder: two stride-2 transposed convolutions restore the input size.
    model.add(
        Conv2DTranspose(64, (3, 3),
                        strides=2,
                        padding="same",
                        kernel_regularizer=tf.keras.regularizers.l2(1e-10),
                        data_format="channels_first"))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(
        Conv2DTranspose(128, (3, 3),
                        strides=2,
                        padding="same",
                        kernel_regularizer=tf.keras.regularizers.l2(1e-10),
                        data_format="channels_first"))
    model.add(BatchNormalization())
    model.add(ReLU())
    # BUGFIX: data_format was passed to Sequential.add() (a TypeError at
    # runtime) instead of to the Conv2D layer itself.
    model.add(Conv2D(config.NUM_CHANNELS, (3, 3),
                     activation='sigmoid',
                     padding='same',
                     data_format="channels_first"))
    model.summary()

    model.compile(loss='mean_squared_error', optimizer=Adam())
    # Save weights (only) after every epoch.
    cp_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=path,
        # monitor='val_loss',
        save_weights_only=True,
        # save_best_only=True,
        verbose=1,
        save_freq='epoch')

    # Resume from the most recent checkpoint, if one exists.
    latest = tf.train.latest_checkpoint(checkpoint_dir)
    print(latest)
    if latest is not None:
        model.load_weights(latest)
        print('weights loaded')
    model.save(path + 'model.h5')

    return model, cp_callback
示例#27
0
def SENET50(include_top=True,
            weights='vggface',
            input_tensor=None,
            input_shape=None,
            pooling=None,
            classes=8631):
    """Instantiate the SENet-50 architecture, optionally with VGGFace weights.

    Parameters
    ----------
    include_top : bool, optional
        Whether to include the final Flatten + softmax classifier.
    weights : str or None, optional
        ``'vggface'`` to download the pretrained VGGFace weights, ``None`` for
        random init, or a path to a weights file to load.
    input_tensor : Keras tensor, optional
        Optional tensor to use as image input for the model.
    input_shape : tuple, optional
        Input shape; validated/derived via ``_obtain_input_shape``.
    pooling : str or None, optional
        When ``include_top`` is False: ``'avg'``, ``'max'`` or None for the
        final feature pooling.
    classes : int, optional
        Number of output classes (only used when ``include_top`` is True).

    Returns
    -------
    model : Keras Model
        The ``vggface_senet50`` model.
    """
    input_shape = _obtain_input_shape(input_shape,
                                      default_size=224,
                                      min_size=197,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top,
                                      weights=weights)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor
    # Batch-norm axis follows the backend's channel position.
    if K.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    bn_eps = 0.0001

    # Stem: 7x7/2 conv + 3x3/2 max-pool.
    x = Conv2D(64, (7, 7),
               use_bias=False,
               strides=(2, 2),
               padding='same',
               name='conv1/7x7_s2')(img_input)
    x = BatchNormalization(axis=bn_axis,
                           name='conv1/7x7_s2/bn',
                           epsilon=bn_eps)(x)
    x = Activation('relu')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2))(x)

    # Stage 2
    x = senet_conv_block(x, 3, [64, 64, 256], stage=2, block=1, strides=(1, 1))
    x = senet_identity_block(x, 3, [64, 64, 256], stage=2, block=2)
    x = senet_identity_block(x, 3, [64, 64, 256], stage=2, block=3)

    # Stage 3
    x = senet_conv_block(x, 3, [128, 128, 512], stage=3, block=1)
    x = senet_identity_block(x, 3, [128, 128, 512], stage=3, block=2)
    x = senet_identity_block(x, 3, [128, 128, 512], stage=3, block=3)
    x = senet_identity_block(x, 3, [128, 128, 512], stage=3, block=4)

    # Stage 4
    x = senet_conv_block(x, 3, [256, 256, 1024], stage=4, block=1)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=2)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=3)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=4)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=5)
    x = senet_identity_block(x, 3, [256, 256, 1024], stage=4, block=6)

    # Stage 5
    x = senet_conv_block(x, 3, [512, 512, 2048], stage=5, block=1)
    x = senet_identity_block(x, 3, [512, 512, 2048], stage=5, block=2)
    x = senet_identity_block(x, 3, [512, 512, 2048], stage=5, block=3)

    x = AveragePooling2D((7, 7), name='avg_pool')(x)

    if include_top:
        x = Flatten()(x)
        x = Dense(classes, activation='softmax', name='classifier')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = Model(inputs, x, name='vggface_senet50')

    # load weights
    if weights == 'vggface':
        if include_top:
            weights_path = get_file('rcmalli_vggface_tf_senet50.h5',
                                    SENET50_WEIGHTS_PATH,
                                    cache_subdir=VGGFACE_DIR)
        else:
            weights_path = get_file('rcmalli_vggface_tf_notop_senet50.h5',
                                    SENET50_WEIGHTS_PATH_NO_TOP,
                                    cache_subdir=VGGFACE_DIR)
        model.load_weights(weights_path)

        if K.image_data_format() == 'channels_first' and K.backend(
        ) == 'tensorflow':
            warnings.warn('You are using the TensorFlow backend, yet you '
                          'are using the Theano '
                          'image data format convention '
                          '(`image_data_format="channels_first"`). '
                          'For best performance, set '
                          '`image_data_format="channels_last"` in '
                          'your Keras config '
                          'at ~/.keras/keras.json.')
    elif weights is not None:
        model.load_weights(weights)

    return model
示例#28
0
def final3D(path):
    """Build and compile a 3D convolutional autoencoder, restoring weights
    from the latest checkpoint in ``path``'s directory when one exists.

    Args:
        path: Checkpoint file path. Its directory is scanned with
            ``tf.train.latest_checkpoint``; the model is also saved to
            ``path + 'model.h5'``.

    Returns:
        Tuple ``(model, cp_callback)``: the compiled ``Sequential`` model and
        a ``ModelCheckpoint`` callback that saves weights once per epoch.
    """
    checkpoint_dir = os.path.dirname(path)
    model = Sequential()
    # Encoder — input shape comes from the project-level config
    # (channels, height, width, 1).
    model.add(
        InputLayer(input_shape=(config.NUM_CHANNELS, config.IMG_HEIGHT,
                                config.IMG_WIDTH, 1)))

    model.add(
        Conv3D(128,
               3,
               padding="same",
               kernel_regularizer=tf.keras.regularizers.l2(1e-10)))
    model.add(BatchNormalization())
    model.add(ReLU())
    # Pool only spatially here; keep the channel/depth axis intact.
    model.add(MaxPool3D(pool_size=(1, 2, 2)))

    model.add(
        Conv3D(64,
               3,
               padding="same",
               kernel_regularizer=tf.keras.regularizers.l2(1e-10)))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(MaxPool3D(pool_size=(2, 2, 2)))

    # Bottleneck: 1x1x1 conv to compress to 32 feature maps.
    model.add(
        Conv3D(32,
               1,
               padding="same",
               kernel_regularizer=tf.keras.regularizers.l2(1e-10)))
    model.add(BatchNormalization())
    model.add(ReLU())

    # Decoder. BUG FIX: Keras Conv3D takes `strides`, not `stride` —
    # the original keyword raised a TypeError when building the layer.
    model.add(
        Conv3D(64,
               3,
               padding="same",
               strides=(2, 2, 2),
               kernel_regularizer=tf.keras.regularizers.l2(1e-10)))
    model.add(BatchNormalization())
    model.add(ReLU())
    # model.add(UpSampling3D(size=(2,2,2)))

    model.add(
        Conv3D(128,
               3,
               padding="same",
               strides=(1, 2, 2),
               kernel_regularizer=tf.keras.regularizers.l2(1e-10)))
    model.add(BatchNormalization())
    model.add(ReLU())
    model.add(UpSampling3D(size=(1, 2, 2)))
    # Single-channel sigmoid output reconstructs the input volume.
    model.add(Conv3D(1, 3, activation='sigmoid', padding='same'))

    model.compile(loss='mean_squared_error', optimizer=Adam())
    cp_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=path,
        # monitor='val_loss',
        save_weights_only=True,
        # save_best_only=True,
        verbose=1,
        save_freq='epoch')

    # Resume from the most recent checkpoint, if any was written before.
    latest = tf.train.latest_checkpoint(checkpoint_dir)
    print(latest)
    if latest is not None:
        model.load_weights(latest)
        print('weights loaded')
    model.save(path + 'model.h5')

    return model, cp_callback
Example #29
0
def build_encoder_decoder():
    """Build a VGG16-style encoder with an ASPP bottleneck (stage 4) and an
    unpooling decoder with skip connections, for 320x320x4 inputs.

    Returns:
        A Keras ``Model`` mapping the 4-channel input to a single-channel
        sigmoid prediction map (``name='pred'``).
    """

    def _unpool_merge(x, skip):
        # Upsample x by 2 and merge it with the pre-pooling activations
        # `skip` via the project's Unpooling layer: both tensors are
        # reshaped to (1, H, W, C) and stacked on axis 1.
        x = UpSampling2D(size=(2, 2))(x)
        the_shape = K.int_shape(skip)
        shape = (1, the_shape[1], the_shape[2], the_shape[3])
        orig_reshaped = Reshape(shape)(skip)
        x_reshaped = Reshape(shape)(x)
        together = Concatenate(axis=1)([orig_reshaped, x_reshaped])
        return Unpooling()(together)

    def _deconv(x, filters, name):
        # One decoder stage: 3x3 ReLU conv followed by batch norm.
        x = Conv2D(filters, (3, 3),
                   activation='relu',
                   padding='same',
                   name=name,
                   kernel_initializer='he_normal',
                   bias_initializer='zeros')(x)
        return BatchNormalization()(x)

    # Encoder
    input_tensor = Input(shape=(320, 320, 4))
    x = Conv2D(64, (3, 3), padding='same', activation='relu',
               name='conv1_1')(input_tensor)
    x = Conv2D(64, (3, 3), padding='same', activation='relu',
               name='conv1_2')(x)
    orig_1 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = Conv2D(128, (3, 3), padding='same', activation='relu',
               name='conv2_1')(x)
    x = Conv2D(128, (3, 3), padding='same', activation='relu',
               name='conv2_2')(x)
    orig_2 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = Conv2D(256, (3, 3), padding='same', activation='relu',
               name='conv3_1')(x)
    x = Conv2D(256, (3, 3), padding='same', activation='relu',
               name='conv3_2')(x)
    x = Conv2D(256, (3, 3), padding='same', activation='relu',
               name='conv3_3')(x)
    orig_3 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)
    inputs_size = x.get_shape()[1:3]

    # Stage 4: ASPP — parallel atrous separable convolutions plus an
    # image-level pooling branch, concatenated and fused with 1x1 convs.
    conv_4_1x1 = SeparableConv2D(512, (1, 1),
                                 activation='relu',
                                 padding='same',
                                 name='conv4_1x1')(x)
    conv_4_3x3_1 = SeparableConv2D(512, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   dilation_rate=ATROUS_RATES[0],
                                   name='conv4_3x3_1')(x)
    conv_4_3x3_2 = SeparableConv2D(512, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   dilation_rate=ATROUS_RATES[1],
                                   name='conv4_3x3_2')(x)
    conv_4_3x3_3 = SeparableConv2D(512, (3, 3),
                                   activation='relu',
                                   padding='same',
                                   dilation_rate=ATROUS_RATES[2],
                                   name='conv4_3x3_3')(x)
    # Image average pooling
    image_level_features = Lambda(
        lambda x: tf.reduce_mean(x, [1, 2], keepdims=True),
        name='global_average_pooling')(x)
    image_level_features = Conv2D(
        512, (1, 1),
        activation='relu',
        padding='same',
        name='image_level_features_conv_1x1')(image_level_features)
    image_level_features = Lambda(lambda x: tf.image.resize(x, inputs_size),
                                  name='upsample_1')(image_level_features)
    # Concat
    x = Concatenate(axis=3)([
        conv_4_1x1, conv_4_3x3_1, conv_4_3x3_2, conv_4_3x3_3,
        image_level_features
    ])
    x = Conv2D(512, (1, 1),
               activation='relu',
               padding='same',
               name='conv_1x1_1_concat')(x)
    x = Conv2D(512, (1, 1),
               activation='relu',
               padding='same',
               name='conv_1x1_2_concat')(x)
    orig_4 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv5_1')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv5_2')(x)
    x = Conv2D(512, (3, 3), activation='relu', padding='same',
               name='conv5_3')(x)
    orig_5 = x
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)

    # Decoder — mirror the encoder, unpooling with each stage's
    # pre-pooling activations as the skip connection.
    x = _unpool_merge(x, orig_5)
    x = _deconv(x, 512, 'deconv5_1')
    x = _deconv(x, 512, 'deconv5_2')
    x = _deconv(x, 512, 'deconv5_3')

    x = _unpool_merge(x, orig_4)
    x = _deconv(x, 256, 'deconv4_1')
    x = _deconv(x, 256, 'deconv4_2')
    x = _deconv(x, 256, 'deconv4_3')

    x = _unpool_merge(x, orig_3)
    x = _deconv(x, 128, 'deconv3_1')
    x = _deconv(x, 128, 'deconv3_2')
    x = _deconv(x, 128, 'deconv3_3')

    x = _unpool_merge(x, orig_2)
    x = _deconv(x, 64, 'deconv2_1')
    x = _deconv(x, 64, 'deconv2_2')

    x = _unpool_merge(x, orig_1)
    x = _deconv(x, 64, 'deconv1_1')
    x = _deconv(x, 64, 'deconv1_2')

    # Single-channel sigmoid prediction head.
    x = Conv2D(1, (3, 3),
               activation='sigmoid',
               padding='same',
               name='pred',
               kernel_initializer='he_normal',
               bias_initializer='zeros')(x)

    model = Model(inputs=input_tensor, outputs=x)
    return model
Example #30
0
def identity_block(X, f, filters, stage, block):
    """
    Implementation of the identity block as defined in Figure 3

    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- integer, specifying the shape of the middle CONV's window for the main path
    filters -- python list of integers, defining the number of filters in the CONV layers of the main path
    stage -- integer, used to name the layers, depending on their position in the network
    block -- string/character, used to name the layers, depending on their position in the network

    Returns:
    X -- output of the identity block, tensor of shape (n_H, n_W, n_C)
    """

    # Layer-name prefixes encode the block's position in the network.
    conv_base = f'res{stage}{block}_branch'
    bn_base = f'bn{stage}{block}_branch'

    n1, n2, n3 = filters

    # Keep the input around for the residual addition at the end.
    shortcut = X

    def _sep_conv_bn(t, n_filters, kernel, pad, suffix):
        # Separable 1x1/fxf conv (stride 1) followed by channel-axis
        # batch norm; `suffix` distinguishes the three main-path stages.
        t = SeparableConv2D(filters=n_filters,
                            kernel_size=(kernel, kernel),
                            strides=(1, 1),
                            padding=pad,
                            name=conv_base + suffix,
                            kernel_initializer=glorot_uniform(seed=0))(t)
        return BatchNormalization(axis=3, name=bn_base + suffix)(t)

    # Main path: 1x1 reduce -> fxf -> 1x1 expand, ReLU6 between stages.
    X = _sep_conv_bn(X, n1, 1, 'valid', '2a')
    X = ReLU(max_value=6)(X)

    X = _sep_conv_bn(X, n2, f, 'same', '2b')
    X = ReLU(max_value=6)(X)

    X = _sep_conv_bn(X, n3, 1, 'valid', '2c')

    # Residual connection, then a final ReLU6.
    X = add([X, shortcut])
    X = ReLU(max_value=6)(X)

    return X