Example #1
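These snippets are collected from different projects and target slightly different Keras versions. As a rough common prelude (an assumption; the original source does not show its imports), they rely on imports along these lines, with BatchRenormalization coming from an external implementation such as titu1994's BatchRenormalization or keras-contrib rather than core Keras:

from keras import backend as K
from keras.models import Model, Sequential
from keras.layers import (Input, Activation, Dense, Dropout, Flatten,
                          Conv2D, Convolution2D, MaxPool2D, MaxPooling2D,
                          AveragePooling2D, GlobalAveragePooling2D,
                          UpSampling2D, Add, Subtract, concatenate)
from keras.optimizers import SGD, RMSprop
from keras.applications.inception_v3 import InceptionV3
# Assumed external layer (not part of core Keras):
# from batch_renorm import BatchRenormalization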
def conv2_block(input, k=1, dropout=0.0):
    # Pre-activation residual block (BRN -> ReLU -> Conv, twice) for a Wide ResNet.
    init = input

    channel_axis = 1 if K.image_data_format() == "channels_first" else -1

    x = BatchRenormalization(axis=channel_axis,
                             momentum=0.1,
                             epsilon=1e-5,
                             gamma_init='uniform')(input)
    x = Activation('relu')(x)
    x = Convolution2D(32 * k, (3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      use_bias=False)(x)

    if dropout > 0.0:
        x = Dropout(dropout)(x)

    x = BatchRenormalization(axis=channel_axis,
                             momentum=0.1,
                             epsilon=1e-5,
                             gamma_init='uniform')(x)
    x = Activation('relu')(x)
    x = Convolution2D(32 * k, (3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      use_bias=False)(x)

    m = Add()([init, x])
    return m
Example #2
def ___conv4_block(input, k=1, dropout=0.0, mode=0):
    init = input

    channel_axis = 1 if K.image_dim_ordering() == "th" else -1

    # If the input's channel count differs from 64 * k, project it with a 1x1 convolution.
    # (This example uses the Keras 1.x API: positional kernel sizes, border_mode, merge.)
    if K.image_dim_ordering() == "th":
        if init._keras_shape[1] != 64 * k:
            init = Convolution2D(64 * k, 1, 1, activation='linear', border_mode='same')(init)
    else:
        if init._keras_shape[-1] != 64 * k:
            init = Convolution2D(64 * k, 1, 1, activation='linear', border_mode='same')(init)

    x = Convolution2D(64 * k, 3, 3, border_mode='same')(input)
    x = BatchRenormalization(axis=channel_axis, mode=mode)(x)
    x = Activation('relu')(x)

    if dropout > 0.0:
        x = Dropout(dropout)(x)

    x = Convolution2D(64 * k, 3, 3, border_mode='same')(x)
    x = BatchRenormalization(axis=channel_axis, mode=mode)(x)
    x = Activation('relu')(x)

    m = merge([init, x], mode='sum')
    return m
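Under the Keras 2.x API the same block would take keyword kernel sizes, padding instead of border_mode, and the Add layer instead of merge; a sketch of the equivalent calls:

    init = Convolution2D(64 * k, (1, 1), activation='linear', padding='same')(init)
    x = Convolution2D(64 * k, (3, 3), padding='same')(input)
    # ... BatchRenormalization / Activation / second conv as above ...
    m = Add()([init, x])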
Example #3
def expand_conv(init, base, k, strides=(1, 1)):
    # Expansion block: widens (and optionally strides) the main path, with a 1x1 projection shortcut.
    x = Convolution2D(base * k, (3, 3),
                      padding='same',
                      strides=strides,
                      kernel_initializer='he_normal',
                      use_bias=False)(init)

    channel_axis = 1 if K.image_data_format() == "channels_first" else -1

    x = BatchRenormalization(axis=channel_axis,
                             momentum=0.1,
                             epsilon=1e-5,
                             gamma_init='uniform')(x)
    x = Activation('relu')(x)

    x = Convolution2D(base * k, (3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      use_bias=False)(x)

    skip = Convolution2D(base * k, (1, 1),
                         padding='same',
                         strides=strides,
                         kernel_initializer='he_normal',
                         use_bias=False)(init)

    m = Add()([x, skip])

    return m
Example #4
def __conv1_block(input, mode=0):
    x = Convolution2D(16, 3, 3, border_mode='same')(input)

    channel_axis = 1 if K.image_dim_ordering() == "th" else -1

    x = BatchRenormalization(axis=channel_axis, mode=mode)(x)
    x = Activation('relu')(x)
    return x
Example #5
def inceptionv3(img_dim):
    input_tensor = Input(shape=img_dim)
    base_model = InceptionV3(include_top=False,
                             weights='imagenet',
                             input_shape=img_dim)
    bn = BatchRenormalization()(input_tensor)  # renormalize the raw input before the pretrained base
    x = base_model(bn)
    x = GlobalAveragePooling2D()(x)
    x = Dropout(0.5)(x)
    output = Dense(1, activation='sigmoid')(x)
    model = Model(input_tensor, output)
    return model
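A minimal usage sketch (the 299x299 RGB input size, optimizer, and loss are assumptions for a binary-classification setup):

model = inceptionv3((299, 299, 3))
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()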
Example #6
def initial_conv(input):
    x = Convolution2D(16, (3, 3),
                      padding='same',
                      kernel_initializer='he_normal',
                      use_bias=False)(input)

    channel_axis = 1 if K.image_data_format() == "channels_first" else -1

    x = BatchRenormalization(axis=channel_axis,
                             momentum=0.1,
                             epsilon=1e-5,
                             gamma_init='uniform')(x)
    x = Activation('relu')(x)
    return x
Example #7
def BRDNet():  # originally def BRDNet(data), where data supplied the input shape
    inpt = Input(shape=(None, None, 1))  # 1 channel for grayscale images, 3 for color
    # Upper branch: first layer, Conv + BRN + ReLU
    x = Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), padding='same')(inpt)
    x = BatchRenormalization(axis=-1, epsilon=1e-3)(x)
    x = Activation('relu')(x)
    # middle layers, Conv + BRN + ReLU (7 here, 8 more below: 15 in total)
    for i in range(7):
        x = Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), padding='same')(x)
        x = BatchRenormalization(axis=-1, epsilon=1e-3)(x)
        x = Activation('relu')(x)   
    # remaining 8 middle layers, Conv + BRN + ReLU
    for i in range(8):
        x = Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), padding='same')(x)
        x = BatchRenormalization(axis=-1, epsilon=1e-3)(x)
        x = Activation('relu')(x) 
    # last layer of the upper branch, Conv only
    x = Conv2D(filters=1, kernel_size=(3,3), strides=(1,1), padding='same')(x)  # back to 1 channel (grayscale; 3 for color)
    x = Subtract()([inpt, x])   # input - noise
    # Lower branch: first layer, Conv + BRN + ReLU
    y = Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), padding='same')(inpt)
    y = BatchRenormalization(axis=-1, epsilon=1e-3)(y)
    y = Activation('relu')(y)
    # dilated Conv + ReLU layers (7 here, 6 more below)
    for i in range(7):
        y = Conv2D(filters=64, kernel_size=(3,3), strides=(1,1),dilation_rate=(2,2), padding='same')(y)
        y = Activation('relu')(y)   
    y = Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), padding='same')(y)
    y = BatchRenormalization(axis=-1, epsilon=1e-3)(y)
    y = Activation('relu')(y) 
    for i in range(6):
        y = Conv2D(filters=64, kernel_size=(3,3), strides=(1,1),dilation_rate=(2,2), padding='same')(y)
        y = Activation('relu')(y)
    y = Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), padding='same')(y)
    y = BatchRenormalization(axis=-1, epsilon=1e-3)(y)
    y = Activation('relu')(y)    
    y = Conv2D(filters=1, kernel_size=(3,3), strides=(1,1), padding='same')(y)  # back to 1 channel
    y = Subtract()([inpt, y])   # input - noise
    o = concatenate([x, y], axis=-1)
    z = Conv2D(filters=1, kernel_size=(3,3), strides=(1,1), padding='same')(o)  # fuse the two branches back to 1 channel
    z = Subtract()([inpt, z])
    model = Model(inputs=inpt, outputs=z)
    return model
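A minimal usage sketch (optimizer and loss are assumptions; BRDNet-style denoisers are typically trained on noisy/clean patch pairs with an MSE-type loss):

model = BRDNet()
model.compile(optimizer='adam', loss='mse')
# model.fit(noisy_patches, clean_patches, batch_size=128, epochs=50)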
Example #8
def get_model(model_nr=1,
              kernel_size=(3, 3),
              pool_size=(4, 4),
              first_filters=32,
              second_filters=64,
              conv=False):

    # build the model
    model = Sequential()

    if model_nr == 1:  # No normalization
        model.add(
            Conv2D(first_filters,
                   kernel_size,
                   activation='relu',
                   padding='same',
                   input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3)))
        model.add(MaxPool2D(pool_size=pool_size))

        model.add(
            Conv2D(second_filters,
                   kernel_size,
                   activation='relu',
                   padding='same'))
        model.add(MaxPool2D(pool_size=pool_size))

        model.add(Flatten())
        model.add(Dense(64, activation='relu'))
        model.add(Dense(1, activation='sigmoid'))

    elif model_nr == 2:  # Batch renormalization
        model.add(
            Conv2D(first_filters,
                   kernel_size,
                   padding='same',
                   input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3)))
        model.add(BatchRenormalization())
        model.add(Activation("relu"))
        model.add(MaxPool2D(pool_size=pool_size))

        model.add(Conv2D(second_filters, kernel_size, padding='same'))
        model.add(BatchRenormalization())
        model.add(Activation("relu"))
        model.add(MaxPool2D(pool_size=pool_size))

        model.add(Flatten())

        model.add(Dense(64, kernel_initializer='uniform'))
        model.add(BatchRenormalization())
        model.add(Activation("relu"))

        model.add(Dense(1, kernel_initializer='uniform'))
        model.add(BatchRenormalization())
        model.add(Activation("sigmoid"))

#    if model_nr == 3:  # Batch-instance normalization
#       the model structure in the only example found is completely different
#       e.g.: http://easy-tensorflow.com/tf-tutorials/convolutional-neural-nets-cnns?view=category&id=91

    model.compile(SGD(lr=0.01, momentum=0.95),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    return model
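get_model references a module-level IMAGE_SIZE that is not defined in the snippet; a minimal usage sketch with an assumed value:

IMAGE_SIZE = 96  # assumption: not defined in the snippet above
model = get_model(model_nr=2)
model.summary()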
Example #9
def get_unet_renorm_1280(input_shape=(1280, 1280, 3),
                         num_classes=1):
    inputs = Input(shape=input_shape)
    # the paired size comments below track 2048- and 1280-pixel input variants: 2048, 1280

    down0c = Conv2D(4, (3, 3), padding='same')(inputs)
    down0c = BatchRenormalization()(down0c)
    down0c = Activation('relu')(down0c)
    down0c = Conv2D(4, (3, 3), padding='same')(down0c)
    down0c = BatchRenormalization()(down0c)
    down0c = Activation('relu')(down0c)
    down0c_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0c)
    # 1024, 640
    
    down0b = Conv2D(8, (3, 3), padding='same')(down0c_pool)
    down0b = BatchRenormalization()(down0b)
    down0b = Activation('relu')(down0b)
    down0b = Conv2D(8, (3, 3), padding='same')(down0b)
    down0b = BatchRenormalization()(down0b)
    down0b = Activation('relu')(down0b)
    down0b_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0b)
    # 512, 320

    down0a = Conv2D(16, (3, 3), padding='same')(down0b_pool)
    down0a = BatchRenormalization()(down0a)
    down0a = Activation('relu')(down0a)
    down0a = Conv2D(16, (3, 3), padding='same')(down0a)
    down0a = BatchRenormalization()(down0a)
    down0a = Activation('relu')(down0a)
    down0a_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0a)
    # 256, 160

    down0 = Conv2D(32, (3, 3), padding='same')(down0a_pool)
    down0 = BatchRenormalization()(down0)
    down0 = Activation('relu')(down0)
    down0 = Conv2D(32, (3, 3), padding='same')(down0)
    down0 = BatchRenormalization()(down0)
    down0 = Activation('relu')(down0)
    down0_pool = MaxPooling2D((2, 2), strides=(2, 2))(down0)
    # 128, 80

    down1 = Conv2D(64, (3, 3), padding='same')(down0_pool)
    down1 = BatchRenormalization()(down1)
    down1 = Activation('relu')(down1)
    down1 = Conv2D(64, (3, 3), padding='same')(down1)
    down1 = BatchRenormalization()(down1)
    down1 = Activation('relu')(down1)
    down1_pool = MaxPooling2D((2, 2), strides=(2, 2))(down1)
    # 64, 40

    down2 = Conv2D(128, (3, 3), padding='same')(down1_pool)
    down2 = BatchRenormalization()(down2)
    down2 = Activation('relu')(down2)
    down2 = Conv2D(128, (3, 3), padding='same')(down2)
    down2 = BatchRenormalization()(down2)
    down2 = Activation('relu')(down2)
    down2_pool = MaxPooling2D((2, 2), strides=(2, 2))(down2)
    # 32, 20

    down3 = Conv2D(256, (3, 3), padding='same')(down2_pool)
    down3 = BatchRenormalization()(down3)
    down3 = Activation('relu')(down3)
    down3 = Conv2D(256, (3, 3), padding='same')(down3)
    down3 = BatchRenormalization()(down3)
    down3 = Activation('relu')(down3)
    down3_pool = MaxPooling2D((2, 2), strides=(2, 2))(down3)
    # 16, 10

    down4 = Conv2D(512, (3, 3), padding='same')(down3_pool)
    down4 = BatchRenormalization()(down4)
    down4 = Activation('relu')(down4)
    down4 = Conv2D(512, (3, 3), padding='same')(down4)
    down4 = BatchRenormalization()(down4)
    down4 = Activation('relu')(down4)
    down4_pool = MaxPooling2D((2, 2), strides=(2, 2))(down4)
    # 8, 5

    center = Conv2D(1024, (3, 3), padding='same')(down4_pool)
    center = BatchRenormalization()(center)
    center = Activation('relu')(center)
    center = Conv2D(1024, (3, 3), padding='same')(center)
    center = BatchRenormalization()(center)
    center = Activation('relu')(center)
    # center

    up4 = UpSampling2D((2, 2))(center)
    up4 = concatenate([down4, up4], axis=3)
    up4 = Conv2D(512, (3, 3), padding='same')(up4)
    up4 = BatchRenormalization()(up4)
    up4 = Activation('relu')(up4)
    up4 = Conv2D(512, (3, 3), padding='same')(up4)
    up4 = BatchRenormalization()(up4)
    up4 = Activation('relu')(up4)
    up4 = Conv2D(512, (3, 3), padding='same')(up4)
    up4 = BatchRenormalization()(up4)
    up4 = Activation('relu')(up4)
    # 16

    up3 = UpSampling2D((2, 2))(up4)
    up3 = concatenate([down3, up3], axis=3)
    up3 = Conv2D(256, (3, 3), padding='same')(up3)
    up3 = BatchRenormalization()(up3)
    up3 = Activation('relu')(up3)
    up3 = Conv2D(256, (3, 3), padding='same')(up3)
    up3 = BatchRenormalization()(up3)
    up3 = Activation('relu')(up3)
    up3 = Conv2D(256, (3, 3), padding='same')(up3)
    up3 = BatchRenormalization()(up3)
    up3 = Activation('relu')(up3)
    # 32

    up2 = UpSampling2D((2, 2))(up3)
    up2 = concatenate([down2, up2], axis=3)
    up2 = Conv2D(128, (3, 3), padding='same')(up2)
    up2 = BatchRenormalization()(up2)
    up2 = Activation('relu')(up2)
    up2 = Conv2D(128, (3, 3), padding='same')(up2)
    up2 = BatchRenormalization()(up2)
    up2 = Activation('relu')(up2)
    up2 = Conv2D(128, (3, 3), padding='same')(up2)
    up2 = BatchRenormalization()(up2)
    up2 = Activation('relu')(up2)
    # 64

    up1 = UpSampling2D((2, 2))(up2)
    up1 = concatenate([down1, up1], axis=3)
    up1 = Conv2D(64, (3, 3), padding='same')(up1)
    up1 = BatchRenormalization()(up1)
    up1 = Activation('relu')(up1)
    up1 = Conv2D(64, (3, 3), padding='same')(up1)
    up1 = BatchRenormalization()(up1)
    up1 = Activation('relu')(up1)
    up1 = Conv2D(64, (3, 3), padding='same')(up1)
    up1 = BatchRenormalization()(up1)
    up1 = Activation('relu')(up1)
    # 128

    up0 = UpSampling2D((2, 2))(up1)
    up0 = concatenate([down0, up0], axis=3)
    up0 = Conv2D(32, (3, 3), padding='same')(up0)
    up0 = BatchRenormalization()(up0)
    up0 = Activation('relu')(up0)
    up0 = Conv2D(32, (3, 3), padding='same')(up0)
    up0 = BatchRenormalization()(up0)
    up0 = Activation('relu')(up0)
    up0 = Conv2D(32, (3, 3), padding='same')(up0)
    up0 = BatchRenormalization()(up0)
    up0 = Activation('relu')(up0)
    # 256

    up0a = UpSampling2D((2, 2))(up0)
    up0a = concatenate([down0a, up0a], axis=3)
    up0a = Conv2D(16, (3, 3), padding='same')(up0a)
    up0a = BatchRenormalization()(up0a)
    up0a = Activation('relu')(up0a)
    up0a = Conv2D(16, (3, 3), padding='same')(up0a)
    up0a = BatchRenormalization()(up0a)
    up0a = Activation('relu')(up0a)
    up0a = Conv2D(16, (3, 3), padding='same')(up0a)
    up0a = BatchRenormalization()(up0a)
    up0a = Activation('relu')(up0a)
    # 512

    up0b = UpSampling2D((2, 2))(up0a)
    up0b = concatenate([down0b, up0b], axis=3)
    up0b = Conv2D(8, (3, 3), padding='same')(up0b)
    up0b = BatchRenormalization()(up0b)
    up0b = Activation('relu')(up0b)
    up0b = Conv2D(8, (3, 3), padding='same')(up0b)
    up0b = BatchRenormalization()(up0b)
    up0b = Activation('relu')(up0b)
    up0b = Conv2D(8, (3, 3), padding='same')(up0b)
    up0b = BatchRenormalization()(up0b)
    up0b = Activation('relu')(up0b)
    # 1024
    
    up0c = UpSampling2D((2, 2))(up0b)
    up0c = concatenate([down0c, up0c], axis=3)
    up0c = Conv2D(8, (3, 3), padding='same')(up0c)
    up0c = BatchRenormalization()(up0c)
    up0c = Activation('relu')(up0c)
    up0c = Conv2D(8, (3, 3), padding='same')(up0c)
    up0c = BatchRenormalization()(up0c)
    up0c = Activation('relu')(up0c)
    up0c = Conv2D(8, (3, 3), padding='same')(up0c)
    up0c = BatchRenormalization()(up0c)
    up0c = Activation('relu')(up0c)
    # 2048

    classify = Conv2D(num_classes, (1, 1), activation='sigmoid')(up0c)

    model = Model(inputs=inputs, outputs=classify)

    model.compile(optimizer=RMSprop(lr=0.0001), loss=bce_dice_loss, metrics=[dice_loss])

    return model
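bce_dice_loss and dice_loss are referenced but not defined above; a minimal sketch of the formulation commonly used in Kaggle segmentation kernels (an assumption, not necessarily the original author's version):

from keras.losses import binary_crossentropy

def dice_loss(y_true, y_pred, smooth=1.0):
    # 1 - Dice coefficient, computed on the flattened masks
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return 1.0 - (2.0 * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)

def bce_dice_loss(y_true, y_pred):
    return binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)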
Example #10
def create_wide_residual_network(input_dim,
                                 nb_classes=100,
                                 N=2,
                                 k=1,
                                 dropout=0.0,
                                 verbose=1):
    """
    Creates a Wide Residual Network with specified parameters

    :param input: Input Keras object
    :param nb_classes: Number of output classes
    :param N: Depth of the network. Compute N = (n - 4) / 6.
              Example : For a depth of 16, n = 16, N = (16 - 4) / 6 = 2
              Example2: For a depth of 28, n = 28, N = (28 - 4) / 6 = 4
              Example3: For a depth of 40, n = 40, N = (40 - 4) / 6 = 6
    :param k: Width of the network.
    :param dropout: Adds dropout if value is greater than 0.0
    :param verbose: Debug info to describe created WRN
    :return:
    """
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1

    ip = Input(shape=input_dim)

    x = initial_conv(ip)
    nb_conv = 4

    x = expand_conv(x, 16, k)

    for i in range(N - 1):
        x = conv1_block(x, k, dropout)
        nb_conv += 2

    x = BatchRenormalization(axis=channel_axis,
                             momentum=0.1,
                             epsilon=1e-5,
                             gamma_init='uniform')(x)
    x = Activation('relu')(x)

    x = expand_conv(x, 32, k, strides=(2, 2))

    for i in range(N - 1):
        x = conv2_block(x, k, dropout)
        nb_conv += 2

    x = BatchRenormalization(axis=channel_axis,
                             momentum=0.1,
                             epsilon=1e-5,
                             gamma_init='uniform')(x)
    x = Activation('relu')(x)

    x = expand_conv(x, 64, k, strides=(2, 2))

    for i in range(N - 1):
        x = conv3_block(x, k, dropout)
        nb_conv += 2

    x = AveragePooling2D((8, 8))(x)
    x = Flatten()(x)

    x = Dense(nb_classes, activation='softmax')(x)

    model = Model(ip, x)

    if verbose:
        print("Wide Residual Network-%d-%d created." % (nb_conv, k))
    return model
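A minimal usage sketch (CIFAR-style 32x32 inputs are an assumption; conv1_block/conv2_block/conv3_block and the other helpers above must be in scope):

# WRN-16-8: N = (16 - 4) / 6 = 2, widening factor k = 8
model = create_wide_residual_network((32, 32, 3), nb_classes=10, N=2, k=8)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])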