Example #1

from tensorflow.keras import layers, models

def Xception(include_top=True,
             input_shape=(224, 224, 3),
             pooling=None,
             classes=1000):

    img_input = layers.Input(shape=input_shape)
    channel_axis = -1

    x = layers.Conv2D(32, (3, 3),
                      strides=(2, 2),
                      use_bias=False,
                      name='block1_conv1')(img_input)
    x = layers.BatchNormalization(axis=channel_axis, name='block1_conv1_bn')(x)
    x = layers.Activation('relu', name='block1_conv1_act')(x)
    x = layers.Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
    x = layers.BatchNormalization(axis=channel_axis, name='block1_conv2_bn')(x)
    x = layers.Activation('relu', name='block1_conv2_act')(x)

    residual = layers.Conv2D(128, (1, 1),
                             strides=(2, 2),
                             padding='same',
                             use_bias=False)(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)

    x = layers.SeparableConv2D(128, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block2_sepconv1')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block2_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block2_sepconv2_act')(x)
    x = layers.SeparableConv2D(128, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block2_sepconv2')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block2_sepconv2_bn')(x)

    x = layers.MaxPooling2D((3, 3),
                            strides=(2, 2),
                            padding='same',
                            name='block2_pool')(x)
    x = layers.add([x, residual])

    residual = layers.Conv2D(256, (1, 1),
                             strides=(2, 2),
                             padding='same',
                             use_bias=False)(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)

    x = layers.Activation('relu', name='block3_sepconv1_act')(x)
    x = layers.SeparableConv2D(256, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block3_sepconv1')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block3_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block3_sepconv2_act')(x)
    x = layers.SeparableConv2D(256, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block3_sepconv2')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block3_sepconv2_bn')(x)

    x = layers.MaxPooling2D((3, 3),
                            strides=(2, 2),
                            padding='same',
                            name='block3_pool')(x)
    x = layers.add([x, residual])

    residual = layers.Conv2D(728, (1, 1),
                             strides=(2, 2),
                             padding='same',
                             use_bias=False)(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)

    x = layers.Activation('relu', name='block4_sepconv1_act')(x)
    x = layers.SeparableConv2D(728, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block4_sepconv1')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block4_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block4_sepconv2_act')(x)
    x = layers.SeparableConv2D(728, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block4_sepconv2')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block4_sepconv2_bn')(x)

    x = layers.MaxPooling2D((3, 3),
                            strides=(2, 2),
                            padding='same',
                            name='block4_pool')(x)
    x = layers.add([x, residual])

    for i in range(8):
        residual = x
        prefix = 'block' + str(i + 5)

        x = layers.Activation('relu', name=prefix + '_sepconv1_act')(x)
        x = layers.SeparableConv2D(728, (3, 3),
                                   padding='same',
                                   use_bias=False,
                                   name=prefix + '_sepconv1')(x)
        x = layers.BatchNormalization(axis=channel_axis,
                                      name=prefix + '_sepconv1_bn')(x)
        x = layers.Activation('relu', name=prefix + '_sepconv2_act')(x)
        x = layers.SeparableConv2D(728, (3, 3),
                                   padding='same',
                                   use_bias=False,
                                   name=prefix + '_sepconv2')(x)
        x = layers.BatchNormalization(axis=channel_axis,
                                      name=prefix + '_sepconv2_bn')(x)
        x = layers.Activation('relu', name=prefix + '_sepconv3_act')(x)
        x = layers.SeparableConv2D(728, (3, 3),
                                   padding='same',
                                   use_bias=False,
                                   name=prefix + '_sepconv3')(x)
        x = layers.BatchNormalization(axis=channel_axis,
                                      name=prefix + '_sepconv3_bn')(x)

        x = layers.add([x, residual])

    residual = layers.Conv2D(1024, (1, 1),
                             strides=(2, 2),
                             padding='same',
                             use_bias=False)(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)

    x = layers.Activation('relu', name='block13_sepconv1_act')(x)
    x = layers.SeparableConv2D(728, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block13_sepconv1')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block13_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block13_sepconv2_act')(x)
    x = layers.SeparableConv2D(1024, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block13_sepconv2')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block13_sepconv2_bn')(x)

    x = layers.MaxPooling2D((3, 3),
                            strides=(2, 2),
                            padding='same',
                            name='block13_pool')(x)
    x = layers.add([x, residual])

    x = layers.SeparableConv2D(1536, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block14_sepconv1')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block14_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block14_sepconv1_act')(x)

    x = layers.SeparableConv2D(2048, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block14_sepconv2')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block14_sepconv2_bn')(x)
    x = layers.Activation('relu', name='block14_sepconv2_act')(x)

    if include_top:
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        x = layers.Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D()(x)

    # Create model.
    model = models.Model(img_input, x, name='xception')

    return model
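A minimal usage sketch for the function above; weights are randomly initialized, since this snippet carries no weight-loading logic:

model = Xception(include_top=True, input_shape=(224, 224, 3), classes=1000)
model.summary()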
def add_res_up(x, x_res, filters):
    # Upsample the residual branch, project its channels with a 1x1
    # convolution, then merge the two branches by addition.
    x_res = layers.UpSampling2D((2, 2))(x_res)
    residual = layers.Conv2D(filters, 1, padding="same")(x_res)
    x = layers.add([x, residual])
    return x
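A hedged illustration of the helper, reusing the layers import above; the tensor names and shapes are assumptions, not from the source:

# `deep` is one stride level coarser than `skip`; add_res_up upsamples
# and projects it before the merge.
deep = layers.Input(shape=(8, 8, 256))
skip = layers.Input(shape=(16, 16, 64))
merged = add_res_up(skip, deep, filters=64)  # shape: (16, 16, 64)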
Example #3
def identity_block_2D(input_tensor,
                      kernel_size,
                      filters,
                      stage,
                      block,
                      trainable=True):
    """The identity block is the block that has no conv layer at shortcut.
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filterss of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    # Returns
        Output tensor for the block.
    """
    filters1, filters2, filters3 = filters
    bn_axis = 3

    conv_name_1 = 'conv' + str(stage) + '_' + str(block) + '_1x1_reduce'
    bn_name_1 = 'conv' + str(stage) + '_' + str(block) + '_1x1_reduce/bn'
    x = Conv2D(
        filters1,
        (1, 1),
        kernel_initializer='orthogonal',
        use_bias=False,
        trainable=trainable,
        kernel_regularizer=l2(weight_decay),
        name=conv_name_1,
    )(input_tensor)
    x = BatchNormalization(axis=bn_axis, trainable=trainable,
                           name=bn_name_1)(x)
    x = Activation('relu')(x)

    conv_name_2 = 'conv' + str(stage) + '_' + str(block) + '_3x3'
    bn_name_2 = 'conv' + str(stage) + '_' + str(block) + '_3x3/bn'
    x = Conv2D(
        filters2,
        kernel_size,
        padding='same',
        kernel_initializer='orthogonal',
        use_bias=False,
        trainable=trainable,
        kernel_regularizer=l2(weight_decay),
        name=conv_name_2,
    )(x)
    x = BatchNormalization(axis=bn_axis, trainable=trainable,
                           name=bn_name_2)(x)
    x = Activation('relu')(x)

    conv_name_3 = 'conv' + str(stage) + '_' + str(block) + '_1x1_increase'
    bn_name_3 = 'conv' + str(stage) + '_' + str(block) + '_1x1_increase/bn'
    x = Conv2D(
        filters3,
        (1, 1),
        kernel_initializer='orthogonal',
        use_bias=False,
        trainable=trainable,
        kernel_regularizer=l2(weight_decay),
        name=conv_name_3,
    )(x)
    x = BatchNormalization(axis=bn_axis, trainable=trainable,
                           name=bn_name_3)(x)

    x = layers.add([x, input_tensor])
    x = Activation('relu')(x)
    return x
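A minimal usage sketch for the block above, assuming the Keras imports it relies on and that `weight_decay` is a module-level constant (both are used but not defined in the snippet):

from tensorflow.keras import Input, layers
from tensorflow.keras.layers import Activation, BatchNormalization, Conv2D
from tensorflow.keras.regularizers import l2

weight_decay = 1e-4  # assumed module-level constant used by the block

inp = Input(shape=(56, 56, 256))
# The input channel count must equal filters3 for the final add to be valid.
out = identity_block_2D(inp, kernel_size=3, filters=[64, 64, 256],
                        stage=2, block='b')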
def Xception_model(img_input, pooling=None):

    channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1  # channel axis is 1 for 'channels_first', -1 for 'channels_last'

    x = layers.Conv2D(32, (3, 3),
                      strides=(2, 2),
                      use_bias=False,
                      name='block1_conv1')(img_input)
    x = layers.BatchNormalization(axis=channel_axis, name='block1_conv1_bn')(x)
    x = layers.Activation('relu', name='block1_conv1_act')(x)
    x = layers.Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
    x = layers.BatchNormalization(axis=channel_axis, name='block1_conv2_bn')(x)
    x = layers.Activation('relu', name='block1_conv2_act')(x)

    residual = layers.Conv2D(128, (1, 1),
                             strides=(2, 2),
                             padding='same',
                             use_bias=False)(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)

    x = layers.SeparableConv2D(128, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block2_sepconv1')(x)
    x = layers.BatchNormalization(axis=channel_axis, name='block2_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block2_sepconv2_act')(x)
    x = layers.SeparableConv2D(128, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block2_sepconv2')(x)
    x = layers.BatchNormalization(axis=channel_axis, name='block2_sepconv2_bn')(x)

    x = layers.MaxPooling2D((3, 3),
                            strides=(2, 2),
                            padding='same',
                            name='block2_pool')(x)
    x = layers.add([x, residual])

    residual = layers.Conv2D(256, (1, 1), strides=(2, 2),
                             padding='same', use_bias=False)(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)

    x = layers.Activation('relu', name='block3_sepconv1_act')(x)
    x = layers.SeparableConv2D(256, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block3_sepconv1')(x)
    x = layers.BatchNormalization(axis=channel_axis, name='block3_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block3_sepconv2_act')(x)
    x = layers.SeparableConv2D(256, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block3_sepconv2')(x)
    x = layers.BatchNormalization(axis=channel_axis, name='block3_sepconv2_bn')(x)

    x = layers.MaxPooling2D((3, 3), strides=(2, 2),
                            padding='same',
                            name='block3_pool')(x)
    x = layers.add([x, residual])

    residual = layers.Conv2D(728, (1, 1),
                             strides=(2, 2),
                             padding='same',
                             use_bias=False)(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)

    x = layers.Activation('relu', name='block4_sepconv1_act')(x)
    x = layers.SeparableConv2D(728, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block4_sepconv1')(x)
    x = layers.BatchNormalization(axis=channel_axis, name='block4_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block4_sepconv2_act')(x)
    x = layers.SeparableConv2D(728, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block4_sepconv2')(x)
    x = layers.BatchNormalization(axis=channel_axis, name='block4_sepconv2_bn')(x)

    x = layers.MaxPooling2D((3, 3), strides=(2, 2),
                            padding='same',
                            name='block4_pool')(x)
    x = layers.add([x, residual])

    for i in range(8):
        residual = x
        prefix = 'block' + str(i + 5)

        x = layers.Activation('relu', name=prefix + '_sepconv1_act')(x)
        x = layers.SeparableConv2D(728, (3, 3),
                                   padding='same',
                                   use_bias=False,
                                   name=prefix + '_sepconv1')(x)
        x = layers.BatchNormalization(axis=channel_axis,
                                      name=prefix + '_sepconv1_bn')(x)
        x = layers.Activation('relu', name=prefix + '_sepconv2_act')(x)
        x = layers.SeparableConv2D(728, (3, 3),
                                   padding='same',
                                   use_bias=False,
                                   name=prefix + '_sepconv2')(x)
        x = layers.BatchNormalization(axis=channel_axis,
                                      name=prefix + '_sepconv2_bn')(x)
        x = layers.Activation('relu', name=prefix + '_sepconv3_act')(x)
        x = layers.SeparableConv2D(728, (3, 3),
                                   padding='same',
                                   use_bias=False,
                                   name=prefix + '_sepconv3')(x)
        x = layers.BatchNormalization(axis=channel_axis,
                                      name=prefix + '_sepconv3_bn')(x)

        x = layers.add([x, residual])

    residual = layers.Conv2D(1024, (1, 1), strides=(2, 2),
                             padding='same', use_bias=False)(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)

    x = layers.Activation('relu', name='block13_sepconv1_act')(x)
    x = layers.SeparableConv2D(728, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block13_sepconv1')(x)
    x = layers.BatchNormalization(axis=channel_axis, name='block13_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block13_sepconv2_act')(x)
    x = layers.SeparableConv2D(1024, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block13_sepconv2')(x)
    x = layers.BatchNormalization(axis=channel_axis, name='block13_sepconv2_bn')(x)

    x = layers.MaxPooling2D((3, 3),
                            strides=(2, 2),
                            padding='same',
                            name='block13_pool')(x)
    x = layers.add([x, residual])

    x = layers.SeparableConv2D(1536, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block14_sepconv1')(x)
    x = layers.BatchNormalization(axis=channel_axis, name='block14_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block14_sepconv1_act')(x)

    x = layers.SeparableConv2D(2048, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block14_sepconv2')(x)
    x = layers.BatchNormalization(axis=channel_axis, name='block14_sepconv2_bn')(x)
    x = layers.Activation('relu', name='block14_sepconv2_act')(x)

    if pooling == 'avg':
        x = layers.GlobalAveragePooling2D()(x)
    elif pooling == 'max':
        x = layers.GlobalMaxPooling2D()(x)

    return x
Example #5
def Unet_3D(input_arr, n_filters=8, dropout=0.2, batch_norm=True):

    ### down-sampling
    conv1 = conv_BN_block(input_arr=input_arr,
                          num_filters=n_filters,
                          kernel_size=3,
                          batch_norm=batch_norm)
    mp1 = MaxPooling3D(pool_size=(2, 2, 2), strides=2)(conv1)
    dp1 = Dropout(dropout)(mp1)

    conv2 = conv_BN_block(dp1, n_filters * 2, 3, batch_norm)
    mp2 = MaxPooling3D(pool_size=(2, 2, 2), strides=2)(conv2)
    dp2 = Dropout(dropout)(mp2)

    conv3 = conv_BN_block(dp2, n_filters * 4, 3, batch_norm)
    mp3 = MaxPooling3D(pool_size=(2, 2, 2), strides=2)(conv3)
    dp3 = Dropout(dropout)(mp3)

    conv4 = conv_BN_block(dp3, n_filters * 8, 3, batch_norm)
    mp4 = MaxPooling3D(pool_size=(2, 2, 2), strides=2)(conv4)
    dp4 = Dropout(dropout)(mp4)

    conv5 = conv_BN_block(dp4, n_filters * 16, 3, batch_norm)

    conv6 = Conv3D(n_filters * 16,
                   kernel_size=(2, 2, 2),
                   strides=(2, 2, 2),
                   padding='same')(conv5)
    dp6 = Dropout(dropout)(conv6)

    conv7 = conv_BN_block(dp6, n_filters * 16, 5, True)

    ### up-sampling
    up1 = Conv3DTranspose(n_filters * 8, (2, 2, 2),
                          strides=(2, 2, 2),
                          padding='same')(conv7)
    conc1 = concatenate([up1, conv5])
    conv8 = conv_BN_block(conc1, n_filters * 16, 5, True)
    dp7 = Dropout(dropout)(conv8)

    up2 = Conv3DTranspose(n_filters * 4, (2, 2, 2),
                          strides=(2, 2, 2),
                          padding='same')(dp7)
    conc2 = concatenate([up2, conv4])
    conv9 = conv_BN_block(conc2, n_filters * 8, 5, True)
    dp8 = Dropout(dropout)(conv9)

    up3 = Conv3DTranspose(n_filters * 2, (2, 2, 2),
                          strides=(2, 2, 2),
                          padding='same')(dp8)
    conc3 = concatenate([up3, conv3])
    conv10 = conv_BN_block(conc3, n_filters * 4, 5, True)
    dp9 = Dropout(dropout)(conv10)

    up4 = Conv3DTranspose(n_filters, (2, 2, 2),
                          strides=(2, 2, 2),
                          padding='same')(dp9)
    conc4 = concatenate([up4, conv2])
    conv11 = conv_BN_block(conc4, n_filters * 4, 5, True)
    dp10 = Dropout(dropout)(conv11)

    up5 = Conv3DTranspose(n_filters, (2, 2, 2),
                          strides=(2, 2, 2),
                          padding='same')(dp10)
    conc5 = concatenate([up5, conv1])
    conv12 = Conv3D(n_filters * 2,
                    kernel_size=(5, 5, 5),
                    strides=(1, 1, 1),
                    padding='same')(conc5)
    dp11 = Dropout(dropout)(conv12)

    add1 = add([dp11, conc5])
    relu_layer = relu(add1, max_value=1)
    outputs = Conv3D(1, (1, 1, 1), activation='relu')(relu_layer)

    model = Model(inputs=input_arr, outputs=outputs)

    return model
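The snippet calls conv_BN_block without defining it. A plausible minimal implementation under the usual double-convolution assumption (Conv3D, optional BatchNorm, ReLU, twice; the exact layout is a guess), together with the imports the function above needs:

from tensorflow.keras.activations import relu
from tensorflow.keras.layers import (Activation, BatchNormalization, Conv3D,
                                     Conv3DTranspose, Dropout, MaxPooling3D,
                                     add, concatenate)
from tensorflow.keras.models import Model


def conv_BN_block(input_arr, num_filters, kernel_size, batch_norm):
    # Assumed double-conv helper: Conv3D -> (BatchNorm) -> ReLU, twice.
    x = Conv3D(num_filters, kernel_size, padding='same')(input_arr)
    if batch_norm:
        x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv3D(num_filters, kernel_size, padding='same')(x)
    if batch_norm:
        x = BatchNormalization()(x)
    x = Activation('relu')(x)
    return x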
Example #6
def f(input1, input2):
    # Apply the same residual block to each input, then merge by addition.
    res1 = basic_residual_block(filters=filters, init_strides=init_strides)(input1)
    res2 = basic_residual_block(filters=filters, init_strides=init_strides)(input2)
    sum_res = add([res1, res2])
    #final_res = basic_residual_block(filters=filters, init_strides=init_strides)(sum_res)
    return sum_res
Example #7
def _residual_module(inputs,
                     filters,
                     stride,
                     channel_dim,
                     reduce_dim=False,
                     reg=0.0001,
                     bn_eps=2e-5,
                     bn_momentum=0.9):
    """
    Definicija rezidualnog modula, pri čemu se modul sastoji 
    od bottleneck grane i preskočne grane. Ova funkcija je privatna jer 
    samostalno ne radi ako je reduce_dim=False za prvi blok, zbog
    nekompatibilnih dimenzija u layers.add sloju.
    
    Argumenti:
    ----------
    inputs: tensor
        Ulaz u rezidualni blok.
        
    filters: int
        Broj filtera koji će učiti zadnji konvolucijski sloj u bottelneck-u.
    
    stride: int
        Posmak u operaciji konvolucije.
    
    channel_dim: int
        Koja os predstavlja oznaku kanala (obično je prva ili zadnja). Bitno
        kod primjene BatchNorm-a.
    
    reduce_dim: bool
        Da li se u rezidualno bloku treba napraviti smanjenje prostorne dimenzionalnosti. 
        Preddefinirana vrijednost je False.
    
    reg: float
        Stupanje regularizacije za sve konvolucijske slojeve. Preddefinirana
        vrijednost je 0.0001.
    
    bn_eps: float
        Prilikom normalizacije u BN slojevima konstanta koja osigurava da 
        ne dođe do dijeljenja sa nulom. Preddefinirana vrijednost je 2e-5.
    
    bn_momentum: float
        Momentum pomičnog prosjeka kod BN slojeva. Preddefinirana 
        vrijednost je 0.9.

    Povratna vrijednost:
    --------------------
    output: tensor
    Transformirani ulazni podatci.
    
    """
    # Initialize the skip connection
    shortcut = inputs

    # First layer with 1x1 filters and pre-activation (BN + ReLU)
    bn1 = layers.BatchNormalization(axis=channel_dim,
                                    epsilon=bn_eps,
                                    momentum=bn_momentum)(inputs)
    act1 = layers.Activation("relu")(bn1)
    # The first conv layer learns 4x fewer filters than the last; no bias
    # vector is needed because a BN layer follows
    conv1 = layers.Conv2D(int(filters * 0.25), (1, 1),
                          use_bias=False,
                          kernel_regularizer=regularizers.l2(reg))(act1)

    # Second layer with 3x3 filters
    bn2 = layers.BatchNormalization(axis=channel_dim,
                                    epsilon=bn_eps,
                                    momentum=bn_momentum)(conv1)
    act2 = layers.Activation("relu")(bn2)
    conv2 = layers.Conv2D(int(filters * 0.25), (3, 3),
                          strides=stride,
                          padding="same",
                          use_bias=False,
                          kernel_regularizer=regularizers.l2(reg))(act2)

    # Third layer with 1x1 filters
    bn3 = layers.BatchNormalization(axis=channel_dim,
                                    epsilon=bn_eps,
                                    momentum=bn_momentum)(conv2)
    act3 = layers.Activation("relu")(bn3)
    conv3 = layers.Conv2D(filters, (1, 1),
                          use_bias=False,
                          kernel_regularizer=regularizers.l2(reg))(act3)

    # Reduce the spatial dimensionality of the input if requested
    if reduce_dim:
        shortcut = layers.Conv2D(filters, (1, 1),
                                 strides=stride,
                                 use_bias=False,
                                 kernel_regularizer=regularizers.l2(reg))(act1)

    # Add the skip connection and the bottleneck branch transformation
    output = layers.add([conv3, shortcut])

    return output
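Per the docstring, the first block of a stage should be called with reduce_dim=True so the shortcut is projected to the new shape; a hedged stacking sketch:

from tensorflow.keras import Input, layers, regularizers

inputs = Input(shape=(32, 32, 64))
# First block of a stage: project the shortcut so the final add is valid.
x = _residual_module(inputs, filters=128, stride=2, channel_dim=-1,
                     reduce_dim=True)
# Later blocks keep dimensions, so the identity shortcut works.
x = _residual_module(x, filters=128, stride=1, channel_dim=-1)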
def createModel():
    #5.1 Set input shape, which is 1 channel of a 30x55 2D image
    inputShape = (30, 55, 1)
    inputs = Input(shape=inputShape)
    inputs2 = inputs
    #inputs2   = Conv2D(16, (2,2), padding="same",strides=(1,1),kernel_initializer='he_normal', activation='relu')(inputs)
    #inputs2   = Conv2D(32, (2,2), padding="same",strides=(1,1),kernel_initializer='he_normal', activation='relu')(inputs2)
    
    #5.2 Auto encoder
    #This processes the input so that the features needed for image reconstruction can be gathered.
    #5.2.1 First hidden layer. 64 neurons, downsizes the image to (21,21). Conv2D is chosen over MaxPooling as it allows the filter kernel to be trained.
    
    '''
    x           = Conv2D(64, (8,1), padding="same",strides=(1,2),kernel_initializer='he_normal', activation='relu')(inputs)
    encoded     = Conv2D(40, (4,1), padding="same",strides=(1,2),kernel_initializer='he_normal', activation='relu')(x)
    x           = UpSampling2D(size=(1, 2))(encoded)
    x           = Conv2D(40, (4,1), padding="same",kernel_initializer='he_normal', activation='relu')(x)
    x           = UpSampling2D(size=(1, 2))(x)
    x           = Conv2D(64, (8,1), padding="same",kernel_initializer='he_normal', activation='relu')(x)
    x           = Dense(1, name="horizontal")(x)
    '''

    '''    
    #horizontal
    x = AveragePooling2D(pool_size=(1, 8), strides=(1,8))(inputs)
    x = Lambda(lambda x: x - 0.375)(x)
    x = Activation("relu")(x)
    x = Lambda(lambda x: x *1024)(x)
    x = Activation("tanh")(x)
    x = UpSampling2D(size=(1, 8))(x)

    #vertical
    x2 = AveragePooling2D(pool_size=(5, 1), strides=(5,1))(inputs)
    x2 = Lambda(lambda x2: x2 - 0.33)(x2)
    x2 = Activation("relu")(x2)
    x2 = Lambda(lambda x2: x2 *1024)(x2)
    x2 = Activation("tanh")(x2)
    x2 = UpSampling2D(size=(5, 1))(x2)

    #heat trap
    x3 = AveragePooling2D(pool_size=(4, 4), strides=(4,4))(inputs)
    x3 = Lambda(lambda x3: x3 - 0.25)(x3)
    x3 = Activation("relu")(x3)
    x3 = Lambda(lambda x3: x3 *1024)(x3)
    x3 = Activation("tanh")(x3)
    x3 = UpSampling2D(size=(4, 4))(x3)
    '''    

    #heat trap
    x3 = AveragePooling2D(pool_size=(5, 5), strides=(5,5))(inputs2)
    x3_2 = Lambda(lambda x3: x3 - 0.2)(x3)
    x3_2 = Activation("relu")(x3_2)
    x3_2 = Lambda(lambda x3_2: x3_2 + 0.2)(x3_2)
    x3_2 = Lambda(lambda x3_2: x3_2 * 8)(x3_2)
    x3 = multiply([x3,x3_2])
     
    #x3 = Activation("tanh")(x3)
    x3 = UpSampling2D(size=(5, 5))(x3)
    #x3 = multiply([x3,inputs])
    x3 = Lambda(lambda x3: x3*0.5)(x3)
    x3 = Conv2D(16, (5,5), padding="same",strides=(5,5),kernel_initializer='he_normal', activation='relu')(x3)
    x3 = Conv2D(1, (1,1), padding="same",strides=(1,1),kernel_initializer='he_normal', activation='relu')(x3)
    x3 = UpSampling2D(size=(5, 5))(x3)

    x3a = AveragePooling2D(pool_size=(3, 5), strides=(3,5))(inputs2)
    
    x3a_2 = Lambda(lambda x3a: x3a - 0.2)(x3a)
    x3a_2 = Activation("relu")(x3a_2)
    x3a_2 = Lambda(lambda x3a_2: x3a_2 + 0.2)(x3a_2)
    x3a_2 = Lambda(lambda x3a_2: x3a_2 * 8)(x3a_2)
    x3a = multiply([x3a,x3a_2])
    
    #x3a = Activation("tanh")(x3a)
    x3a = UpSampling2D(size=(3, 5))(x3a)
    #x3a = multiply([x3a,inputs])
    x3a = Lambda(lambda x3a: x3a *0.5)(x3a)
    x3a = Conv2D(16, (3,5), padding="same",strides=(3,5),kernel_initializer='he_normal', activation='relu')(x3a)
    x3a = Conv2D(1, (1,1), padding="same",strides=(1,1),kernel_initializer='he_normal', activation='relu')(x3a)
    x3a = UpSampling2D(size=(3, 5))(x3a)


    x3 = add([x3,x3a], name="cluster")
    #x3 = multiply([x3,inputs])
    
    #vertical
    #x2 = subtract([inputs,x3])
    x2= inputs2
    x2 = AveragePooling2D(pool_size=(3, 1), strides=(3,1))(x2)
    
    x2_2 = Lambda(lambda x2: x2 - 0.2)(x2)
    x2_2 = Activation("relu")(x2_2)
    x2_2 = Lambda(lambda x2_2: x2_2 + 0.2)(x2_2)
    x2_2 = Lambda(lambda x2_2: x2_2 * 8)(x2_2)
    x2 = multiply([x2,x2_2])
    
    #x2 = Activation("tanh")(x2)
    x2 = UpSampling2D(size=(3, 1))(x2)
    #x2= multiply([x2,inputs], name='vertical')
    #x2 = add([x2,x2a])
    x2 = Conv2D(16, (3,1), padding="same",strides=(3,1),kernel_initializer='he_normal', activation='relu')(x2)
    x2 = Conv2D(1, (1,1), padding="same",strides=(1,1),kernel_initializer='he_normal', activation='relu')(x2)
    x2 = UpSampling2D(size=(3, 1), name='vertical')(x2)
    #x2 = multiply([x2,inputs])

    #horizontal
    #x = subtract([inputs,x3])
    #x = subtract([x,x2])
    x = inputs2
    x = AveragePooling2D(pool_size=(1, 5), strides=(1,5))(x)
    
    #x = Lambda(lambda x: x *8)(x)
    x_2 = Lambda(lambda x: x - 0.2)(x)
    x_2 = Activation("relu")(x_2)
    x_2 = Lambda(lambda x_2: x_2 + 0.2)(x_2)
    x_2 = Lambda(lambda x_2: x_2 * 8)(x_2)    
    #x = multiply([x,x_2])
    #x = Activation("relu")(x)
    
    
    
    x = UpSampling2D(size=(1, 5))(x)
    #x = multiply([x,inputs])
    #x = add([x,xa])
    x = Conv2D(16, (1,5), padding="same",strides=(1,5),kernel_initializer='he_normal', activation='relu')(x)
    x = Conv2D(1, (1,1), padding="same",strides=(1,1),kernel_initializer='he_normal', activation='relu')(x)
    x = UpSampling2D(size=(1, 5), name="horizontal")(x)
    #x = multiply([x,inputs])




    '''
    y = subtract([inputs2,x])    
    y = subtract([y,x2])    
    y = subtract([y,x3]) 
    
    #y = Lambda(lambda y: y * 8)(y)
    #y = Lambda(lambda y: y - 0.125)(y)
    y = Activation("relu")(y)
    #y = Lambda(lambda y: y *1024)(y)
    #y = Activation("tanh")(y)
    #y = multiply([y,inputs])
    y = Conv2D(16, (2,2), padding="same",strides=(1,1),kernel_initializer='he_normal', activation='relu', name="pepper")(y)
    '''
    
    
    #=======================================================

    '''
    x           = Conv2D(32, (1,8), padding="same",strides=(1,8),kernel_initializer='he_normal', activation='relu')(x)
    x           = Lambda(lambda x: x - 0.1)(x)
    x           = Activation("relu")(x)
    x           = Lambda(lambda x: x * 1.5)(x)
    #x           = Lambda(lambda x: x * 8)(x)
    x_code      = Conv2D(16, (2,1), padding="same",strides=(1,1),kernel_initializer='he_normal', activation='relu')(x)
    x           = UpSampling2D(size=(1,8), name="horizontal" )(x_code)
    #x2           = Dense(1)(x2)
    
    x2           = Conv2D(32, (5,1), padding="same",strides=(5,1),kernel_initializer='he_normal', activation='relu')(x2)
    x2           = Lambda(lambda x2: x2 - 0.1)(x2)
    x2           = Activation("relu")(x2)
    x2           = Lambda(lambda x2: x2 * 1.5)(x2)
    #x2           = Lambda(lambda x2: x2 * 8)(x2)
    x2_code      = Conv2D(16, (2,1), padding="same",strides=(1,1),kernel_initializer='he_normal', activation='relu')(x2)
    x2           = UpSampling2D(size=(5, 1), name="vertical" )(x2_code)
    #x2           = Dense(1)(x2)

    x3      = Conv2D(32, (4,4), padding="same",strides=(4,4),kernel_initializer='he_normal', activation='relu')(x3)
    x3      = Lambda(lambda x3: x3 - 0.1)(x3)
    x3      = Activation("relu")(x3)
    x3_code      = Lambda(lambda x3: x3 * 1.5)(x3)
    #x3_code = Lambda(lambda x3: x3 * 16)(x3)
    #x3           = Dropout(0.3)(x3)
    x3           = UpSampling2D(size=(4, 4) )(x3_code)
    x3           = Dense(1)(x3)
    
    x3b      = Conv2D(32, (6,6), padding="same",strides=(4,4),kernel_initializer='he_normal', activation='relu')(x3)
    x3b      = Lambda(lambda x3b: x3b - 0.1)(x3b)
    x3b      = Activation("relu")(x3b)
    x3b_code      = Lambda(lambda x3b: x3b * 1.5)(x3b)
    #x3b_code = Lambda(lambda x3b: x3b * 16)(x3b)
    #x3b          = Dropout(0.3)(x3b)
    x3b          = UpSampling2D(size=(4, 4) )(x3b_code)
    x3b          = Dense(1)(x3b)

    x3c     = Conv2D(32, (2,2), padding="same",strides=(2,2),kernel_initializer='he_normal', activation='relu')(x3)
    x3c      = Lambda(lambda x3c: x3c - 0.1)(x3c)
    x3c      = Activation("relu")(x3c)
    x3c_code      = Lambda(lambda x3c: x3c * 1.5)(x3c)
    #x3c_code = Lambda(lambda x3c: x3c * 4)(x3c)
    #x3c          = Dropout(0.3)(x3c)
    x3c          = UpSampling2D(size=(2, 2) )(x3c_code)
    x3c          = Dense(1)(x3c)
    
    x3           = add([x3,x3b,x3c]) 
    x3           = Lambda(lambda x3: x3 /3, name="cluster")(x3)

    
    y           = Conv2D(32, (2,2), padding="same",strides=(1,1),kernel_initializer='he_normal', activation='relu')(y)
    y           = Conv2D(16, (2,2), padding="same",strides=(1,1),kernel_initializer='he_normal', activation='relu', name="pepper")(y)
    #y           = Dropout(0.3)(y)
    #y           = Conv2D(1, (1,1), padding="same",strides=(1,1),kernel_initializer='he_normal', activation='relu')(y)
    #y           = Dense(1)(y)
    
    #y = Lambda(lambda y: y - 0.1)(y)
    #y = Activation("relu")(y)
    #y = Lambda(lambda y: y *1024)(y)
    #y = Activation("tanh")(y)
    #y           = multiply([y,inputs])
    #y           = subtract([y,x], name="pepper")
    '''

    #outputs = add([x,x2,x3,y])
    #outputs = Lambda(lambda outputs: outputs /4)(outputs)
    
    outputs = concatenate([x,x2,x3])
    outputs   = Conv2D(32, (2,2), padding="same",strides=(1,1),kernel_initializer='he_normal', activation='relu')(outputs)
    outputs   = Conv2D(16, (2,2), padding="same",strides=(1,1),kernel_initializer='he_normal', activation='relu')(outputs)
    #outputs = Conv2D(1, (2,2), padding="same",strides=(1,1),kernel_initializer='he_normal', activation='relu')(outputs)

    '''
    xx=Flatten()(x_code)
    xx2=Flatten()(x2_code)
    xx3a=Flatten()(x3_code)
    xx3b=Flatten()(x3b_code)
    xx3c=Flatten()(x3c_code)
    
    classifier=concatenate([xx,xx2,xx3a,xx3b,xx3c])
    classifier=Dense(64,kernel_initializer='he_normal', activation='relu')(classifier)
    classifier=Dense(8,kernel_initializer='he_normal', activation='softmax')(classifier)
    '''

    model = Model(inputs=inputs, outputs=outputs)
    model.compile(loss='mean_squared_error', optimizer=optimizers.RMSprop(), metrics=['accuracy'])
    model2 = Model(inputs=inputs, outputs=outputs)
    model2.compile(loss='mean_squared_error', optimizer=optimizers.RMSprop(), metrics=['accuracy'])
    #model2 = Model(inputs=inputs, outputs=classifier)
    #model2.compile(loss='categorical_crossentropy', optimizer=optimizers.Adam(), metrics=['accuracy'])
    return model, model2
def attention_model():
    inputs = tf.keras.layers.Input(shape=(IMAGE_HEIGHT, IMAGE_WIDTH, CHANNEL+1))

    mask_list = []

    x = layers.Conv2D(32, (3, 3), strides=(1, 1), padding='same')(inputs)
    x = layers.ReLU()(x)
    res = x

    for it in range(5):
        x = layers.Conv2D(32, (3, 3), strides=(1, 1), padding='same')(x)
        x = layers.ReLU()(x)
        x = layers.Conv2D(32, (3, 3), strides=(1, 1), padding='same')(x)
        x = layers.add([x, res])
        x = layers.ReLU()(x)
        res = x

    coni = layers.Conv2D(32, (3, 3), strides=(1, 1), padding='same')(x)
    i = layers.Activation('sigmoid')(coni)
    cong = layers.Conv2D(32, (3, 3), strides=(1, 1), padding='same')(x)
    g = layers.Activation('sigmoid')(cong)
    cono = layers.Conv2D(32, (3, 3), strides=(1, 1), padding='same')(x)
    o = layers.Activation('sigmoid')(cono)

    c_2 = layers.multiply([i, g])
    c = c_2
    c_act = layers.Activation('tanh')(c)
    h = layers.multiply([o, c_act])

    mask = layers.Conv2D(1, (3, 3), strides=(1, 1), padding='same')(h)
    mask_list.append(mask)
    x = layers.Concatenate(axis=3)([inputs, mask])


    for _ in range(3):

        x = layers.Conv2D(32, (3, 3), strides=(1, 1), padding='same')(x)
        x = layers.ReLU()(x)
        res = x

        for it in range(5):
            x = layers.Conv2D(32, (3, 3), strides=(1, 1), padding='same')(x)
            x = layers.ReLU()(x)
            x = layers.Conv2D(32, (3, 3), strides=(1, 1), padding='same')(x)
            x = layers.add([x, res])
            x = layers.ReLU()(x)
            res = x

        x = layers.Concatenate(axis=3)([res, h])

        coni = layers.Conv2D(32, (3, 3), strides=(1, 1), padding='same')(x)
        i = layers.Activation('sigmoid')(coni)
        conf = layers.Conv2D(32, (3, 3), strides=(1, 1), padding='same')(x)
        f = layers.Activation('sigmoid')(conf)
        cong = layers.Conv2D(32, (3, 3), strides=(1, 1), padding='same')(x)
        g = layers.Activation('sigmoid')(cong)
        cono = layers.Conv2D(32, (3, 3), strides=(1, 1), padding='same')(x)
        o = layers.Activation('sigmoid')(cono)

        c_1 = layers.multiply([c, f])
        c_2 = layers.multiply([i, g])
        c = layers.add([c_1, c_2])
        c_act = layers.Activation('tanh')(c)
        h = layers.multiply([o, c_act])

        mask = layers.Conv2D(1, (3, 3), strides=(1, 1), padding='same')(h)
        mask_list.append(mask)

    outputs = layers.Lambda(lambda x: x)(mask)

    att_model = tf.keras.Model(inputs = inputs, outputs=outputs)
    #att_model = tf.keras.Model(inputs = inputs, outputs=[mask, mask_list])
    return att_model
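The model relies on module-level constants that are never shown; a hedged instantiation sketch with assumed values:

import tensorflow as tf
from tensorflow.keras import layers

# Assumed module-level constants; not defined anywhere in the snippet.
IMAGE_HEIGHT, IMAGE_WIDTH, CHANNEL = 128, 128, 3

att_model = attention_model()
att_model.summary()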
Example #10
def make_model(input_shape):
    inp_tensor = Input(input_shape)
    weight_decay = 0.0

    ### Input Block
    x = Conv2D(32, 3, padding='same',
               kernel_regularizer=l2(weight_decay))(inp_tensor)
    # x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(32,
               3,
               strides=2,
               padding='same',
               kernel_regularizer=l2(weight_decay))(x)
    # x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = Conv2D(64, 3, padding='same', kernel_regularizer=l2(weight_decay))(x)
    # x = BatchNormalization()(x)
    x = Activation('relu')(x)

    #### Block 1
    residual = Conv2D(128,
                      1,
                      strides=1,
                      use_bias=False,
                      padding='same',
                      kernel_regularizer=l2(weight_decay))(x)
    # residual = BatchNormalization()(residual)

    x = SeparableConv2D(128,
                        3,
                        padding='same',
                        kernel_regularizer=l2(weight_decay))(x)
    # x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = SeparableConv2D(128,
                        3,
                        padding='same',
                        kernel_regularizer=l2(weight_decay))(x)
    # x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # x = MaxPooling2D(2, strides = 2, padding = 'same')(x)

    x = add([x, residual])

    #### Block 2
    residual = Conv2D(256,
                      1,
                      strides=2,
                      use_bias=False,
                      padding='same',
                      kernel_regularizer=l2(weight_decay))(x)
    # residual = BatchNormalization()(residual)

    x = SeparableConv2D(256,
                        3,
                        padding='same',
                        kernel_regularizer=l2(weight_decay))(x)
    # x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = SeparableConv2D(256,
                        3,
                        padding='same',
                        kernel_regularizer=l2(weight_decay))(x)
    # x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = MaxPooling2D(2, strides=2, padding='same')(x)

    x = add([x, residual])

    #### Block 3
    residual = Conv2D(256,
                      1,
                      strides=2,
                      use_bias=False,
                      padding='same',
                      kernel_regularizer=l2(weight_decay))(x)
    # residual = BatchNormalization()(residual)

    x = SeparableConv2D(256,
                        3,
                        padding='same',
                        kernel_regularizer=l2(weight_decay))(x)
    # x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = SeparableConv2D(256,
                        3,
                        padding='same',
                        kernel_regularizer=l2(weight_decay))(x)
    # x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = MaxPooling2D(2, strides=2, padding='same')(x)

    x = add([x, residual])

    #### Block 4
    residual = Conv2D(512,
                      1,
                      strides=2,
                      use_bias=False,
                      padding='same',
                      kernel_regularizer=l2(weight_decay))(x)
    # residual = BatchNormalization()(residual)

    x = SeparableConv2D(512,
                        3,
                        padding='same',
                        kernel_regularizer=l2(weight_decay))(x)
    # x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = SeparableConv2D(512,
                        3,
                        padding='same',
                        kernel_regularizer=l2(weight_decay))(x)
    # x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = MaxPooling2D(2, strides=2, padding='same')(x)

    x = add([x, residual])

    #### output block
    x = SeparableConv2D(512,
                        3,
                        padding='same',
                        kernel_regularizer=l2(weight_decay))(x)
    # x = BatchNormalization()(x)
    x = Activation('relu')(x)

    x = SeparableConv2D(512,
                        3,
                        padding='same',
                        kernel_regularizer=l2(weight_decay))(x)
    # x = BatchNormalization()(x)
    x = Activation('relu')(x)

    model = Model(inp_tensor, x)
    # model.summary()
    return model
Example #11
def mb_conv_block(inputs, block_args, activation, drop_rate=None, prefix=''):

    has_seq = (block_args.seq_ratio
               is not None) and (0 < block_args.seq_ratio <= 1)
    filters = block_args.input_filters * block_args.expand_ratio

    if block_args.expand_ratio != 1:
        x = layers.Conv2D(filters,
                          1,
                          padding='same',
                          use_bias=False,
                          kernel_initializer=CONV_KERNEL_INITIALIZER,
                          name=prefix + 'expand_conv')(inputs)
        x = layers.BatchNormalization(axis=-1, name=prefix + 'expand_bn')(x)
        x = layers.Activation(activation, name=prefix + 'expand_activation')(x)
    else:
        x = inputs

    # Depthwise Convolution
    x = layers.DepthwiseConv2D(block_args.kernel_size,
                               strides=block_args.strides,
                               padding='same',
                               use_bias=False,
                               depthwise_initializer=CONV_KERNEL_INITIALIZER,
                               name=prefix + 'dw_conv')(x)
    x = layers.BatchNormalization(axis=-1, name=prefix + 'bn')(x)
    x = layers.Activation(activation, name=prefix + 'activation')(x)

    # Squeeze then re-expand; the result serves as a per-channel scaling factor (squeeze-and-excitation)
    if has_seq:
        num_reduced_filters = max(
            1, int(block_args.input_filters * block_args.seq_ratio))
        se_tensor = layers.GlobalAveragePooling2D(name=prefix +
                                                  'seq_squeeze')(x)

        se_tensor = layers.Reshape((1, 1, filters),
                                   name=prefix + 'seq_reshape')(se_tensor)
        se_tensor = layers.Conv2D(num_reduced_filters,
                                  1,
                                  activation=activation,
                                  padding='same',
                                  use_bias=True,
                                  kernel_initializer=CONV_KERNEL_INITIALIZER,
                                  name=prefix + 'seq_reduce')(se_tensor)
        se_tensor = layers.Conv2D(filters,
                                  1,
                                  activation='sigmoid',
                                  padding='same',
                                  use_bias=True,
                                  kernel_initializer=CONV_KERNEL_INITIALIZER,
                                  name=prefix + 'seq_expand')(se_tensor)

        x = layers.multiply([x, se_tensor], name=prefix + 'seq_excite')

    # Project the feature maps down with a 1x1 convolution
    x = layers.Conv2D(block_args.output_filters,
                      1,
                      padding='same',
                      use_bias=False,
                      kernel_initializer=CONV_KERNEL_INITIALIZER,
                      name=prefix + 'project_conv')(x)
    x = layers.BatchNormalization(axis=-1, name=prefix + 'project_bn')(x)

    # Residual skip connection
    if block_args.id_skip and block_args.strides == 1 and block_args.input_filters == block_args.output_filters:

        if drop_rate and (drop_rate > 0):
            # Reassign to x; without the assignment the Dropout layer is a no-op.
            x = layers.Dropout(drop_rate,
                               noise_shape=(None, 1, 1, 1),
                               name=prefix + 'drop')(x)

        x = layers.add([x, inputs], name=prefix + 'add')

    return x
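block_args and CONV_KERNEL_INITIALIZER are consumed but never defined in the snippet. Judging by the attribute accesses, a namedtuple along these lines would satisfy the function; the field names are inferred, so treat this sketch as an assumption:

import collections

# Stand-in for the module-level initializer referenced by the block.
CONV_KERNEL_INITIALIZER = 'he_normal'

# Inferred container; only the fields mb_conv_block actually reads are listed.
BlockArgs = collections.namedtuple('BlockArgs', [
    'input_filters', 'output_filters', 'expand_ratio', 'kernel_size',
    'strides', 'seq_ratio', 'id_skip'])

block_args = BlockArgs(input_filters=32, output_filters=16, expand_ratio=1,
                       kernel_size=3, strides=1, seq_ratio=0.25, id_skip=True)
# x = mb_conv_block(inputs, block_args, activation='swish', prefix='block1a_')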
    .prefetch(tf.data.experimental.AUTOTUNE) \
    .cache()  # cache the dataset into RAM

'''
3. Keras Modeling (Functional API)
'''

inputs = layers.Input(shape=(64, 2))  # time_steps = 64, channel_num/feature_num = 2
x = layers.LayerNormalization(axis=-2)(inputs)  # out: (batch, 64, 2); normalization acts on the time axis (64)
x = layers.Conv1D(128, kernel_size=2, activation=tfa.activations.mish)(x)
x0 = layers.Conv1D(128, kernel_size=2, activation=tfa.activations.mish)(x)

x = layers.Bidirectional(layers.LSTM(64, return_sequences=True))(x0)
x = layers.LayerNormalization(axis=-1)(x)
x = layers.Bidirectional(layers.LSTM(64, return_sequences=True))(x)
x1 = layers.add([x, x0])


x = layers.Bidirectional(layers.LSTM(64, return_sequences=True))(x1)
x = layers.LayerNormalization(axis=-1)(x)
x = layers.Conv1D(128, kernel_size=2, activation=tfa.activations.mish, padding='same')(x)
x2 = layers.add([x, x1])

x = layers.Conv1D(8, kernel_size=3, padding='same')(x2)
x = layers.Flatten()(x)  # Or, tf.squeeze
x = layers.Dropout(0.5)(x)

x3 = layers.Dense(128, activation=tfa.activations.mish)(x)
x = layers.Dense(128, activation=tfa.activations.mish)(x3)
x = layers.Dense(128, activation=tfa.activations.mish)(x)
x = layers.add([x3, x])
# the functional API makes it easy to manipulate non-linear connectivity topologies
# non-linear topologies: layers are not connected sequentially,
# which cannot be handled with the Sequential API

# residual connections
# TODO: change input
# input block
inputs = keras.Input(shape=(32, 32, 3), name='face')
x = layers.Conv2D(64, 7, activation='relu', strides=2, padding='same')(inputs)
x = layers.MaxPooling2D(3, strides=2, padding='same')(x)
block_0_output = layers.BatchNormalization()(x)
# block_1_1
x = layers.Conv2D(64, 3, activation='relu', padding='same')(block_0_output)
x = layers.Conv2D(64, 3, activation='relu', padding='same')(x)
x = layers.BatchNormalization()(x)
block_1_1_output = layers.add([x, block_0_output])

# block_1_2
x = layers.Conv2D(64, 3, activation='relu', padding='same')(block_1_1_output)
x = layers.Conv2D(64, 3, activation='relu', padding='same')(x)
x = layers.BatchNormalization()(x)
block_1_2_output = layers.add([x, block_1_1_output])

# block_2_1
x = layers.Conv2D(128, 3, activation='relu', padding='same')(block_1_2_output)
x = layers.Conv2D(128, 3, activation='relu', padding='same')(x)
x = layers.BatchNormalization()(x)
# shortcut using 1by1 conv2d
block_1_2_output = layers.Conv2D(128, 1, activation='relu', padding='same')(block_1_2_output)
block_1_2_output = layers.BatchNormalization()(block_1_2_output)
embeddings = {}
for f1 in fields:
    for f2 in fields:
        embeddings[(f1.feature_id,
                    f2.field_id)] = Embedding(f1.nb_features,
                                              EMBEDDING_DIMENSION,
                                              input_length=1)

products = []
for f1 in fields:
    for f2 in fields:
        if f2.field_id == f1.field_id:
            continue

        embedded_input_feature_1 = embeddings[(f1.feature_id, f2.field_id)](
            input_fields[f1.feature_id])
        embedded_input_feature_2 = embeddings[(f2.feature_id, f1.field_id)](
            input_fields[f2.feature_id])

        embedded_input_feature_1 = \
            Reshape((embeddings[(f1.feature_id, f2.field_id)].output_dim, 1))(embedded_input_feature_1)
        embedded_input_feature_2 = \
            Reshape((embeddings[(f2.feature_id, f1.field_id)].output_dim, 1))(embedded_input_feature_2)

        # dot() contracts the two embeddings along axis 1 to a scalar
        # interaction; multiply() accepts no `axes`/`normalize` arguments,
        # so keras.layers.dot is the layer that matches this call.
        product = dot([embedded_input_feature_1, embedded_input_feature_2],
                      axes=1,
                      normalize=False)
        products.append(Reshape((1, ))(product))

added = add(products)

output = Dense(1, activation='sigmoid', name='sigmoid_activation')(added)
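To complete the picture, a hedged sketch of how `fields`, `input_fields`, and the final model might be wired up; the names, shapes, and values here are assumptions, not from the source:

from collections import namedtuple
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

Field = namedtuple('Field', ['feature_id', 'field_id', 'nb_features'])
fields = [Field(0, 0, 100), Field(1, 1, 50)]  # illustrative values only

input_fields = {f.feature_id: Input(shape=(1,), dtype='int32')
                for f in fields}

# ... embeddings / products / output as above ...
model = Model(inputs=list(input_fields.values()), outputs=output)
model.compile(optimizer='adam', loss='binary_crossentropy')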
def resnet_v2(input_shape, depth, num_classes=10):
    """ResNet Version 2 Model builder [b]

    Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D units,
    also known as bottleneck layers.
    The first shortcut connection in each stage is a 1 x 1 Conv2D;
    subsequent shortcut connections are identity.
    At the beginning of each stage,
    the feature map size is halved (downsampled)
    by a convolutional layer with strides=2,
    while the number of filter maps is
    doubled. Within each stage, the layers have
    the same number of filters and the same feature map sizes.
    Feature map sizes:
    conv1  : 32x32,  16
    stage 0: 32x32,  64
    stage 1: 16x16, 128
    stage 2:  8x8,  256

    Arguments:
        input_shape (tensor): shape of input image tensor
        depth (int): number of core convolutional layers
        num_classes (int): number of classes (CIFAR10 has 10)

    Returns:
        model (Model): Keras model instance
    """
    if (depth - 2) % 9 != 0:
        raise ValueError('depth should be 9n+2 (e.g. 110 in [b])')
    # start model definition.
    num_filters_in = 16
    num_res_blocks = int((depth - 2) / 9)

    inputs = Input(shape=input_shape)
    # v2 performs Conv2D with BN-ReLU
    # on input before splitting into 2 paths
    x = resnet_layer(inputs=inputs,
                     num_filters=num_filters_in,
                     conv_first=True)

    # instantiate the stack of residual units
    for stage in range(3):
        for res_block in range(num_res_blocks):
            activation = 'relu'
            batch_normalization = True
            strides = 1
            if stage == 0:
                num_filters_out = num_filters_in * 4
                # first layer and first stage
                if res_block == 0:
                    activation = None
                    batch_normalization = False
            else:
                num_filters_out = num_filters_in * 2
                # first layer but not first stage
                if res_block == 0:
                    # downsample
                    strides = 2

            # bottleneck residual unit
            y = resnet_layer(inputs=x,
                             num_filters=num_filters_in,
                             kernel_size=1,
                             strides=strides,
                             activation=activation,
                             batch_normalization=batch_normalization,
                             conv_first=False)
            y = resnet_layer(inputs=y,
                             num_filters=num_filters_in,
                             conv_first=False)
            y = resnet_layer(inputs=y,
                             num_filters=num_filters_out,
                             kernel_size=1,
                             conv_first=False)
            if res_block == 0:
                # linear projection residual shortcut connection
                # to match changed dims
                x = resnet_layer(inputs=x,
                                 num_filters=num_filters_out,
                                 kernel_size=1,
                                 strides=strides,
                                 activation=None,
                                 batch_normalization=False)
            x = add([x, y])

        num_filters_in = num_filters_out

    # add classifier on top.
    # v2 has BN-ReLU before Pooling
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = AveragePooling2D(pool_size=8)(x)
    y = Flatten()(x)
    outputs = Dense(num_classes,
                    activation='softmax',
                    kernel_initializer='he_normal')(y)

    # instantiate model.
    model = Model(inputs=inputs, outputs=outputs)
    return model
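A hedged usage sketch; depth must satisfy depth = 9n + 2, and `resnet_layer` is assumed to be the standard helper from the Keras ResNet example:

# CIFAR-10-sized input; depth=56 gives n = (56 - 2) / 9 = 6 blocks per stage.
model = resnet_v2(input_shape=(32, 32, 3), depth=56, num_classes=10)
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])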
def generator_model():
    inputs = tf.keras.layers.Input(shape=(IMAGE_HEIGHT, IMAGE_WIDTH, CHANNEL+2))
    x = inputs

    '''
    #generator
    x = layers.Conv2D(64, (9, 9), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(64, (9, 9), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(64, (5, 5), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(64, (5, 5), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(64, (5, 5), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same')(x)
    res1 = x
    x = layers.LeakyReLU()(x)

    x = layers.Conv2D(64, (5, 5), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same')(x)
    res2 = x
    x = layers.LeakyReLU()(x)

    x = layers.Conv2D(64, (5, 5), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(64, (5, 5), strides=(2, 2), padding='same')(x)
    res3 = x
    x = layers.LeakyReLU()(x)

    x = layers.Conv2D(64, (5, 5), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(64, (5, 5), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2DTranspose(64, (5, 5), strides=(1, 1), padding='same')(x)
    #x = layers.add([x, res3])
    x = layers.LeakyReLU()(x)
    x = layers.add([x, res3])
    x = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same')(x)
    x = layers.LeakyReLU()(x)

    x = layers.Conv2DTranspose(64, (5, 5), strides=(1, 1), padding='same')(x)
    #x = layers.add([x, res2])
    x = layers.LeakyReLU()(x)
    x = layers.add([x, res2])
    x = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same')(x)
    x = layers.LeakyReLU()(x)

    x = layers.Conv2DTranspose(64, (5, 5), strides=(1, 1), padding='same')(x)
    #x = layers.subtract([res1, x])
    x = layers.LeakyReLU()(x)
    x = layers.subtract([res1, x])
    x = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same')(x)
    x = layers.LeakyReLU()(x)

    x = layers.Conv2DTranspose(64, (5, 5), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2DTranspose(64, (5, 5), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2DTranspose(64, (5, 5), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2DTranspose(64, (5, 5), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2DTranspose(64, (9, 9), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2DTranspose(3, (9, 9), strides=(1, 1), padding='same')(x)
    x = layers.Activation('tanh')(x)
    '''

    #generator
    x = layers.Conv2D(64, (5, 5), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    res1 = x

    x = layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    res2 = x

    x = layers.Conv2D(128, (3, 3), strides=(2, 2), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)


    #dilated convs
    x = layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same', dilation_rate=2)(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(256, (3, 3), strides=(1, 1), padding='same', dilation_rate=4)(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(256, (3, 3), strides=(1, 1), padding='same', dilation_rate=8)(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same', dilation_rate=16)(x)
    x = layers.LeakyReLU()(x)


    x = layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)

    x = layers.Conv2DTranspose(128, (4, 4), strides=(2, 2), padding='same')(x)
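    # stride-1 'same' pooling keeps the spatial size; plausibly meant to
    # smooth checkerboard artifacts from the transposed conv (the same
    # pattern is repeated in the next block)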
    x = layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)

    x = layers.add([x, res2])

    x = layers.Conv2D(128, (3, 3), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)

    x = layers.Conv2DTranspose(64, (4, 4), strides=(2, 2), padding='same')(x)
    x = layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)

    x = layers.add([x, res1])

    x = layers.Conv2D(32, (3, 3), strides=(1, 1), padding='same')(x)
    x = layers.LeakyReLU()(x)
    x = layers.Conv2D(3, (3, 3), strides=(1, 1), padding='same')(x)
    x = layers.Activation('tanh')(x)
    outputs = Lambda(lambda x: x)(x)  # identity layer; gives the output its own node

    gen_model = tf.keras.Model(inputs=inputs, outputs=outputs)
    return gen_model
Example #17
def call(self, inputs):
    # Applies inputs * std + mean; the double transpose lets the stored
    # `std` and `mean` broadcast across the batch dimension.
    return tf.transpose(
        layers.add(
            [layers.multiply([tf.transpose(inputs), self.std]),
             self.mean]))
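A minimal usage sketch of this call, assuming the enclosing layer stores mean and std as column vectors of shape (features, 1) so they broadcast across the batch after the transpose; the class name and values below are illustrative:

import tensorflow as tf
from tensorflow.keras import layers

class Denormalize(tf.keras.layers.Layer):
    # maps standardized features back to the original scale: x * std + mean
    def __init__(self, mean, std, **kwargs):
        super().__init__(**kwargs)
        # stored as (features, 1) so they broadcast over the batch axis
        self.mean = tf.reshape(tf.constant(mean, tf.float32), (-1, 1))
        self.std = tf.reshape(tf.constant(std, tf.float32), (-1, 1))

    def call(self, inputs):
        return tf.transpose(
            layers.add(
                [layers.multiply([tf.transpose(inputs), self.std]),
                 self.mean]))

y = Denormalize(mean=[1., 2.], std=[3., 4.])(tf.ones((8, 2)))  # shape (8, 2)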
Example #18
    def generator(self):

        if self.G:
            return self.G

        # === Style Mapping ===

        self.S = Sequential()

        self.S.add(Dense(512, input_shape=[latent_size]))
        self.S.add(LeakyReLU(0.2))
        self.S.add(Dense(512))
        self.S.add(LeakyReLU(0.2))
        self.S.add(Dense(512))
        self.S.add(LeakyReLU(0.2))
        self.S.add(Dense(512))
        self.S.add(LeakyReLU(0.2))

        # === Generator ===

        # Inputs
        inp_style = []

        for i in range(n_layers):
            inp_style.append(Input([512]))

        inp_noise = Input([im_size, im_size, 1])

        # Latent
        x = Lambda(lambda x: x[:, :1] * 0 + 1)(inp_style[0])

        outs = []

        # Actual Model
        x = Dense(
            4 * 4 * 4 * cha, activation="relu", kernel_initializer="random_normal"
        )(x)
        x = Reshape([4, 4, 4 * cha])(x)

        x, r = g_block(x, inp_style[0], inp_noise, 32 * cha, u=False)  # 4
        outs.append(r)

        x, r = g_block(x, inp_style[1], inp_noise, 16 * cha)  # 8
        outs.append(r)

        x, r = g_block(x, inp_style[2], inp_noise, 8 * cha)  # 16
        outs.append(r)

        x, r = g_block(x, inp_style[3], inp_noise, 6 * cha)  # 32
        outs.append(r)

        x, r = g_block(x, inp_style[4], inp_noise, 4 * cha)  # 64
        outs.append(r)

        x, r = g_block(x, inp_style[5], inp_noise, 2 * cha)  # 128
        outs.append(r)

        x, r = g_block(x, inp_style[6], inp_noise, 1 * cha)  # 256
        outs.append(r)

        x = add(outs)

        x = Lambda(lambda y: y / 2 + 0.5)(
            x
        )  # Use values centered around 0, but normalize to [0, 1], providing better initialization

        self.G = Model(inputs=inp_style + [inp_noise], outputs=x)

        return self.G
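The generator above relies on module-level configuration and a g_block helper defined elsewhere; a hedged sketch of the assumed constants, with illustrative values chosen to match the seven style inputs and the '# 256' comment:

latent_size = 512   # width of the style mapping network
n_layers = 7        # one style input per g_block call above
im_size = 256       # output resolution of the last block
cha = 16            # base channel multiplier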
def shortcut(x, prev, block_number):
    x = add([x, prev], name=f"Shortcut_{block_number}")
    return x
Example #20
def bigxception(input_shape=(256, 256, 3)):
    input_tensor = Input(input_shape)
    x = Conv2D(32, (3, 3), strides=(2, 2), use_bias=False)(input_tensor)
    x = BatchNormalization(name='block1_conv1_bn')(x)
    x = Activation('relu', name='block1_conv1_act')(x)
    x = Conv2D(64, (3, 3), use_bias=False)(x)
    x = BatchNormalization(name='block1_conv2_bn')(x)
    x = Activation('relu', name='block1_conv2_act')(x)

    residual = Conv2D(128, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block2_sepconv1_bn')(x)
    x = Activation('relu', name='block2_sepconv2_act')(x)
    x = SeparableConv2D(128, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block2_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    residual = Conv2D(256, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block3_sepconv1_bn')(x)
    x = Activation('relu', name='block3_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block3_sepconv2_bn')(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    residual = Conv2D(256, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block4_sepconv1_bn')(x)
    x = Activation('relu', name='block4_sepconv2_act')(x)
    x = SeparableConv2D(256, (3, 3), padding='same', use_bias=False)(x)
    x = BatchNormalization(name='block4_sepconv2_bn')(x)
    map_out = K.mean(x, axis=-1)  # computed but not used as a model output
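    # note: the valid-padding (32, 32) convs below need a 32x32 feature map
    # at this point, which corresponds to a 512x512 input rather than the
    # default 256x256 (which yields 16x16 here)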
    cat_x = SeparableConv2D(50, (32, 32), strides=(1, 1),
                            activation='softmax')(x)
    cat_out = Flatten(name='categories')(cat_x)
    attr_x = SeparableConv2D(1000, (32, 32),
                             strides=(1, 1),
                             activation='sigmoid')(x)
    attr_out = Flatten(name='attributes')(attr_x)
    model = keras.models.Model(inputs=input_tensor,
                               outputs=[cat_out, attr_out])
    return model
def mini_XCEPTION(input_shape, num_classes, l2_regularization=0.01):
    regularization = l2(l2_regularization)

    # base
    img_input = Input(input_shape)
    x = Conv2D(8, (3, 3),
               strides=(1, 1),
               kernel_regularizer=regularization,
               use_bias=False)(img_input)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv2D(8, (3, 3),
               strides=(1, 1),
               kernel_regularizer=regularization,
               use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)

    # module 1
    residual = Conv2D(16, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(16, (3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(16, (3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    # module 2
    residual = Conv2D(32, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(32, (3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(32, (3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    # module 3
    residual = Conv2D(64, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(64, (3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(64, (3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    # module 4
    residual = Conv2D(128, (1, 1),
                      strides=(2, 2),
                      padding='same',
                      use_bias=False)(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(128, (3, 3),
                        padding='same',
                        kernel_regularizer=regularization,
                        use_bias=False)(x)
    x = BatchNormalization()(x)

    x = MaxPooling2D((3, 3), strides=(2, 2), padding='same')(x)
    x = layers.add([x, residual])

    x = Conv2D(
        num_classes,
        (3, 3),
        # kernel_regularizer=regularization,
        padding='same')(x)
    x = GlobalAveragePooling2D()(x)
    output = Dense(30)(x)  # note: head is fixed at 30 units, independent of num_classes

    model = Model(img_input, output)
    return model
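A hedged usage sketch for mini_XCEPTION; the input shape, class count, and loss are illustrative assumptions:

model = mini_XCEPTION(input_shape=(64, 64, 1), num_classes=7)
model.compile(optimizer='adam', loss='mse')
model.summary()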
Example #22
def conv_building_block(input_tensor,
                        kernel_size,
                        filters,
                        stage,
                        block,
                        strides=(2, 2),
                        training=None):
    """A block that has a conv layer at shortcut.

    Arguments:
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of
                middle conv layer at main path
        filters: list of integers, the filters of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: current block label, used for generating layer names
        strides: Strides for the first conv layer in the block.
        training: Only used if training keras model with Estimator.  In other
            scenarios it is handled automatically.

    Returns:
        Output tensor for the block.

    Note that from stage 3,
    the first conv layer at the main path has strides=(2, 2),
    and the shortcut has strides=(2, 2) as well.
    """
    filters1, filters2 = filters
    if tf.keras.backend.image_data_format() == 'channels_last':
        bn_axis = 3
    else:
        bn_axis = 1

    x = Conv2D(filters1,
               kernel_size,
               strides=strides,
               padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=l2(L2_WEIGHT_DECAY),
               bias_regularizer=l2(L2_WEIGHT_DECAY))(input_tensor)
    x = BatchNormalization(axis=bn_axis,
                           momentum=BATCH_NORM_DECAY,
                           epsilon=BATCH_NORM_EPSILON,
                           fused=True)(x, training=training)
    x = Activation('approx_activation')(x)

    x = Conv2D(filters2,
               kernel_size,
               padding='same',
               kernel_initializer='he_normal',
               kernel_regularizer=l2(L2_WEIGHT_DECAY),
               bias_regularizer=l2(L2_WEIGHT_DECAY))(x)
    x = BatchNormalization(axis=bn_axis,
                           momentum=BATCH_NORM_DECAY,
                           epsilon=BATCH_NORM_EPSILON,
                           fused=True)(x, training=training)

    shortcut = Conv2D(filters2, (1, 1),
                      strides=strides,
                      kernel_initializer='he_normal',
                      kernel_regularizer=l2(L2_WEIGHT_DECAY),
                      bias_regularizer=l2(L2_WEIGHT_DECAY))(input_tensor)
    shortcut = BatchNormalization(axis=bn_axis,
                                  momentum=BATCH_NORM_DECAY,
                                  epsilon=BATCH_NORM_EPSILON,
                                  fused=True)(shortcut, training=training)

    x = add([x, shortcut])
    x = Activation('approx_activation')(x)
    return x
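conv_building_block expects module-level constants and a registered 'approx_activation'; a minimal sketch of the assumed setup, with illustrative values and plain ReLU standing in for the real approximate activation:

from tensorflow.keras.utils import get_custom_objects
from tensorflow.keras.activations import relu

L2_WEIGHT_DECAY = 1e-4
BATCH_NORM_DECAY = 0.9
BATCH_NORM_EPSILON = 1e-5

get_custom_objects()['approx_activation'] = relu  # placeholder stand-in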
from tensorflow.keras import layers
import numpy as np

# The functional API makes it easy to build non-linear connectivity topologies:
# models whose layers are not connected sequentially, which the Sequential API
# cannot handle.

# residual connections
inputs = keras.Input(shape=(32, 32, 3), name='img')
x = layers.Conv2D(32, 3, activation='relu')(inputs)
x = layers.Conv2D(64, 3, activation='relu')(x)
block_1_output = layers.MaxPooling2D(3)(x)

x = layers.Conv2D(64, 3, activation='relu', padding='same')(block_1_output)
x = layers.Conv2D(64, 3, activation='relu', padding='same')(x)
block_2_output = layers.add([x, block_1_output])

x = layers.Conv2D(64, 3, activation='relu', padding='same')(block_2_output)
x = layers.Conv2D(64, 3, activation='relu', padding='same')(x)
block_3_output = layers.add([x, block_2_output])

x = layers.Conv2D(64, 3, activation='relu')(block_3_output)
x = layers.GlobalAveragePooling2D()(x)
x = layers.Dense(256, activation='relu')(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(10, activation='softmax')(x)

model = keras.Model(inputs, outputs, name='toy_resnet')
model.summary()
keras.utils.plot_model(model, 'mini_resnet.png', show_shapes=True)
def resnet8(img_input,
            num_pts,
            bins,
            scope='Prediction',
            reuse=False,
            f=0.25,
            reg=True,
            dense=0):
    """
    Define model architecture. The parameter 'f' controls the network width.
    """
    img_input = Input(tensor=img_input)
    kr = None
    if reg:
        kr = regularizers.l2(1e-4)

    with tf.variable_scope(scope, reuse=reuse):
        x1 = Conv2D(int(32 * f), (5, 5),
                    strides=[2, 2],
                    padding='same',
                    kernel_initializer='he_normal')(img_input)
        x1 = MaxPooling2D(pool_size=(3, 3), strides=[2, 2])(x1)
        x1 = BatchNormalization()(x1)

        # First residual block
        x2 = Activation('relu')(x1)
        x2 = Conv2D(int(32 * f), (3, 3),
                    strides=[2, 2],
                    padding='same',
                    kernel_initializer="he_normal",
                    kernel_regularizer=kr)(x2)
        x2 = BatchNormalization()(x2)

        x2 = Activation('relu')(x2)
        x2 = Conv2D(int(32 * f), (3, 3),
                    padding='same',
                    kernel_initializer="he_normal",
                    kernel_regularizer=kr)(x2)
        x2 = BatchNormalization()(x2)

        x1 = Conv2D(int(32 * f), (1, 1),
                    strides=[2, 2],
                    padding='same',
                    kernel_initializer='he_normal')(x1)
        x3 = add([x1, x2])

        # Second residual block
        x4 = Activation('relu')(x3)
        x4 = Conv2D(int(64 * f), (3, 3),
                    strides=[2, 2],
                    padding='same',
                    kernel_initializer="he_normal",
                    kernel_regularizer=kr)(x4)

        x4 = BatchNormalization()(x4)

        x4 = Activation('relu')(x4)
        x4 = Conv2D(int(64 * f), (3, 3),
                    padding='same',
                    kernel_initializer="he_normal",
                    kernel_regularizer=kr)(x4)

        x4 = BatchNormalization()(x4)

        x3 = Conv2D(int(64 * f), (1, 1),
                    strides=[2, 2],
                    padding='same',
                    kernel_initializer='he_normal')(x3)
        x5 = add([x3, x4])

        # Third residual block
        x6 = Activation('relu')(x5)
        x6 = Conv2D(int(128 * f), (3, 3),
                    strides=[2, 2],
                    padding='same',
                    kernel_initializer="he_normal",
                    kernel_regularizer=kr)(x6)

        x6 = BatchNormalization()(x6)

        x6 = Activation('relu')(x6)
        x6 = Conv2D(int(128 * f), (3, 3),
                    padding='same',
                    kernel_initializer="he_normal",
                    kernel_regularizer=kr)(x6)

        x6 = BatchNormalization()(x6)

        x5 = Conv2D(int(128 * f), (1, 1),
                    strides=[2, 2],
                    padding='same',
                    kernel_initializer='he_normal')(x5)
        x7 = add([x5, x6])

        x = Flatten()(x7)
        #print(x.shape.dims)
        x = Activation('relu')(x)
        if kr is not None:
            x = Dropout(0.5)(x)

        #x = Dense(int(8192*f), kernel_initializer='he_normal')(x)
        #x = Activation('relu')(x)

        x = Dense(int(4096 * f), kernel_initializer='he_normal')(x)
        x = Activation('relu')(x)

        x = BatchNormalization()(x)

        # note: the `dense` argument is currently unused; extra Dense layers
        # could be added here in a loop, e.g. for ii in range(dense)
        x = Dense(int(2048 * f), kernel_initializer='he_normal')(x)
        x = Activation('relu')(x)

        x = BatchNormalization()(x)

        x = Dense(int(1024 * f), kernel_initializer='he_normal')(x)
        x = Activation('relu')(x)

        # Output channel
        logits = []

        for ii in range(num_pts):
            temp_list = []
            for jj in range(3):
                temp = Dense(bins, kernel_initializer='he_normal')(x)
                temp_list.append(temp)

            logits.append(temp_list)

    return logits
Example #25
def Xception(include_top=True,
             weights='imagenet',
             input_tensor=None,
             input_shape=None,
             pooling=None,
             classes=1000,
             **kwargs):
    """Instantiates the Xception architecture.
    Optionally loads weights pre-trained on ImageNet.
    Note that the data format convention used by the model is
    the one specified in your Keras config at `~/.keras/keras.json`.
    Note that the default input image size for this model is 299x299.
    # Arguments
        include_top: whether to include the fully-connected
            layer at the top of the network.
        weights: one of `None` (random initialization),
              'imagenet' (pre-training on ImageNet),
              or the path to the weights file to be loaded.
        input_tensor: optional Keras tensor
            (i.e. output of `layers.Input()`)
            to use as image input for the model.
        input_shape: optional shape tuple, only to be specified
            if `include_top` is False (otherwise the input shape
            has to be `(299, 299, 3)`).
            It should have exactly 3 input channels,
            and width and height should be no smaller than 71.
            E.g. `(150, 150, 3)` would be one valid value.
        pooling: Optional pooling mode for feature extraction
            when `include_top` is `False`.
            - `None` means that the output of the model will be
                the 4D tensor output of the
                last convolutional block.
            - `avg` means that global average pooling
                will be applied to the output of the
                last convolutional block, and thus
                the output of the model will be a 2D tensor.
            - `max` means that global max pooling will
                be applied.
        classes: optional number of classes to classify images
            into, only to be specified if `include_top` is True,
            and if no `weights` argument is specified.
    # Returns
        A Keras model instance.
    # Raises
        ValueError: in case of invalid argument for `weights`,
            or invalid input shape.
        RuntimeError: If attempting to run this model with a
            backend that does not support separable convolutions.
    """
    if not (weights in {'imagenet', None} or os.path.exists(weights)):
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization), `imagenet` '
                         '(pre-training on ImageNet), '
                         'or the path to the weights file to be loaded.')

    if weights == 'imagenet' and include_top and classes != 1000:
        raise ValueError(
            'If using `weights` as `"imagenet"` with `include_top`'
            ' as true, `classes` should be 1000')

    if input_tensor is None:
        img_input = layers.Input(shape=input_shape)
    else:
        if not backend.is_keras_tensor(input_tensor):
            img_input = layers.Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    channel_axis = 1 if backend.image_data_format() == 'channels_first' else -1

    x = layers.ZeroPadding2D((1, 1))(img_input)
    x = layers.Conv2D(32, (3, 3),
                      strides=(2, 2),
                      use_bias=False,
                      name='block1_conv1')(x)
    x = layers.BatchNormalization(axis=channel_axis, name='block1_conv1_bn')(x)
    x = layers.Activation('relu', name='block1_conv1_act')(x)
    x = layers.ZeroPadding2D((1, 1))(x)
    x = layers.Conv2D(64, (3, 3), use_bias=False, name='block1_conv2')(x)
    x = layers.BatchNormalization(axis=channel_axis, name='block1_conv2_bn')(x)
    x = layers.Activation('relu', name='block1_conv2_act')(x)

    residual = layers.Conv2D(128, (1, 1),
                             strides=(2, 2),
                             padding='same',
                             use_bias=False)(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)

    x = layers.SeparableConv2D(128, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block2_sepconv1')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block2_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block2_sepconv2_act')(x)
    x = layers.SeparableConv2D(128, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block2_sepconv2')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block2_sepconv2_bn')(x)

    x = layers.MaxPooling2D((3, 3),
                            strides=(2, 2),
                            padding='same',
                            name='block2_pool')(x)
    x = layers.add([x, residual])

    residual = layers.Conv2D(256, (1, 1),
                             strides=(2, 2),
                             padding='same',
                             use_bias=False)(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)

    x = layers.Activation('relu', name='block3_sepconv1_act')(x)
    x = layers.SeparableConv2D(256, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block3_sepconv1')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block3_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block3_sepconv2_act')(x)
    x = layers.SeparableConv2D(256, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block3_sepconv2')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block3_sepconv2_bn')(x)

    x = layers.MaxPooling2D((3, 3),
                            strides=(2, 2),
                            padding='same',
                            name='block3_pool')(x)
    x = layers.add([x, residual])

    residual = layers.Conv2D(728, (1, 1),
                             strides=(2, 2),
                             padding='same',
                             use_bias=False)(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)

    x = layers.Activation('relu', name='block4_sepconv1_act')(x)
    x = layers.SeparableConv2D(728, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block4_sepconv1')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block4_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block4_sepconv2_act')(x)
    x = layers.SeparableConv2D(728, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block4_sepconv2')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block4_sepconv2_bn')(x)

    x = layers.MaxPooling2D((3, 3),
                            strides=(2, 2),
                            padding='same',
                            name='block4_pool')(x)
    x = layers.add([x, residual])

    for i in range(8):
        residual = x
        prefix = 'block' + str(i + 5)

        x = layers.Activation('relu', name=prefix + '_sepconv1_act')(x)
        x = layers.SeparableConv2D(728, (3, 3),
                                   padding='same',
                                   use_bias=False,
                                   name=prefix + '_sepconv1')(x)
        x = layers.BatchNormalization(axis=channel_axis,
                                      name=prefix + '_sepconv1_bn')(x)
        x = layers.Activation('relu', name=prefix + '_sepconv2_act')(x)
        x = layers.SeparableConv2D(728, (3, 3),
                                   padding='same',
                                   use_bias=False,
                                   name=prefix + '_sepconv2')(x)
        x = layers.BatchNormalization(axis=channel_axis,
                                      name=prefix + '_sepconv2_bn')(x)
        x = layers.Activation('relu', name=prefix + '_sepconv3_act')(x)
        x = layers.SeparableConv2D(728, (3, 3),
                                   padding='same',
                                   use_bias=False,
                                   name=prefix + '_sepconv3')(x)
        x = layers.BatchNormalization(axis=channel_axis,
                                      name=prefix + '_sepconv3_bn')(x)

        x = layers.add([x, residual])

    residual = layers.Conv2D(1024, (1, 1),
                             strides=(2, 2),
                             padding='same',
                             use_bias=False)(x)
    residual = layers.BatchNormalization(axis=channel_axis)(residual)

    x = layers.Activation('relu', name='block13_sepconv1_act')(x)
    x = layers.SeparableConv2D(728, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block13_sepconv1')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block13_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block13_sepconv2_act')(x)
    x = layers.SeparableConv2D(1024, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block13_sepconv2')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block13_sepconv2_bn')(x)

    x = layers.MaxPooling2D((3, 3),
                            strides=(2, 2),
                            padding='same',
                            name='block13_pool')(x)
    x = layers.add([x, residual])

    x = layers.SeparableConv2D(1536, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block14_sepconv1')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block14_sepconv1_bn')(x)
    x = layers.Activation('relu', name='block14_sepconv1_act')(x)

    x = layers.SeparableConv2D(2048, (3, 3),
                               padding='same',
                               use_bias=False,
                               name='block14_sepconv2')(x)
    x = layers.BatchNormalization(axis=channel_axis,
                                  name='block14_sepconv2_bn')(x)
    x = layers.Activation('relu', name='block14_sepconv2_act')(x)

    if include_top:
        x = layers.GlobalAveragePooling2D(name='avg_pool')(x)
        x = layers.Dense(classes, activation='softmax', name='predictions')(x)
    else:
        if pooling == 'avg':
            x = layers.GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = layers.GlobalMaxPooling2D()(x)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = keras_utils.get_source_inputs(input_tensor)
    else:
        inputs = img_input
    # Create model.
    model = models.Model(inputs, x, name='xception')

    # Load weights.
    if weights == 'imagenet':
        if include_top:
            weights_path = keras_utils.get_file(
                'xception_weights_tf_dim_ordering_tf_kernels.h5',
                TF_WEIGHTS_PATH,
                cache_subdir='models',
                file_hash='0a58e3b7378bc2990ea3b43d5981f1f6')
        else:
            weights_path = keras_utils.get_file(
                'xception_weights_tf_dim_ordering_tf_kernels_notop.h5',
                TF_WEIGHTS_PATH_NO_TOP,
                cache_subdir='models',
                file_hash='b0042744bf5b25fce3cb969f33bebb97')
        model.load_weights(weights_path)
        if backend.backend() == 'theano':
            keras_utils.convert_all_kernels_in_model(model)
    elif weights is not None:
        model.load_weights(weights)

    return model
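A hedged usage sketch of the function above as a feature extractor; the dummy input is illustrative:

import numpy as np

base = Xception(include_top=False,
                weights='imagenet',
                input_shape=(299, 299, 3),
                pooling='avg')
features = base.predict(np.zeros((1, 299, 299, 3), dtype='float32'))
print(features.shape)  # (1, 2048)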
def deeplab_v3plus(image_size, n_categories):
    if np.mod(image_size[0], 32) != 0 or np.mod(image_size[1], 32) != 0:
        raise ValueError("image_size must be a multiple of 32 in both dimensions")

    if min(image_size) < 320:
        raise ValueError("min(image_size) must be at least 320")

    #xm means x_main: the center flow in Fig. 4 of the paper.
    #xs means x_side: the side (shortcut) flow in Fig. 4.
    #encoder
    inputs = layers.Input(shape=(image_size[0], image_size[1], 3),
                          name="inputs")
    #entry_flow
    #entry block 1
    xm = Conv_BN(inputs,
                 32,
                 filter=3,
                 prefix="entry_b1",
                 suffix="1",
                 strides=2,
                 dilation_rate=1)
    xm = xs = Conv_BN(xm,
                      64,
                      filter=3,
                      prefix="entry_b1",
                      suffix="2",
                      strides=1,
                      dilation_rate=1)

    #entry block 2
    #xm = layers.DepthwiseConv2D((3,3), depth_multiplier=2, padding="same", name="entry_b2_dcv1")(xm)
    n_channels = 128
    xm = SepConv_BN(xm,
                    n_channels,
                    prefix="entry_b2",
                    suffix="1",
                    strides=1,
                    dilation_rate=1)
    xm = SepConv_BN(xm,
                    n_channels,
                    prefix="entry_b2",
                    suffix="2",
                    strides=1,
                    dilation_rate=1)
    xm = SepConv_BN(xm,
                    n_channels,
                    prefix="entry_b2",
                    suffix="3",
                    strides=2,
                    dilation_rate=1)
    xs = Conv_BN(xs,
                 n_channels,
                 filter=1,
                 prefix="entry_b2_side",
                 suffix="1",
                 strides=2,
                 dilation_rate=1)
    xs = xm = layers.add([xs, xm], name="entry_b2_add")
    #entry block 3
    n_channels = 256
    xm = SepConv_BN(xm,
                    n_channels,
                    prefix="entry_b3",
                    suffix="1",
                    strides=1,
                    dilation_rate=1)
    xm = SepConv_BN(xm,
                    n_channels,
                    prefix="entry_b3",
                    suffix="2",
                    strides=1,
                    dilation_rate=1)
    xm = SepConv_BN(xm,
                    n_channels,
                    prefix="entry_b3",
                    suffix="3",
                    strides=2,
                    dilation_rate=1)
    xs = Conv_BN(xs,
                 n_channels,
                 filter=1,
                 prefix="entry_b3_side",
                 suffix="1",
                 strides=2,
                 dilation_rate=1)
    xs = xm = x_dec = layers.add([xs, xm], name="entry_b3_add")
    #entry block 4
    n_channels = 728
    xm = SepConv_BN(xm,
                    n_channels,
                    prefix="entry_b4",
                    suffix="1",
                    strides=1,
                    dilation_rate=1)
    xm = SepConv_BN(xm,
                    n_channels,
                    prefix="entry_b4",
                    suffix="2",
                    strides=1,
                    dilation_rate=1)
    xm = SepConv_BN(xm,
                    n_channels,
                    prefix="entry_b4",
                    suffix="3",
                    strides=2,
                    dilation_rate=1)
    xs = Conv_BN(xs,
                 n_channels,
                 filter=1,
                 prefix="entry_b4_side",
                 suffix="1",
                 strides=2,
                 dilation_rate=1)
    xs = xm = layers.add([xs, xm], name="entry_b4_add")  #middle flow
    for i in range(16):
        ii = i + 1
        xm = SepConv_BN(xm,
                        n_channels,
                        prefix="middle_b%d" % ii,
                        suffix="1",
                        strides=1,
                        dilation_rate=1)
        xm = SepConv_BN(xm,
                        n_channels,
                        prefix="middle_b%d" % ii,
                        suffix="2",
                        strides=1,
                        dilation_rate=1)
        xm = SepConv_BN(xm,
                        n_channels,
                        prefix="middle_b%d" % ii,
                        suffix="3",
                        strides=1,
                        dilation_rate=1)
        xs = xm = layers.add([xs, xm],
                             name="middle_b%d_add" % ii)
    #exit flow
    #exit block1
    xm = SepConv_BN(xm,
                    728,
                    prefix="exit_b1",
                    suffix="1",
                    strides=1,
                    dilation_rate=1)
    xm = SepConv_BN(xm,
                    1024,
                    prefix="exit_b1",
                    suffix="2",
                    strides=1,
                    dilation_rate=1)
    xm = SepConv_BN(xm,
                    1024,
                    prefix="exit_b1",
                    suffix="3",
                    strides=2,
                    dilation_rate=1)
    xs = Conv_BN(xs,
                 1024,
                 filter=1,
                 prefix="exit_b1_side",
                 suffix="1",
                 strides=2,
                 dilation_rate=1)
    xs = xm = layers.add([xs, xm], name="exit_b1_add")  #middle flow

    #exit block2
    xm = SepConv_BN(xm,
                    1536,
                    prefix="exit_b2",
                    suffix="1",
                    strides=1,
                    dilation_rate=1)
    xm = SepConv_BN(xm,
                    1536,
                    prefix="exit_b2",
                    suffix="2",
                    strides=1,
                    dilation_rate=1)
    xm = SepConv_BN(xm,
                    2048,
                    prefix="exit_b2",
                    suffix="3",
                    strides=1,
                    dilation_rate=1)

    #encoder = keras.Model(inputs=inputs,outputs=xm, name="xception_encoder")

    #get feature_size and cal dilation_rates
    feature_size = keras.backend.int_shape(xm)[1:3]
    min_feature_size = min(feature_size)
    dilation_rates = cal_dilation_rates(min_feature_size)

    #ASPP
    aspp1 = Conv_BN(xm,
                    256,
                    filter=1,
                    prefix="aspp1",
                    suffix="1",
                    strides=1,
                    dilation_rate=1)
    aspp2 = SepConv_BN(xm,
                       256,
                       prefix="aspp2",
                       suffix="1",
                       strides=1,
                       dilation_rate=dilation_rates[0])
    aspp3 = SepConv_BN(xm,
                       256,
                       prefix="aspp3",
                       suffix="1",
                       strides=1,
                       dilation_rate=dilation_rates[1])
    aspp4 = SepConv_BN(xm,
                       256,
                       prefix="aspp4",
                       suffix="1",
                       strides=1,
                       dilation_rate=dilation_rates[2])

    aspp5 = keras.backend.mean(xm, axis=[1, 2], keepdims=True)
    aspp5 = Conv_BN(aspp5,
                    256,
                    filter=1,
                    prefix="aspp5",
                    suffix="1",
                    strides=1,
                    dilation_rate=1)
    aspp5 = layers.UpSampling2D(feature_size, name="aspp5_upsampling")(aspp5)

    ASPP = layers.concatenate([aspp1, aspp2, aspp3, aspp4, aspp5], name="ASPP")
    ASPP = Conv_BN(ASPP,
                   256,
                   filter=1,
                   prefix="ASPP",
                   suffix="1",
                   strides=1,
                   dilation_rate=1)
    ASPP = layers.UpSampling2D(4, name="ASPP_upsample_4")(ASPP)

    #decoder
    x_dec = Conv_BN(x_dec,
                    48,
                    filter=1,
                    prefix="dec1",
                    suffix="1",
                    strides=1,
                    dilation_rate=1)
    x_dec = layers.concatenate([x_dec, ASPP], name="dec_concat")
    x_dec = SepConv_BN(x_dec,
                       256,
                       prefix="dec1",
                       suffix="2",
                       strides=1,
                       dilation_rate=1)
    x_dec = SepConv_BN(x_dec,
                       256,
                       prefix="dec1",
                       suffix="3",
                       strides=1,
                       dilation_rate=1)
    x_dec = layers.UpSampling2D(4, name="dec_upsample_1")(x_dec)

    x_dec = SepConv_BN(x_dec,
                       n_categories,
                       prefix="dec2",
                       suffix="1",
                       strides=1,
                       dilation_rate=1,
                       last_activation=False)
    x_dec = layers.UpSampling2D(2, name="dec_upsample_2")(x_dec)
    outputs = layers.Activation(tf.nn.softmax, name="softmax")(x_dec)

    model = keras.Model(inputs=inputs, outputs=outputs, name="deeplab-v3plus")
    return model
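A hedged usage sketch; the size respects the function's own checks (multiples of 32, minimum 320), and 21 categories is illustrative:

model = deeplab_v3plus(image_size=(320, 320), n_categories=21)
model.summary()  # final output: (None, 320, 320, 21) softmax maps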
Example #27
def add_res(x, x_res, filters):
    residual = layers.Conv2D(filters, 1, padding="same")(x_res)
    x = layers.add([x, residual])
    return x
def resnet_v1(input_shape, depth, num_classes=10):
    """ResNet Version 1 Model builder [a]

    Stacks of 2 x (3 x 3) Conv2D-BN-ReLU
    Last ReLU is after the shortcut connection.
    At the beginning of each stage, the feature map size is halved
    (downsampled) by a convolutional layer with strides=2, while
    the number of filters is doubled. Within each stage,
    the layers have the same number of filters and the
    same feature map sizes.
    Feature map sizes:
    stage 0: 32x32, 16
    stage 1: 16x16, 32
    stage 2:  8x8,  64
    The number of parameters is approximately the same as in Table 6 of [a]:
    ResNet20 0.27M
    ResNet32 0.46M
    ResNet44 0.66M
    ResNet56 0.85M
    ResNet110 1.7M

    Arguments:
        input_shape (tensor): shape of input image tensor
        depth (int): number of core convolutional layers
        num_classes (int): number of classes (CIFAR10 has 10)

    Returns:
        model (Model): Keras model instance
    """
    if (depth - 2) % 6 != 0:
        raise ValueError('depth should be 6n+2 (e.g. 20, 32, 44 in [a])')
    # start model definition.
    num_filters = 16
    num_res_blocks = int((depth - 2) / 6)

    inputs = Input(shape=input_shape)
    x = resnet_layer(inputs=inputs)
    # instantiate the stack of residual units
    for stack in range(3):
        for res_block in range(num_res_blocks):
            strides = 1
            # first layer but not first stack
            if stack > 0 and res_block == 0:
                strides = 2  # downsample
            y = resnet_layer(inputs=x,
                             num_filters=num_filters,
                             strides=strides)
            y = resnet_layer(inputs=y,
                             num_filters=num_filters,
                             activation=None)
            # first layer but not first stack
            if stack > 0 and res_block == 0:
                # linear projection residual shortcut
                # connection to match changed dims
                x = resnet_layer(inputs=x,
                                 num_filters=num_filters,
                                 kernel_size=1,
                                 strides=strides,
                                 activation=None,
                                 batch_normalization=False)
            x = add([x, y])
            x = Activation('relu')(x)
        num_filters *= 2

    # add classifier on top.
    # v1 does not use BN after last shortcut connection-ReLU
    x = AveragePooling2D(pool_size=8)(x)
    y = Flatten()(x)
    outputs = Dense(num_classes,
                    activation='softmax',
                    kernel_initializer='he_normal')(y)

    # instantiate model.
    model = Model(inputs=inputs, outputs=outputs)
    return model
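A hedged usage sketch matching the docstring (depth must be 6n+2; 20 gives ResNet20 at roughly 0.27M parameters); it assumes the resnet_layer helper used above is in scope:

model = resnet_v1(input_shape=(32, 32, 3), depth=20, num_classes=10)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])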
Example #29
def conv_block_2D(
        input_tensor,
        kernel_size,
        filters,
        stage,
        block,
        strides=(2, 2),
        trainable=True,
):
    """A block that has a conv layer at shortcut.
    # Arguments
        input_tensor: input tensor
        kernel_size: default 3, the kernel size of middle conv layer at main path
        filters: list of integers, the filterss of 3 conv layer at main path
        stage: integer, current stage label, used for generating layer names
        block: 'a','b'..., current block label, used for generating layer names
    # Returns
        Output tensor for the block.
    Note that from stage 3, the first conv layer at main path is with strides=(2,2)
    And the shortcut should have strides=(2,2) as well
    """
    filters1, filters2, filters3 = filters
    bn_axis = 3

    conv_name_1 = 'conv' + str(stage) + '_' + str(block) + '_1x1_reduce'
    bn_name_1 = 'conv' + str(stage) + '_' + str(block) + '_1x1_reduce/bn'
    x = Conv2D(
        filters1,
        (1, 1),
        strides=strides,
        kernel_initializer='orthogonal',
        use_bias=False,
        trainable=trainable,
        kernel_regularizer=l2(weight_decay),
        name=conv_name_1,
    )(input_tensor)
    x = BatchNormalization(axis=bn_axis, trainable=trainable,
                           name=bn_name_1)(x)
    x = Activation('relu')(x)

    conv_name_2 = 'conv' + str(stage) + '_' + str(block) + '_3x3'
    bn_name_2 = 'conv' + str(stage) + '_' + str(block) + '_3x3/bn'
    x = Conv2D(
        filters2,
        kernel_size,
        padding='same',
        kernel_initializer='orthogonal',
        use_bias=False,
        trainable=trainable,
        kernel_regularizer=l2(weight_decay),
        name=conv_name_2,
    )(x)
    x = BatchNormalization(axis=bn_axis, trainable=trainable,
                           name=bn_name_2)(x)
    x = Activation('relu')(x)

    conv_name_3 = 'conv' + str(stage) + '_' + str(block) + '_1x1_increase'
    bn_name_3 = 'conv' + str(stage) + '_' + str(block) + '_1x1_increase/bn'
    x = Conv2D(
        filters3,
        (1, 1),
        kernel_initializer='orthogonal',
        use_bias=False,
        trainable=trainable,
        kernel_regularizer=l2(weight_decay),
        name=conv_name_3,
    )(x)
    x = BatchNormalization(axis=bn_axis, trainable=trainable,
                           name=bn_name_3)(x)

    conv_name_4 = 'conv' + str(stage) + '_' + str(block) + '_1x1_proj'
    bn_name_4 = 'conv' + str(stage) + '_' + str(block) + '_1x1_proj/bn'
    shortcut = Conv2D(
        filters3,
        (1, 1),
        strides=strides,
        kernel_initializer='orthogonal',
        use_bias=False,
        trainable=trainable,
        kernel_regularizer=l2(weight_decay),
        name=conv_name_4,
    )(input_tensor)
    shortcut = BatchNormalization(axis=bn_axis,
                                  trainable=trainable,
                                  name=bn_name_4)(shortcut)

    x = layers.add([x, shortcut])
    x = Activation('relu')(x)
    return x
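A hedged usage sketch; weight_decay is a module-level constant the block reads, so it is stubbed here, and the input shape is illustrative (Input is assumed to come from the same keras.layers imports the block uses):

weight_decay = 1e-4
inp = Input(shape=(56, 56, 64))
out = conv_block_2D(inp, (3, 3), [64, 64, 256], stage=2, block='a')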
Example #30
def octConvBlock(ip_high,
                 ip_low,
                 filters,
                 kernel_size=(3, 3),
                 strides=(1, 1),
                 alpha=0.5,
                 padding='same',
                 dilation=None,
                 bias=False):
    """
    Constructs an Octave Convolution block.
    Accepts a pair of input tensors, and returns a pair of tensors.
    The first tensor is the high frequency pathway for both ip/op.
    The second tensor is the low frequency pathway for both ip/op.
    # Arguments:
        ip_high: keras tensor.
        ip_low: keras tensor.
        filters: number of filters in conv layer.
        kernel_size: conv kernel size.
        strides: strides of the conv.
        alpha: float in [0, 1]. Defines the fraction of filters
            allocated to the low frequency branch of the octave
            conv; the remainder go to the high frequency branch.
        padding: padding mode.
        dilation: dilation rate of the conv kernel.
        bias: bool, whether to use a bias or not.
    # Returns:
        a pair of tensors:
            - x_high: high frequency pathway.
            - x_low: low frequency pathway.
    """
    if dilation is None:
        dilation = (1, 1)

    low_low_filters = high_low_filters = int(alpha * filters)
    high_high_filters = low_high_filters = filters - low_low_filters

    avg_pool = AveragePooling2D()

    if strides[0] > 1:
        ip_high = avg_pool(ip_high)
        ip_low = avg_pool(ip_low)

    # High path
    x_high_high = Conv2D(high_high_filters,
                         kernel_size,
                         padding=padding,
                         dilation_rate=dilation,
                         use_bias=bias,
                         kernel_initializer='he_normal')(ip_high)

    x_low_high = Conv2D(low_high_filters,
                        kernel_size,
                        padding=padding,
                        dilation_rate=dilation,
                        use_bias=bias,
                        kernel_initializer='he_normal')(ip_low)
    x_low_high = UpSampling2D(interpolation='nearest')(x_low_high)

    # Low path
    x_low_low = Conv2D(low_low_filters,
                       kernel_size,
                       padding=padding,
                       dilation_rate=dilation,
                       use_bias=bias,
                       kernel_initializer='he_normal')(ip_low)

    x_high_low = avg_pool(ip_high)
    x_high_low = Conv2D(high_low_filters,
                        kernel_size,
                        padding=padding,
                        dilation_rate=dilation,
                        use_bias=bias,
                        kernel_initializer='he_normal')(x_high_low)

    # Merge paths
    x_high = add([x_high_high, x_low_high])
    x_low = add([x_low_low, x_high_low])

    return x_high, x_low
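A hedged usage sketch; the low-frequency input runs at half the spatial resolution of the high-frequency input, and all shapes are illustrative (Input is assumed to come from the same keras.layers imports the block uses):

ip_high = Input(shape=(32, 32, 16))
ip_low = Input(shape=(16, 16, 16))
x_high, x_low = octConvBlock(ip_high, ip_low, filters=32, alpha=0.5)
# x_high: (None, 32, 32, 16), x_low: (None, 16, 16, 16)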