Example #1
def rnet_predict():
    origins = Input(shape=(3,), name='origins')
    inputs = Input(shape=(24, 24, 3), name='inputs')
    conv_1 = Conv2D(filters=28,
                    kernel_size=(3, 3),
                    kernel_initializer='zeros',
                    name='conv_1')(inputs)
    prelu_1 = PReLU(name='prelu_1')(conv_1)
    pool_1 = MaxPool2D(name='pool_1')(prelu_1)
    conv_2 = Conv2D(filters=48,
                    kernel_size=(3, 3),
                    kernel_initializer='zeros',
                    name='conv_2')(pool_1)
    prelu_2 = PReLU(name='prelu_2')(conv_2)
    pool_2 = MaxPool2D(pool_size=(3, 3), strides=2, name='pool_2')(prelu_2)
    conv_3 = Conv2D(filters=64,
                    kernel_size=(2, 2),
                    kernel_initializer='zeros',
                    name='conv_3')(pool_2)
    prelu_3 = PReLU(name='prelu_3')(conv_3)
    flatten_1 = Flatten(name='flatten_1')(prelu_3)
    dense_1 = Dense(units=128, kernel_initializer='zeros',
                    name='dense_1')(flatten_1)
    prelu_4 = PReLU(name='prelu_4')(dense_1)
    output_1 = Dense(units=2,
                     activation='softmax',
                     kernel_initializer='zeros',
                     name='output_1')(prelu_4)
    output_2 = Dense(units=4, kernel_initializer='zeros',
                     name='output_2')(prelu_4)
    output = Lambda(function=calibrate_and_nms,
                    name='calibrate_and_nms')([output_1, output_2, origins])
    model = Model(inputs=[inputs, origins], outputs=output)
    return model
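These snippets use the tensorflow.keras functional API. As a minimal sketch, the imports Example #1 appears to assume look like the following; calibrate_and_nms is a project-specific helper from the surrounding code base, not a Keras symbol:

# Import sketch (assumption: tensorflow.keras backend; calibrate_and_nms
# comes from the surrounding project and is not defined here).
from tensorflow.keras.layers import (Input, Conv2D, PReLU, MaxPool2D,
                                     Flatten, Dense, Lambda)
from tensorflow.keras.models import Model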
Example #2
def CNN3(input_shape=(48, 48, 1), n_classes=8):
    """
    参考论文实现
    A Compact Deep Learning Model for Robust Facial Expression Recognition
    :param input_shape:
    :param n_classes:
    :return:
    """
    # input
    input_layer = Input(shape=input_shape)
    x = Conv2D(32, (1, 1), strides=1, padding='same', activation='relu')(input_layer)
    # block1
    x = Conv2D(64, (3, 3), strides=1, padding='same')(x)
    x = PReLU()(x)
    x = Conv2D(64, (5, 5), strides=1, padding='same')(x)
    x = PReLU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    # block2
    x = Conv2D(64, (3, 3), strides=1, padding='same')(x)
    x = PReLU()(x)
    x = Conv2D(64, (5, 5), strides=1, padding='same')(x)
    x = PReLU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    # fc
    x = Flatten()(x)
    x = Dense(2048, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(n_classes, activation='softmax')(x)

    model = Model(inputs=input_layer, outputs=x)
    return model
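A minimal usage sketch for CNN3, assuming standard tensorflow.keras imports and one-hot encoded labels:

model = CNN3(input_shape=(48, 48, 1), n_classes=8)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',  # labels assumed one-hot
              metrics=['accuracy'])
model.summary()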
Example #3
def pnet_predict_v2():
    inputs = Input(shape=(None, None, 3), name='inputs')
    conv_1 = Conv2D(filters=10,
                    kernel_size=(3, 3),
                    kernel_initializer='zeros',
                    name='conv_1')(inputs)
    prelu_1 = PReLU(shared_axes=[1, 2], name='prelu_1')(conv_1)
    pool_1 = MaxPool2D(name='pool_1')(prelu_1)
    conv_2 = Conv2D(filters=16,
                    kernel_size=(3, 3),
                    kernel_initializer='zeros',
                    name='conv_2')(pool_1)
    prelu_2 = PReLU(shared_axes=[1, 2], name='prelu_2')(conv_2)
    conv_3 = Conv2D(filters=32,
                    kernel_size=(3, 3),
                    kernel_initializer='zeros',
                    name='conv_3')(prelu_2)
    prelu_3 = PReLU(shared_axes=[1, 2], name='prelu_3')(conv_3)
    output_1 = Conv2D(filters=2,
                      kernel_size=(1, 1),
                      activation='softmax',
                      kernel_initializer='zeros',
                      name='output_1')(prelu_3)
    output_2 = Conv2D(filters=4,
                      kernel_size=(1, 1),
                      kernel_initializer='zeros',
                      name='output_2')(prelu_3)
    output = Lambda(function=calibrate_and_nms_v2,
                    name='calibrate_and_nms')([output_1, output_2])
    model = Model(inputs=inputs, outputs=output)
    return model
Example #4
def pnet_train3(train_with_landmark = False):
    
    X = Input(shape = (12, 12, 3), name = 'Pnet_input')
    
    M = Conv2D(10, 3, strides = 1, padding = 'valid', kernel_initializer = glorot_normal, kernel_regularizer = l2(0.00001), name = 'Pnet_conv1')(X)
    M = PReLU(shared_axes = [1, 2], name = 'Pnet_prelu1')(M)
    M = MaxPooling2D(pool_size = 2, name = 'Pnet_maxpool1')(M) 
    
    M = Conv2D(16, 3, strides = 1, padding = 'valid', kernel_initializer = glorot_normal, kernel_regularizer = l2(0.00001), name = 'Pnet_conv2')(M)
    M = PReLU(shared_axes= [1, 2], name = 'Pnet_prelu2')(M)
    
    M = Conv2D(32, 3, strides = 1, padding = 'valid', kernel_initializer = glorot_normal, kernel_regularizer = l2(0.00001), name = 'Pnet_conv3')(M)
    M = PReLU(shared_axes= [1, 2], name = 'Pnet_prelu3')(M)
    
    Classifier_conv = Conv2D(2, 1, activation = 'softmax', name = 'Pnet_classifier_conv', kernel_initializer = glorot_normal)(M)
    Bbox_regressor_conv = Conv2D(4, 1, name = 'Pnet_bbox_regressor_conv', kernel_initializer = glorot_normal)(M)
    
    Classifier = Reshape((2, ), name = 'Pnet_classifier')(Classifier_conv)
    Bbox_regressor = Reshape((4, ), name = 'Pnet_bbox_regressor')(Bbox_regressor_conv)
    
    if train_with_landmark:
        Landmark_regressor_conv = Conv2D(12, 1, name = 'Pnet_landmark_regressor_conv', kernel_initializer = glorot_normal)(M)
        Landmark_regressor = Reshape((12, ), name = 'Pnet_landmark_regressor')(Landmark_regressor_conv)
        Pnet_output = Concatenate()([Classifier, Bbox_regressor, Landmark_regressor])
    else:
        Pnet_output = Concatenate()([Classifier, Bbox_regressor])
    
    model = Model(X, Pnet_output)
    
    return model
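The Reshape((2,)) and Reshape((4,)) heads only work because a 12x12 input shrinks to a 1x1 spatial map at this depth (12 -> 10 after conv1, 5 after pooling, 3 after conv2, 1 after conv3). A quick check, as a sketch that assumes the glorot_normal and l2 imports the source already relies on:

model = pnet_train3()
model.summary()  # Pnet_classifier_conv outputs (None, 1, 1, 2), so Reshape((2,)) is valid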
Example #5
def PNet(input_shape=None):
    if input_shape is None:
        input_shape = (None, None, 3)

    input_ = Input(input_shape)

    # Conv2D ---- 1
    x = Conv2D(10, kernel_size=(3, 3), strides=(1, 1), padding="valid")(input_)
    x = PReLU(shared_axes=[1, 2])(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same")(x)

    # Conv2D --- 2
    x = Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding="valid")(x)
    x = PReLU(shared_axes=[1, 2])(x)

    # Conv2D --- 3
    x = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding="valid")(x)
    x = PReLU(shared_axes=[1, 2])(x)

    output_1 = Conv2D(2, kernel_size=(1, 1), strides=(1, 1))(x)
    output_1 = Softmax(axis=3)(output_1)

    output_2 = Conv2D(4, kernel_size=(1, 1), strides=(1, 1))(x)

    pnet = Model(input_, [output_2, output_1])

    return pnet
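Because PNet is fully convolutional, it accepts arbitrary spatial sizes. A hedged sketch of running it on a dummy image (the image size here is arbitrary):

import numpy as np

pnet = PNet()
dummy = np.zeros((1, 120, 160, 3), dtype='float32')
bbox_reg, cls_prob = pnet.predict(dummy)  # outputs are ordered [regression, classification]
print(bbox_reg.shape, cls_prob.shape)     # spatial dimensions depend on the input size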
Example #6
def parallel_unets_with_tf(input_shape=(192, 640, 3)):
    '''Define Parallel U-Nets model.'''
    #Define inputs
    rgb_input_1 = Input(input_shape, name='input1')  #RGB Image at time=t
    rgb_input_2 = Input(input_shape, name='input2')  #RGB Image at time=(t-1)
    d_input_1 = Input((192, 640, 1), name='input3')  #Depth Image at time=t
    d_input_2 = Input((192, 640, 1), name='input4')  #Depth Image at time=(t-1)
    d1_reshape = Reshape((192, 640, 1))(d_input_1)
    d2_reshape = Reshape((192, 640, 1))(d_input_2)

    #Build RGBD
    rgbd1 = Concatenate()([rgb_input_1, d1_reshape])  #RGBD Image at time=t
    rgbd2 = Concatenate()([rgb_input_2, d2_reshape])  #RGBD Image at time=t-1

    cnn1 = cnn4()(rgbd1)
    cnn2 = cnn4()(rgbd2)
    flatten1 = Flatten()(cnn1)
    flatten2 = Flatten()(cnn2)
    dense_block1 = DenseBlock(input_shape=(flatten1.shape[0], 1))(flatten1)
    dense_block2 = DenseBlock(input_shape=(flatten2.shape[0], 1))(flatten2)
    merged = Concatenate()([dense_block1, dense_block2])
    flatten3 = Flatten()(merged)

    dense2 = Dense(128, activation=PReLU())(flatten3)
    rpy_output = Dense(3, activation='linear', name='rpy_output')(dense2)  #RPY

    dense4 = Dense(128, activation=PReLU())(flatten3)
    xyz_output = Dense(3, activation='linear', name='xyz_output')(dense4)  #XYZ

    #Define inputs and outputs
    model = Model(inputs=[rgb_input_1, rgb_input_2, d_input_1, d_input_2],
                  outputs=[rpy_output, xyz_output])

    return model
Example #7
def rnet(training = False, train_with_landmark = False):
    
    X = Input(shape = (24, 24, 3), name = 'Rnet_input')
    
    M = Conv2D(28, 3, strides = 1, padding = 'valid', name = 'Rnet_conv1')(X)
    M = PReLU(shared_axes=[1, 2], name = 'Rnet_prelu1')(M)
    M = MaxPooling2D(pool_size = 3, strides = 2, padding = 'same', name = 'Rnet_maxpool1')(M)
    
    M = Conv2D(48, 3, strides = 1, padding = 'valid', name = 'Rnet_conv2')(M)
    M = PReLU(shared_axes = [1, 2], name = 'Rnet_prelu2')(M)
    M = MaxPooling2D(pool_size = 3, strides = 2, name = 'Rnet_maxpool2')(M)
    
    M = Conv2D(64, 2, strides = 1, padding = 'valid', name = 'Rnet_conv3')(M)
    M = PReLU(shared_axes = [1, 2], name = 'Rnet_prelu3')(M)
    
    M = Flatten(name = 'Rnet_flatten')(M)
    M = Dense(128, name = 'Rnet_fc')(M)
    M = PReLU(name = 'Rnet_prelu4')(M)
    
    Classifier = Dense(1, activation = 'sigmoid', name = 'Rnet_classifier')(M)
    Bbox_regressor = Dense(4, name = 'Rnet_bbox_regressor')(M)
    
    if training and train_with_landmark:
        Landmark_regressor = Dense(12, name = 'Rnet_landmark_regressor')(M)
        Rnet_output = Concatenate()([Classifier, Bbox_regressor, Landmark_regressor])
        model = Model(X, Rnet_output)
    elif training:
        # without landmarks, only the classification and bbox outputs are concatenated
        Rnet_output = Concatenate()([Classifier, Bbox_regressor])
        model = Model(X, Rnet_output)
    else:
        model = Model(X, [Classifier, Bbox_regressor])
    
    return model
Example #8
def o_net(training=False):
    x = Input(shape=(48, 48, 3))
    y = Conv2D(32, 3, padding='same', strides=(1, 1), name='o_conv1')(x)
    y = PReLU(shared_axes=(1, 2), name='o_prelu1')(y)
    y = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='o_max_pooling1')(y)
    y = Conv2D(64, 3, padding='valid', strides=(1, 1), name='o_conv2')(y)
    y = PReLU(shared_axes=(1, 2), name='o_prelu2')(y)
    y = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='o_max_pooling2')(y)
    y = Conv2D(64, 3, padding='valid', strides=(1, 1), name='o_conv3')(y)
    y = PReLU(shared_axes=(1, 2), name='o_prelu3')(y)
    y = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='o_max_pooling3')(y)
    y = Conv2D(128, 2, padding='valid', strides=(1, 1), name='o_conv4')(y)
    y = PReLU(shared_axes=(1, 2), name='o_prelu4')(y)
    y = Flatten()(y)
    y = Dense(256, name='o_dense')(y)
    y = PReLU(name='o_prelu5')(y)

    classifier = Dense(2, activation='softmax', name='o_classifier')(y)
    bbox = Dense(4, name='o_bbox')(y)
    landmark = Dense(10, name='o_landmark')(y)

    if training:
        outputs = Concatenate(name='o_predict')([classifier, bbox, landmark])
        model = Model(inputs=[x], outputs=[outputs], name='O_Net')
    else:
        model = Model(inputs=[x], outputs=[classifier, bbox, landmark], name='O_Net')
    return model
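A short usage sketch showing the two modes: training mode returns a single concatenated tensor, inference mode returns the three heads separately:

train_net = o_net(training=True)   # one output: [classifier(2) | bbox(4) | landmark(10)]
infer_net = o_net(training=False)  # three outputs: classifier, bbox, landmark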
Example #9
    def build_model(self, input_shape):
        """モデル構築"""
        model = Sequential()
        model.add(Dense(self.params["units"][0], input_shape=input_shape))
        model.add(PReLU())
        model.add(BatchNormalization())
        model.add(Dropout(self.params["dropout"][0]))

        for l_i in range(1, self.params["layers"] - 1):
            model.add(Dense(self.params["units"][l_i]))
            model.add(PReLU())
            model.add(BatchNormalization())
            model.add(Dropout(self.params["dropout"][l_i]))

        model.add(Dense(self.params["nb_classes"]))
        model.add(Activation(self.params["pred_activation"]))
        if self.params["optimizer"] == "adam":
            opt = Adam(learning_rate=self.params["learning_rate"])
        else:
            opt = SGD(learning_rate=self.params["learning_rate"],
                      momentum=0.9,
                      nesterov=True)

        model.compile(
            loss=self.params["loss"],
            metrics=self.params["metrics"],
            optimizer=opt,
        )
        self.model = model
Example #10
def generator(image_shape, n_resnets):
    '''
    Builds a CNN based generator network with n_resnets resnet blocks and input of size image_shape
    '''
    input_image = Input(shape=image_shape)

    g = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(input_image)
    g = PReLU()(g)

    g_res = resnet_block(64, g)
    for i in range(n_resnets - 1):
        g_res = resnet_block(64, g_res)

    g_res = Conv2D(64, (3, 3), strides=(1, 1), padding='same')(g_res)
    g_res = BatchNormalization()(g_res)

    g = Add()([g, g_res])

    g = Conv2D(256, (3, 3), strides=(1, 1), padding='same')(g)
    g = tf.nn.depth_to_space(g, 2)
    g = PReLU()(g)

    g = Conv2D(256, (3, 3), strides=(1, 1), padding="same")(g)
    g = tf.nn.depth_to_space(g, 2)
    g = PReLU()(g)

    g = Conv2D(3, (9, 9), strides=(1, 1), padding="same")(g)

    output_image = Activation("tanh")(g)

    generator = Model(input_image, output_image)

    return generator
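For context, tf.nn.depth_to_space(g, 2) is the pixel-shuffle upsampling used in SRGAN-style generators: it trades channels for spatial resolution. A standalone shape check:

import tensorflow as tf

x = tf.zeros((1, 8, 8, 256))
y = tf.nn.depth_to_space(x, 2)  # block_size=2: channels / 4, height and width * 2
print(y.shape)                  # (1, 16, 16, 64)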
Example #11
def CNN3(input_shape=(48, 48, 1), n_classes=8):
    # input
    input_layer = Input(shape=input_shape)
    x = Conv2D(32, (1, 1), strides=1, padding='same',
               activation='relu')(input_layer)
    # block1
    x = Conv2D(64, (3, 3), strides=1, padding='same')(x)
    x = PReLU()(x)
    x = Conv2D(64, (5, 5), strides=1, padding='same')(x)
    x = PReLU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    # block2
    x = Conv2D(64, (3, 3), strides=1, padding='same')(x)
    x = PReLU()(x)
    x = Conv2D(64, (5, 5), strides=1, padding='same')(x)
    x = PReLU()(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=2)(x)
    # fc
    x = Flatten()(x)
    x = Dense(2048, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(1024, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(n_classes, activation='softmax')(x)

    model = Model(inputs=input_layer, outputs=x)
    return model
Example #12
        def __init__(self, imageWidth, imageHeight, isColour=True):

            # supports three channels for rgb else one for greyscale
            colourChannels = 3 if isColour else 1

            # we can infer our input tensor shape from the data.
            inputShape = (imageWidth, imageHeight, colourChannels)

            self.Layers = [
                ZeroPadding2D(padding=(0, 0),
                              data_format=None,
                              name="generativeNetwork"),
                Conv2D(5, (5, 5), padding="same"),  # input_shape=inputShape
                PReLU(alpha_initializer="zeros"),
                ZeroPadding2D(padding=(0, 0), data_format=None),
                Conv2D(3, (5, 5), padding="same", name="generative_output"),
                PReLU(alpha_initializer="zeros"),
            ]

            self.model = Sequential(self.Layers)
            self.compile()

        def compile(self):
            self.model.compile(optimizer="adam",
                               loss=["mean_squared_error"],
                               metrics=["accuracy"])
Example #13
def model_generator(input_shape=(64, 64, 3)):

    x_input = Input(shape=input_shape, name='Generator_Input')

    conv_1 = Conv2D(64, kernel_size=9, strides=1, padding='same', name='G_Conv_1')(x_input)
    conv_1 = PReLU(shared_axes=[1, 2])(conv_1)

    # Keep a copy for the skip connection after the residual blocks
    x_copy = conv_1

    ### RESIDUAL BLOCKS * 5
    conv_2 = _residual_block(conv_1)
    conv_3 = _residual_block(conv_2)
    conv_4 = _residual_block(conv_3)
    conv_5 = _residual_block(conv_4)
    conv_6 = _residual_block(conv_5)

    conv_7 = Conv2D(64, kernel_size=3, strides=1, padding='same', name='G_Conv_7')(conv_6)
    conv_7 = BatchNormalization()(conv_7)
    conv_7 = Add()([x_copy, conv_7])

    # Two upsampling layers: k=3, n=256, s=1
    conv_8 = Conv2D(256, kernel_size=3, strides=1, padding='same', name='G_Conv_8')(conv_7)
    conv_8 = keras.layers.Lambda(_pixel_with_lambda(scale=2))(conv_8)
    conv_8 = PReLU(shared_axes=[1, 2])(conv_8)

    conv_9 = Conv2D(256, kernel_size=3, strides=1, padding='same', name='G_Conv_9')(conv_8)
    conv_9 = keras.layers.Lambda(_pixel_with_lambda(scale=2))(conv_9)
    conv_9 = PReLU(shared_axes=[1, 2])(conv_9)

    conv_10 = Conv2D(3, kernel_size=9, strides=1, padding='same', activation='tanh', name='G_Conv_10')(conv_9)

    return keras.Model(x_input, conv_10, name='Generator')
Example #14
def get_generator(input_shape):
    """ generator model."""
    nin = Input(input_shape)
    n = Conv2D(64, 9, 1, padding='SAME', kernel_initializer='he_normal')(nin)
    n = PReLU(shared_axes=[1, 2])(n)
    temp = n

    # B residual blocks
    for i in range(16):
        nn = Conv2D(64, 3, 1, padding='SAME', use_bias=False, kernel_initializer='he_normal')(n)

        nn = BatchNormalization()(nn)
        nn = PReLU(shared_axes=[1, 2])(nn)

        nn = Conv2D(64, 3, 1, padding='SAME', use_bias=False, kernel_initializer='he_normal')(nn)
        nn = BatchNormalization()(nn)
        nn = Add()([n, nn])
        n = nn

    n = Conv2D(64, 3, 1, padding='SAME', use_bias=False, kernel_initializer='he_normal')(n)
    n = BatchNormalization()(n)
    n = Add()([n, temp])
    # B residual blocks end

    n = Conv2D(256, 3, 1, padding='SAME', kernel_initializer='he_normal')(n)
    n = SubPixelConv2D(upsample_factor=2, nchannels=64)(n)
    n = PReLU(shared_axes=[1, 2])(n)

    n = Conv2D(256, 3, 1, padding='SAME', kernel_initializer='he_normal')(n)
    n = SubPixelConv2D(upsample_factor=2, nchannels=64)(n)
    n = PReLU(shared_axes=[1, 2])(n)

    nn = Conv2D(3, 9, 1, padding='SAME', kernel_initializer='he_normal')(n)
    return Model(inputs=nin, outputs=nn, name="generator")
Example #15
def down_trans(inputs, nf, nconvs, bn, dr, ty='v', name='block'):
    # inputs = Input((None, None, None, nch))

    downconv = Conv3D(nf,
                      2,
                      padding='valid',
                      strides=(2, 2, 2),
                      name=name + '_Conv3D_0')(inputs)
    downconv = PReLU(shared_axes=[1, 2, 3], name=name + '_PReLU_0')(downconv)
    if bn:
        downconv = BatchNormalization(name=name + '_bn_0')(downconv)
    if dr:
        downconv = Dropout(0.5, name=name + '_dr_0')(downconv)

    conv = downconv
    for i in range(nconvs):
        conv = Conv3D(nf,
                      3,
                      padding='same',
                      name=name + '_Conv3D_' + str(i + 1))(conv)
        conv = PReLU(shared_axes=[1, 2, 3],
                     name=name + '_PReLU_' + str(i + 1))(conv)
        if bn:
            conv = BatchNormalization(name=name + '_bn_' + str(i + 1))(conv)

    if ty == 'v':  # V-Net
        d = add([conv, downconv])
    elif ty == 'u':  # U-Net
        d = conv
    else:
        raise Exception("please assign the model net_type: 'v' or 'u'.")

    # m = Model(inputs=inputs, outputs=d)

    return d
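A minimal sketch wiring the block into a standalone model, assuming the Keras 3D layers used above are imported; the input sides should be even because of the stride-2 downsampling convolution:

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inputs = Input((32, 32, 32, 1))
d = down_trans(inputs, nf=16, nconvs=2, bn=True, dr=False, ty='v', name='down1')
m = Model(inputs, d)  # output shape: (None, 16, 16, 16, 16)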
Example #16
    def build_rnet(self, input_shape=None):
        if input_shape is None:
            input_shape = (24, 24, 3)

        r_inp = Input(input_shape)

        r_layer = Conv2D(28, kernel_size=(3, 3), strides=(1, 1), padding="valid")(r_inp)
        r_layer = PReLU(shared_axes=[1, 2])(r_layer)
        r_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(r_layer)

        r_layer = Conv2D(48, kernel_size=(3, 3), strides=(1, 1), padding="valid")(r_layer)
        r_layer = PReLU(shared_axes=[1, 2])(r_layer)
        r_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")(r_layer)

        r_layer = Conv2D(64, kernel_size=(2, 2), strides=(1, 1), padding="valid")(r_layer)
        r_layer = PReLU(shared_axes=[1, 2])(r_layer)
        r_layer = Flatten()(r_layer)
        r_layer = Dense(128)(r_layer)
        r_layer = PReLU()(r_layer)

        r_layer_out1 = Dense(2)(r_layer)
        r_layer_out1 = Softmax(axis=1)(r_layer_out1)

        r_layer_out2 = Dense(4)(r_layer)

        r_net = Model(r_inp, [r_layer_out2, r_layer_out1])

        return r_net
Example #17
def new_attention_3d_block(
    slt_api_num,
    feature_dim,
    name='',
):
    """
    :param query: (None,D)
    :param key: (None,slt_api_num,D)
    :param value: (None,slt_api_num,D) 一般等于key
    :return:
    """

    query = Input(shape=(feature_dim, ), name=name + 'query_input')
    key = Input(shape=(
        slt_api_num,
        feature_dim,
    ), name=name + 'key_input')
    value = Input(shape=(
        slt_api_num,
        feature_dim,
    ),
                  name=name + 'value_input')

    Repeat_query = RepeatVector(slt_api_num)(query)  # (None,slt_api_num,D)
    att_score = Concatenate(name=name + 'att_info_concate')(
        [Repeat_query, key])  # (None,slt_api_num,2*D) omitting the outer product and difference seems to work better?
    # outer_prod = Multiply()([Repeat_query,key])
    # sub = Subtract()([Repeat_query,key])
    # att_score = Concatenate(name=name+'att_info_concate')([Repeat_query,key,outer_prod,sub]) # (None,slt_api_num,4*D)

    att_score = Dense(36)(att_score)  # (None,slt_api_num,36)
    att_score = PReLU()(att_score)
    if 'new_3layer' in new_Para.param.CI_handle_slt_apis_mode:
        att_score = Dense(16)(att_score)  # (None,slt_api_num,16)
        att_score = PReLU()(att_score)

    # att_score = Dense(1, activation='linear')(att_score)  # (None,slt_api_num,1)
    att_score = Dense(1)(att_score)  # (None,slt_api_num,1), final non-linearity below
    att_score = PReLU()(att_score)

    att_score = Reshape((slt_api_num,))(att_score)  # (None,slt_api_num)
    a_probs = Dense(slt_api_num, activation='softmax')(
        att_score
    )  # (None,slt_api_num) is this extra Dense layer needed? Adding it seems to help; a Dense usually precedes the softmax anyway
    # a_probs = Activation('softmax')(att_score)
    a_probs = Reshape((slt_api_num, 1))(a_probs)  # (None,slt_api_num,1)

    # # a direct Dense + softmax layer here is problematic!
    # a_probs = Dense(slt_api_num, activation='softmax')(att_score) # (None,slt_api_num,16)
    # a_probs = Permute((2, 1))(a_probs)

    output_attention_mul = Multiply(name=name + 'attention_mul')(
        [a_probs, value])  # shape=(?,slt_api_num, D)
    att_result = Lambda(lambda x: tf.reduce_sum(x, axis=1))(
        output_attention_mul)  # (None,D)

    model = Model(inputs=[query, key, value],
                  outputs=[att_result],
                  name=name + 'attBlock')
    return model
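A hedged usage sketch of the attention block; this assumes the project-global new_Para configuration object is importable, since the function reads it:

import numpy as np

att = new_attention_3d_block(slt_api_num=3, feature_dim=8, name='demo_')
q = np.random.rand(2, 8).astype('float32')     # query: (batch, D)
k = np.random.rand(2, 3, 8).astype('float32')  # key:   (batch, slt_api_num, D)
out = att.predict([q, k, k])                   # value == key; output shape (2, 8)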
Example #18
    def build_onet(self, input_shape=None):
        if input_shape is None:
            input_shape = (48, 48, 3)

        o_inp = Input(input_shape)
        o_layer = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_inp)
        o_layer = PReLU(shared_axes=[1, 2])(o_layer)
        o_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(o_layer)

        o_layer = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_layer)
        o_layer = PReLU(shared_axes=[1, 2])(o_layer)
        o_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")(o_layer)

        o_layer = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_layer)
        o_layer = PReLU(shared_axes=[1, 2])(o_layer)
        o_layer = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same")(o_layer)

        o_layer = Conv2D(128, kernel_size=(2, 2), strides=(1, 1), padding="valid")(o_layer)
        o_layer = PReLU(shared_axes=[1, 2])(o_layer)

        o_layer = Flatten()(o_layer)
        o_layer = Dense(256)(o_layer)
        o_layer = PReLU()(o_layer)

        o_layer_out1 = Dense(2)(o_layer)
        o_layer_out1 = Softmax(axis=1)(o_layer_out1)
        o_layer_out2 = Dense(4)(o_layer)
        o_layer_out3 = Dense(10)(o_layer)

        o_net = Model(o_inp, [o_layer_out2, o_layer_out3, o_layer_out1])
        return o_net
Example #19
def encode_block_lstm(size, inputs, kernel, stride, activation, kinit, padding, max_pool=True,
                      batch_normalization=False, mask=None):
    result = []
    use_bias = not batch_normalization
    x, state_h, state_c = ConvLSTM2D(size, kernel_size=kernel, strides=stride,
                                     kernel_initializer=kinit, use_bias=use_bias, activation='linear',
                                     padding=padding, return_sequences=True, return_state=True)(inputs, mask=mask)
    x = BatchNormalization()(x) if batch_normalization else x
    x = PReLU()(x)  # in theory this should mitigate the vanishing-gradient problem, which is arguably more acute in RNNs

    # can't set initial_state=(state_h, state_c) due to a bug in keras
    x, state_h, state_c = ConvLSTM2D(size, kernel_size=kernel, strides=stride,
                                     kernel_initializer=kinit, use_bias=use_bias, activation='linear',
                                     padding=padding, return_sequences=True, return_state=True)(x, mask=mask)

    x = BatchNormalization()(x) if batch_normalization else x
    x = PReLU()(x)
    # result.append(x)
    result.append(state_c)

    if max_pool:
        pool1 = MaxPooling3D(pool_size=(2, 2, 2))(x)
        result.append(pool1)
    else:
        result.append(None)

    return result
Example #20
    def add_residual_block(cls, residual_input):
        '''
        Builds a single residual block with 2 Conv2D layers (with batch normalization
        and PReLU) and attaches it to the previous layers

        :param residual_input: tf.keras.layers.* - layer to perform a skip on
        :return: tf.keras.layers.PReLU - output layer for the residual block
        '''
        conv1 = Conv2D(filters=64,
                       kernel_size=(3, 3),
                       strides=1,
                       padding='same',
                       kernel_initializer=KERNEL_INIT)(residual_input)
        bn1 = BatchNormalization()(conv1)
        rec1 = PReLU()(bn1)

        conv2 = Conv2D(filters=64,
                       kernel_size=(3, 3),
                       strides=1,
                       padding='same',
                       kernel_initializer=KERNEL_INIT)(rec1)
        bn2 = BatchNormalization()(conv2)

        add1 = Add()([bn2, residual_input])
        rec2 = PReLU()(add1)

        return rec2
Example #21
def onet(training = False):
    
    X = Input(shape = (48, 48, 3), name = 'Onet_input')

    M = Conv2D(32, 3, strides = 1, padding = 'valid', name = 'Onet_conv1')(X)
    M = PReLU(shared_axes = [1, 2], name = 'Onet_prelu1')(M)
    M = MaxPooling2D(pool_size = 3, strides = 2, padding = 'same', name = 'Onet_maxpool1')(M)
        
    M = Conv2D(64, 3, strides = 1, padding = 'valid', name = 'Onet_conv2')(M)
    M = PReLU(shared_axes = [1, 2], name = 'Onet_prelu2')(M)
    M = MaxPooling2D(pool_size = 3, strides = 2, padding = 'valid', name = 'Onet_maxpool2')(M)
        
    M = Conv2D(64, 3, strides = 1, padding = 'valid', name = 'Onet_conv3')(M)
    M = PReLU(shared_axes = [1,2], name = 'Onet_prelu3')(M)
    M = MaxPooling2D(pool_size = 2, padding = 'valid', name = 'Onet_maxpool3')(M)
    
    M = Conv2D(128, 2, strides = 1, padding = 'valid', name = 'Onet_conv4')(M)
    M = PReLU(shared_axes = [1, 2], name='Onet_prelu4')(M)
    
    M = Flatten(name = 'Onet_flatten')(M)
    M = Dense(256, name = 'Onet_fc')(M)
    M = PReLU(name = 'Onet_prelu5')(M)
    
    Classifier = Dense(2, activation = 'softmax', name='Onet_classifier')(M)
    Bbox_regressor = Dense(4, name = 'Onet_bbox_regressor')(M)
    Landmark_regressor = Dense(12, name = 'Onet_landmark_regressor')(M)
    
    if training:
        Onet_output = Concatenate()([Classifier, Bbox_regressor, Landmark_regressor])
        
        model = Model(X, Onet_output)
    else:
        model = Model(X, [Classifier, Bbox_regressor, Landmark_regressor])
    
    return model
Example #22
    def build_pnet(self, input_shape=None):
        if input_shape is None:
            input_shape = (None, None, 3)

        p_inp = Input(input_shape)

        p_layer = Conv2D(10,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         padding="valid")(p_inp)
        p_layer = PReLU(shared_axes=[1, 2])(p_layer)
        p_layer = MaxPooling2D(pool_size=(2, 2),
                               strides=(2, 2),
                               padding="same")(p_layer)

        p_layer = Conv2D(16,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         padding="valid")(p_layer)
        p_layer = PReLU(shared_axes=[1, 2])(p_layer)

        p_layer = Conv2D(32,
                         kernel_size=(3, 3),
                         strides=(1, 1),
                         padding="valid")(p_layer)
        p_layer = PReLU(shared_axes=[1, 2])(p_layer)

        p_layer_out1 = Conv2D(2, kernel_size=(1, 1), strides=(1, 1))(p_layer)
        p_layer_out1 = Softmax(axis=3)(p_layer_out1)

        p_layer_out2 = Conv2D(4, kernel_size=(1, 1), strides=(1, 1))(p_layer)

        p_net = Model(p_inp, [p_layer_out2, p_layer_out1])

        return p_net
Example #23
def RNet(input_shape=None):
    if input_shape is None:
        input_shape = (24, 24, 3)

    input_ = Input(input_shape)

    # Conv2D --- 1
    x = Conv2D(28, kernel_size=(3, 3), strides=(1, 1), padding="valid")(input_)
    x = PReLU(shared_axes=[1, 2])(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(x)

    # Conv2D --- 2
    x = Conv2D(48, kernel_size=(3, 3), strides=(1, 1), padding="valid")(x)
    x = PReLU(shared_axes=[1, 2])(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")(x)

    # Conv2D --- 3
    x = Conv2D(64, kernel_size=(2, 2), strides=(1, 1), padding="valid")(x)
    x = PReLU(shared_axes=[1, 2])(x)
    x = Flatten()(x)
    x = Dense(128)(x)
    x = PReLU()(x)

    output_1 = Dense(2)(x)
    output_1 = Softmax(axis=1)(output_1)

    output_2 = Dense(4)(x)

    rnet = Model(input_, [output_2, output_1])

    return rnet
Example #24
def create_Pnet(weight_path):
    # input: h,w,3 (arbitrary size)
    input = Input(shape=[None, None, 3])

    # h,w,3 -> h/2,w/2,10
    x = Conv2D(10, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1, 2], name='PReLU1')(x)
    x = MaxPool2D(pool_size=2)(x)

    # h/2,w/2,10 -> h/2,w/2,16
    x = Conv2D(16, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1, 2], name='PReLU2')(x)
    # h/2,w/2,32
    x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1, 2], name='PReLU3')(x)

    # h/2, w/2, 2
    classifier = Conv2D(2, (1, 1), activation='softmax', name='conv4-1')(x)
    # no activation function, i.e. linear
    # h/2, w/2, 4
    bbox_regress = Conv2D(4, (1, 1), name='conv4-2')(x)

    model = Model([input], [classifier, bbox_regress])
    model.load_weights(weight_path, by_name=True)
    return model
Example #25
    def DenseDownProj(x_in, num_filters, kernel_size=12):
        h_in = Conv2D(num_filters, kernel_size=1, strides=1, padding='same',
                      kernel_initializer=test_initializer,
                      kernel_regularizer=l2(reg_scale))(x_in)
        h_in = PReLU(alpha_initializer='zero', shared_axes=[1, 2])(h_in)

        l0 = Conv2D(num_filters, kernel_size=kernel_size, strides=(8,8), padding='same',
                    kernel_initializer=test_initializer,
                    kernel_regularizer=l2(reg_scale))(h_in)
        l0 = PReLU(alpha_initializer='zero', shared_axes=[1, 2])(l0)

        h0 = Conv2DTranspose(num_filters, kernel_size=kernel_size, strides=(8, 8), padding='same',
                             kernel_initializer=test_initializer,
                             kernel_regularizer=l2(reg_scale))(l0)
        h0 = PReLU(alpha_initializer='zero', shared_axes=[1, 2])(h0)

        e0 = subtract([h0, h_in])

        l1 = Conv2D(num_filters, kernel_size=kernel_size, strides=(8,8), padding='same',
                    kernel_initializer=test_initializer,
                    kernel_regularizer=l2(reg_scale))(e0)
        l1 = PReLU(alpha_initializer='zero', shared_axes=[1, 2])(l1)

        out = add([l1, l0])

        return out
Example #26
def create_Rnet(weight_path):
    input = Input(shape=[24, 24, 3])
    # 24,24,3 -> 11,11,28
    x = Conv2D(28, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1, 2], name='prelu1')(x)
    x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)

    # 11,11,28 -> 4,4,48
    x = Conv2D(48, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu2')(x)
    x = MaxPool2D(pool_size=3, strides=2)(x)

    # 4,4,48 -> 3,3,64
    x = Conv2D(64, (2, 2), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu3')(x)
    # 3,3,64 -> 64,3,3
    x = Permute((3, 2, 1))(x)
    x = Flatten()(x)
    # 576 -> 128
    x = Dense(128, name='conv4')(x)
    x = PReLU(name='prelu4')(x)
    # 128 -> 2 128 -> 4
    classifier = Dense(2, activation='softmax', name='conv5-1')(x)
    bbox_regress = Dense(4, name='conv5-2')(x)
    model = Model([input], [classifier, bbox_regress])
    model.load_weights(weight_path, by_name=True)
    return model
Example #27
def get_model(
    input_len,
    reg,
    hidden1,
    hidden2,
    hidden3,
):
    model = Sequential()
    model.add(BatchNormalization())
    model.add(
        Dense(hidden1,
              kernel_regularizer=regularizers.l2(reg),
              activity_regularizer=regularizers.l2(reg),
              input_shape=(input_len, )))
    model.add(PReLU())
    model.add(
        Dense(hidden2,
              kernel_regularizer=regularizers.l2(reg),
              activity_regularizer=regularizers.l2(reg)))
    model.add(PReLU())
    model.add(
        Dense(hidden3,
              kernel_regularizer=regularizers.l2(reg),
              activity_regularizer=regularizers.l2(reg)))
    model.add(PReLU())
    model.add(Dense(1, activation="sigmoid"))

    model.compile(loss="binary_crossentropy",
                  optimizer=Adam(),
                  metrics=[tensorflow.keras.metrics.AUC()])

    return model
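A minimal usage sketch, assuming feature vectors of length input_len and binary labels:

model = get_model(input_len=20, reg=1e-4, hidden1=64, hidden2=32, hidden3=16)
# model.fit(X_train, y_train, epochs=10, batch_size=32)  # X_train: (n, 20), y_train in {0, 1}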
Example #28
def create_Onet(weight_path):
    input = Input(shape=[48, 48, 3])
    # 48,48,3 -> 23,23,32
    x = Conv2D(32, (3, 3), strides=1, padding='valid', name='conv1')(input)
    x = PReLU(shared_axes=[1, 2], name='prelu1')(x)
    x = MaxPool2D(pool_size=3, strides=2, padding='same')(x)
    # 23,23,32 -> 10,10,64
    x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv2')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu2')(x)
    x = MaxPool2D(pool_size=3, strides=2)(x)
    # 10,10,64 -> 4,4,64
    x = Conv2D(64, (3, 3), strides=1, padding='valid', name='conv3')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu3')(x)
    x = MaxPool2D(pool_size=2)(x)
    # 4,4,64 -> 3,3,128
    x = Conv2D(128, (2, 2), strides=1, padding='valid', name='conv4')(x)
    x = PReLU(shared_axes=[1, 2], name='prelu4')(x)
    # 3,3,128 -> 128,3,3
    x = Permute((3, 2, 1))(x)

    # 1152 -> 256
    x = Flatten()(x)
    x = Dense(256, name='conv5')(x)
    x = PReLU(name='prelu5')(x)

    # classification head
    # 256 -> 2, 256 -> 4, 256 -> 10
    classifier = Dense(2, activation='softmax', name='conv6-1')(x)
    bbox_regress = Dense(4, name='conv6-2')(x)
    landmark_regress = Dense(10, name='conv6-3')(x)

    model = Model([input], [classifier, bbox_regress, landmark_regress])
    model.load_weights(weight_path, by_name=True)

    return model
Example #29
def inception_resnet_B(x, scope="inception_b", training=True):
    with tf.name_scope(scope):
        init = x

        split_conv_x1 = Conv2D(filters=64, kernel_size=1, padding='same')(x)
        split_conv_x1 = BatchNormalization()(split_conv_x1, training=training)
        split_conv_x1 = PReLU()(split_conv_x1)

        split_conv_x2 = Conv2D(filters=32, kernel_size=1, padding='same')(x)
        split_conv_x2 = BatchNormalization()(split_conv_x2, training=training)
        split_conv_x2 = PReLU()(split_conv_x2)
        split_conv_x2 = Conv2D(filters=32, kernel_size=(7, 1),
                               padding='same')(split_conv_x2)
        split_conv_x2 = BatchNormalization()(split_conv_x2, training=training)
        split_conv_x2 = PReLU()(split_conv_x2)
        split_conv_x2 = Conv2D(filters=64, kernel_size=(1, 7),
                               padding='same')(split_conv_x2)
        split_conv_x2 = PReLU()(split_conv_x2)

        x = concatenate([split_conv_x1, split_conv_x2], axis=-1)
        x = Conv2D(filters=256, kernel_size=1, padding='same')(x)

        x = tf.math.scalar_mul(0.1, x)
        x = add([init, x])

        x = BatchNormalization()(x, training=training)
        x = PReLU()(x)

        return x
Example #30
def EMB_ECODER_BACILLUS_02(input_shape, n_class):
    # Input layer
    x = Input(shape=input_shape)

    emb = Embedding(4, 9, input_length=input_shape[0])(x)

    # Block 01
    block1 = Conv1D(
        filters=128,
        kernel_size=5,
        padding='same',
        strides=1)(emb)
    block1 = keras_contrib.InstanceNormalization()(block1)
    block1 = PReLU(shared_axes=[1])(block1)
    block1 = Dropout(rate=0.2)(block1)
    block1 = MaxPooling1D(pool_size=2)(block1)

    # Block 02
    block2 = Conv1D(
        filters=256,
        kernel_size=11,
        padding='same',
        strides=1)(emb)
    block2 = keras_contrib.InstanceNormalization()(block2)
    block2 = PReLU(shared_axes=[1])(block2)
    block2 = Dropout(rate=0.2)(block2)
    block2 = MaxPooling1D(pool_size=2)(block2)

    # # Block 03
    # block3 = Conv1D(
    #     filters=256,
    #     kernel_size=21,
    #     padding='same',
    #     strides=1)(emb)
    # block3 = keras_contrib.InstanceNormalization()(block2)
    # block3 = PReLU(shared_axes=[1])(block3)
    # block3 = Dropout(rate=0.2)(block3)
    # block3 = MaxPooling1D(pool_size=2)(block3)

    # split for attention
    attention_data = Lambda(lambda x: x)(block2)
    attention_softmax = Lambda(lambda x: x)(block2)

    # attention mechanism
    attention_softmax = Softmax()(attention_softmax)
    multiply_layer = Multiply()([attention_softmax, attention_data])

    # Fully connected layers
    dense_layer = Dense(units=256, activation='sigmoid')(multiply_layer)
    dense_layer = keras_contrib.InstanceNormalization()(dense_layer)

    # Classification layer
    flatten_layer = Flatten()(dense_layer)
    output_layer = Dense(units=n_class, activation='sigmoid')(flatten_layer)

    # Create model object
    model = models.Model(inputs=[x], outputs=[output_layer])

    return model
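A hedged usage sketch: the Embedding(4, 9, ...) layer implies integer-encoded sequences over a 4-letter alphabet (e.g. DNA), and keras_contrib's InstanceNormalization must be importable under the alias used above:

model = EMB_ECODER_BACILLUS_02(input_shape=(100,), n_class=5)
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()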