Example #1

# Assumes tf.keras; the helpers conv_net, get_MLP, get_dense and dropout_dense
# and the globals mxlen, lstm_unit and prepare are defined elsewhere in the project.
from tensorflow.keras.layers import (Input, Bidirectional, LSTM, Lambda,
                                     Concatenate, Add, Dense)
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam


def model():
    # Image branch (50x200 RGB input) and text branch (token ids of length mxlen).
    input1 = Input((50, 200, 3))
    input2 = Input((mxlen, ))

    cnn_features = conv_net(input1)
    embedding_layer = prepare.embedding_layer(prepare.tokenizer.word_index,
                                              prepare.get_embeddings_index(),
                                              mxlen)
    embedding = embedding_layer(input2)
    bi_lstm = Bidirectional(
        LSTM(lstm_unit, implementation=2, return_sequences=False))
    lstm_encode = bi_lstm(embedding)
    # One feature vector per spatial location of the CNN feature map.
    shapes = cnn_features.shape
    w, h = shapes[1], shapes[2]
    features = []
    for k1 in range(w):
        for k2 in range(h):

            # Bind k1/k2 as default arguments so each Lambda keeps its own
            # indices instead of sharing the loop variables through a closure.
            def get_feature(t, i=k1, j=k2):
                return t[:, i, j, :]

            get_feature_layer = Lambda(get_feature)
            features.append(get_feature_layer(cnn_features))

    # Every ordered pair of location features, concatenated with the LSTM
    # encoding of the text input, forms one relation.
    relations = []
    concat = Concatenate()
    for feature1 in features:
        for feature2 in features:
            relations.append(concat([feature1, feature2, lstm_encode]))

    g_MLP = get_MLP(4, get_dense(4))
    f_MLP = get_MLP(2, get_dense(2))  # built here but not applied in this snippet

    mid_relations = []
    for r in relations:
        mid_relations.append(g_MLP(r))

    # Sum-pool the per-relation outputs into a single vector.
    combined_relation = Add()(mid_relations)

    rn = dropout_dense(combined_relation)
    rn = dropout_dense(rn)
    pred = Dense(1, activation='sigmoid')(rn)

    model = Model(inputs=[input1, input2], outputs=pred)
    optimizer = Adam(learning_rate=3e-5)  # `lr=` in older Keras versions
    model.compile(optimizer=optimizer,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
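
Example #1 relies on helper functions that are not shown anywhere in the listing (get_dense, get_MLP, dropout_dense). The sketch below is a guess at minimal implementations that would make the example runnable: get_dense returning a list of shared Dense layers, get_MLP chaining them into one callable, and dropout_dense as a Dense + Dropout block. All unit counts and dropout rates here are assumptions, not taken from the source.

from tensorflow.keras.layers import Dense, Dropout

def get_dense(n, units=256):
    # Hypothetical: n Dense layers that will be shared across all relations.
    return [Dense(units, activation='relu') for _ in range(n)]

def get_MLP(n, dense_layers):
    # Hypothetical: chain the first n Dense layers into one callable MLP.
    def mlp(x):
        for layer in dense_layers[:n]:
            x = layer(x)
        return x
    return mlp

def dropout_dense(x, units=256, rate=0.5):
    # Hypothetical: Dense followed by Dropout, used twice before the sigmoid head.
    x = Dense(units, activation='relu')(x)
    return Dropout(rate)(x)

Because g_MLP is built once and its Dense layers are reused inside the loop, the same weights are applied to every relation, which is what the pairwise loop in Example #1 depends on.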
Example #2

# Truncated in the source: this appears to be the body of the CNN feature
# extractor called conv_net in Example #1; the function signature is
# reconstructed, and bn_layer is defined elsewhere. Imports as in Example #1.
def conv_net(inputs):
    model = bn_layer(32, 3)(inputs)
    model = MaxPooling2D((2, 2), 2)(model)
    model = bn_layer(32, 3)(model)
    model = MaxPooling2D((2, 2), 2)(model)
    model = bn_layer(32, 3)(model)
    model = MaxPooling2D((2, 2), 2)(model)
    model = bn_layer(32, 3)(model)
    model = MaxPooling2D((2, 2), 2)(model)
    model = bn_layer(64, 3)(model)
    return model
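
bn_layer is also defined outside the snippet. Its name and call pattern, bn_layer(filters, kernel_size)(x), suggest a Conv2D + BatchNormalization block; the sketch below is one plausible reading, with padding and activation chosen arbitrarily.

from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation

def bn_layer(filters, kernel_size):
    # Hypothetical: convolution, batch normalisation, then ReLU.
    def block(x):
        x = Conv2D(filters, kernel_size, padding='same')(x)
        x = BatchNormalization()(x)
        return Activation('relu')(x)
    return block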


# Same two branches as in Example #1, built at module level.
input1 = Input((50, 200, 3))
input2 = Input((mxlen,))
cnn_features = conv_net(input1)
embedding_layer = prepare.embedding_layer(prepare.tokenizer.word_index, prepare.get_embeddings_index(), mxlen)
embedding = embedding_layer(input2)
bi_lstm = Bidirectional(LSTM(lstm_unit, implementation=2, return_sequences=False))
lstm_encode = bi_lstm(embedding)
shapes = cnn_features.shape
w, h = shapes[1], shapes[2]

def slice_1(t):
    # First row of a 4-D feature map: (batch, h, w, c) -> (batch, w, c).
    return t[:, 0, :, :]

def slice_2(t):
    # All remaining rows: (batch, h, w, c) -> (batch, h - 1, w, c).
    return t[:, 1:, :, :]

def slice_3(t):
    # First timestep of a 3-D sequence tensor: (batch, steps, dim) -> (batch, dim).
    return t[:, 0, :]
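
The listing cuts off here, so how these slice functions are wired into the model is not shown. They would normally be wrapped in Lambda layers before being applied to Keras tensors; the lines below are purely illustrative (the variable names are not from the source) and use the tensors defined above.

from tensorflow.keras.layers import Lambda

first_row = Lambda(slice_1)(cnn_features)    # first row of the CNN feature map
other_rows = Lambda(slice_2)(cnn_features)   # the remaining rows
first_token = Lambda(slice_3)(embedding)     # first timestep of the embedded sequence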