# Split the test matrix into the two padded question sequences
Q1_test = X_test[:, 0]
Q2_test = X_test[:, 1]
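# (Assumption, for illustration: X_test is taken to have shape
# (n_samples, 2, MAX_SEQUENCE_LENGTH), so each slice above is one
# (n_samples, MAX_SEQUENCE_LENGTH) array of padded token ids.)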

# Define the model
# (imports assumed by this snippet)
import keras.backend as K
from keras.layers import Input, Embedding, TimeDistributed, Dense, Lambda
from keras.models import Model

question1 = Input(shape=(MAX_SEQUENCE_LENGTH,))
question2 = Input(shape=(MAX_SEQUENCE_LENGTH,))
# Debug output: inspect an input tensor and the pretrained embedding setup
print(question1)
print("word_embedding_matrix", word_embedding_matrix)
print(EMBEDDING_DIM)
# Embed question 1 with frozen pretrained vectors, then project each timestep
q1 = Embedding(nb_words + 1,
               EMBEDDING_DIM,
               weights=[word_embedding_matrix],
               input_length=MAX_SEQUENCE_LENGTH,
               trainable=False)(question1)
print("q1.shape1", q1.get_shape())
q1 = TimeDistributed(Dense(EMBEDDING_DIM, activation='relu'))(q1)
print("q1.shape2", q1.get_shape())

# Max over the time axis collapses (timesteps, EMBEDDING_DIM) to one vector
q1 = Lambda(lambda x: K.max(x, axis=1), output_shape=(EMBEDDING_DIM,))(q1)
print("q1.shape3", q1.get_shape())

# The same pipeline for question 2
q2 = Embedding(nb_words + 1,
               EMBEDDING_DIM,
               weights=[word_embedding_matrix],
               input_length=MAX_SEQUENCE_LENGTH,
               trainable=False)(question2)
q2 = TimeDistributed(Dense(EMBEDDING_DIM, activation='relu'))(q2)
q2 = Lambda(lambda x: K.max(x, axis=1), output_shape=(EMBEDDING_DIM,))(q2)
print("q2 shape", q2.get_shape())
# Experiment here: try a few operations that combine the two vectors and see what works.
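# A minimal sketch (not part of the original example) of one such combination:
# concatenate the two pooled vectors with their absolute difference and
# elementwise product, then score the pair with a sigmoid head. The merge
# recipe and layer sizes are assumptions, shown only for illustration.
# Requires: from keras.layers import Concatenate, Multiply
diff = Lambda(lambda t: K.abs(t[0] - t[1]))([q1, q2])
prod = Multiply()([q1, q2])
merged = Concatenate()([q1, q2, diff, prod])
is_duplicate = Dense(1, activation='sigmoid')(merged)
pair_model = Model(inputs=[question1, question2], outputs=is_duplicate)
pair_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])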
Example #2
    rb = Flatten()(rb)
    pb = Flatten()(pb)

    # Reduce the user and movie bias tensors to scalars
    rb = Dense(units=1, activation='relu')(rb)
    pb = Dense(units=1, activation='relu')(pb)
    # Predicted rating = embedding interaction plus the two bias terms
    add = Add()([pb, dot, rb])
    out = Dense(units=1, activation='relu')(add)  # defined but unused; the model outputs `add`

    # Alternatives tried during development:
    #   o = Flatten()(add)
    #   cat = Concatenate()([rb, pb, dot])
    #   out = Dense(units=1, activation='relu')(pb)
    model = Model(inputs=[u, m], outputs=[add])

    # 'accuracy' is a poor fit for a regression loss, but listing it creates
    # the 'val_acc' quantity that the checkpoint below monitors
    model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])
    print(dot.get_shape())
    print(rb.get_shape())
    print(pb.get_shape())
    # Train for 7 epochs; ModelCheckpoint keeps the weights with the best
    # validation accuracy, so a separate model.save() call is unnecessary
    model.fit([user, movie],
              rate,
              validation_data=([userv, moviev], ratev),
              epochs=7,
              batch_size=32,
              callbacks=[
                  ModelCheckpoint('model',
                                  monitor='val_acc',
                                  save_best_only=True)
              ])
else:
    # Reload the checkpoint written by ModelCheckpoint during training
    model = load_model('model')
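
# The tensors u, m, dot, rb and pb are created earlier in this example (not
# shown here). A hedged sketch of a plausible upstream graph, assuming the
# classic matrix-factorization layout; n_users, n_movies and the embedding
# width 32 are illustrative assumptions, not from the original:
# from keras.layers import Input, Embedding, Flatten, Dot
# u = Input(shape=(1,))                          # user id
# m = Input(shape=(1,))                          # movie id
# uemb = Flatten()(Embedding(n_users, 32)(u))    # user latent factors
# memb = Flatten()(Embedding(n_movies, 32)(m))   # movie latent factors
# dot = Dot(axes=1)([uemb, memb])                # interaction term
# rb = Embedding(n_users, 1)(u)                  # per-user bias (flattened above)
# pb = Embedding(n_movies, 1)(m)                 # per-movie bias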
Example #3
# Earlier approach, kept for reference: gather pretrained vectors directly
# from a backend tensor instead of using an Embedding layer.
#def word_idx2embedding(idx):
#    import keras.backend as be
#    return be.gather(emb_vocab_tensor, tf.to_int64(idx))


word_idx_in = Input(shape=(input_len,))
#word_vecs = Lambda(word_idx2embedding,
#                     output_shape=(input_len, emb_model.vector_size+1))(word_idx_in)

# Trainable 50-dimensional word embeddings, one row per vocabulary entry
word_vecs = Embedding(len(emb_vocab), 50, trainable=True)(word_idx_in)

grammar_features_in = Input(shape=(input_len, 62), name='grammar_features_in')  # TODO: derive 62 from len(grammar_feature_extractor...)

clstm_in = Input(shape=(char_encoder.max_wlen,), name='clstm_in')
# Frozen square lookup table mapping each character id to a fixed vector
# taken from char_encoder.embeddings
clstm_embed = Embedding(len(char_encoder.chars),
                        len(char_encoder.chars),
                        weights=[char_encoder.embeddings],
                        trainable=False)(clstm_in)
print('clstm_embed initial -', clstm_embed.get_shape())
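# If char_encoder.embeddings is an identity matrix (an assumption about this
# encoder, shown only for illustration), the frozen square Embedding above is
# exactly a one-hot encoding of each character id:
import numpy as np
one_hot_table = np.eye(len(char_encoder.chars), dtype='float32')  # same shape as the lookup table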

# Three stacked 1-D convolution blocks over the character sequence
filter_length = [5, 3, 3]
nb_filter = [196, 196, 256]
pool_length = 2

for i in range(len(nb_filter)):
    # Keras 2 spelling of the original Keras 1 call
    # Convolution1D(nb_filter=..., filter_length=..., border_mode='same',
    #               init='glorot_normal', subsample_length=1);
    # Conv1D and Dropout come from keras.layers
    clstm_embed = Conv1D(filters=nb_filter[i],
                         kernel_size=filter_length[i],
                         padding='same',
                         activation='relu',
                         kernel_initializer='glorot_normal',
                         strides=1)(clstm_embed)
    print('clstm_embed', i, '-', clstm_embed.get_shape())

    clstm_embed = Dropout(0.1)(clstm_embed)
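
# The "clstm" name and the so-far-unused pool_length above suggest the
# convolution stack feeds a pooling layer and an LSTM. A hedged sketch of
# such a tail; the LSTM width is an assumption, not from the original:
# from keras.layers import MaxPooling1D, LSTM
clstm_embed = MaxPooling1D(pool_size=pool_length)(clstm_embed)
char_vec = LSTM(128)(clstm_embed)  # one vector summarizing the character sequence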