Example #1
# Head of the truncated Embedding call reconstructed (the same definition appears in Example #2).
embeddings_layer = Embedding(len(word_index) + 1,
                             embedding_dim,
                             input_length=max_sequence_length,
                             trainable=True)

inputs = Input(shape=(max_sequence_length, ), dtype='int32', name='input')
embeddings_sequences = embeddings_layer(inputs)
graphconv = GraphConv(filters=64,
                      neighbors_ix_mat=q_mat_layer1,
                      num_neighbors=12,
                      activation='relu')
selfattention = SeqSelfAttention(
    attention_activation='relu')(embeddings_sequences)
ind_rnn_layer_1 = Bidirectional(
    IndRNN(250,
           recurrent_clip_min=-1,
           recurrent_clip_max=-1,
           dropout=0.0,
           recurrent_dropout=0.0,
           return_sequences=True))(selfattention)
output = Bidirectional(
    IndRNN(250,
           recurrent_clip_min=-1,
           recurrent_clip_max=-1,
           dropout=0.0,
           recurrent_dropout=0.0,
           return_sequences=False))(ind_rnn_layer_1)

print(output)

output = Dense(512, activation='sigmoid')(output)
output = Dense(256, activation='sigmoid')(output)
output = Dense(128, activation='sigmoid')(output)
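# Hypothetical completion of Example #1 (not in the original fragment): a sigmoid output
# head plus Model/compile in the same style as Example #2; Adam(0.0001), the loss and the
# metrics are assumptions borrowed from the other examples.
output = Dense(1, activation='sigmoid')(output)
model = Model(inputs=inputs, outputs=[output])
model.summary()
model.compile(loss='binary_crossentropy',
              optimizer=Adam(0.0001),
              metrics=['accuracy'])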
Example #2
saveBestModel = ModelCheckpoint(save_best_model_file,
                                monitor='val_loss',
                                verbose=0,
                                save_best_only=True,
                                save_weights_only=True)

embeddings_layer = Embedding(len(word_index) + 1,
                             embedding_dim,
                             input_length=max_sequence_length,
                             trainable=True)

inputs = Input(shape=(max_sequence_length, ), dtype='int32', name='input')
embeddings_sequences = embeddings_layer(inputs)
ind_rnn_layer_1 = IndRNN(250,
                         recurrent_clip_min=-1,
                         recurrent_clip_max=-1,
                         dropout=0.35,
                         recurrent_dropout=0.0,
                         return_sequences=True)(embeddings_sequences)
output = IndRNN(125,
                recurrent_clip_min=-1,
                recurrent_clip_max=-1,
                dropout=0.35,
                recurrent_dropout=0.0,
                return_sequences=False)(ind_rnn_layer_1)
# output=Dense(256,activation='sigmoid')(ind_rnn_layer_1)
# output=Dense(128,activation='sigmoid')(output)
# output=Dense(64,activation='sigmoid')(output)
output = Dense(1, activation='sigmoid')(output)
model = Model(inputs=inputs, outputs=[output])
model.summary()
model.compile(loss='binary_crossentropy',
              optimizer=Adam(0.0001),
              metrics=['accuracy'])  # optimizer and metrics assumed to close the truncated call
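# Hypothetical training call (not in the original fragment) wiring in the saveBestModel
# checkpoint defined above; x_train, y_train, the batch size and the epoch count are
# assumptions.
model.fit(x_train, y_train,
          validation_split=0.1,
          batch_size=64,
          epochs=20,
          callbacks=[saveBestModel])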
Example #3

# The opening of this fragment was lost; a plausible head for the truncated call is a Conv1D
# block applied to an earlier conv1 tensor that is not visible in this excerpt. The filters,
# kernel_size and padding values are assumptions; only the final line is original.
conv1 = Conv1D(filters=128,
               kernel_size=3,
               padding='same',
               activation='relu')(conv1)
conv1 = MaxPooling1D()(conv1)
conv1 = BatchNormalization()(conv1)
texture = Dropout(0.3)(conv1)
print()
texture = Dense(100, activation='relu')(texture)

sequence_input4 = Input(shape=(max_sequence_length1, ), dtype='int32')
wo_ind = Embedding(len(word_index4) + 1,
                   embedding_dim,
                   input_length=max_sequence_length1,
                   trainable=True)(sequence_input4)

wo_ind = IndRNN(64,
                recurrent_clip_min=-1,
                recurrent_clip_max=-1,
                dropout=0.0,
                recurrent_dropout=0.0,
                return_sequences=True)(wo_ind)  #97%

wo_ind = Dense(100, activation='relu')(wo_ind)

k = 12

output = concatenate([texture, wo_ind])
print("output:", output)
"""
自注意力机制的加载
"""
from keras_self_attention import SeqSelfAttention
output = SeqSelfAttention(attention_activation='relu',
                          name='self_attention')(output)
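# The fragment stops at the self-attention block, whose output is still a sequence; a
# hypothetical classification head (not in the original) pools it and adds a sigmoid
# classifier. GlobalMaxPooling1D and the single-unit Dense are assumptions.
from keras.layers import GlobalMaxPooling1D
output = GlobalMaxPooling1D()(output)
output = Dense(1, activation='sigmoid')(output)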
Example #4
conv1 = MaxPooling1D()(conv1)
conv1 = BatchNormalization()(conv1)
texture = Dropout(0.3)(conv1)
print()
texture = Dense(100, activation='relu')(texture)

sequence_input4 = Input(shape=(max_sequence_length1, ), dtype='int32')
wo_ind = Embedding(len(word_index4) + 1,
                   embedding_dim,
                   input_length=max_sequence_length1,
                   trainable=True)(sequence_input4)

wo_ind = Bidirectional(
    IndRNN(32,
           recurrent_clip_min=-1,
           recurrent_clip_max=-1,
           dropout=0.0,
           recurrent_dropout=0.0,
           return_sequences=True))(wo_ind)  #97%
wo_ind = Bidirectional(
    IndRNN(32,
           recurrent_clip_min=-1,
           recurrent_clip_max=-1,
           dropout=0.0,
           recurrent_dropout=0.0,
           return_sequences=True))(wo_ind)  #97%
wo_ind = Bidirectional(
    IndRNN(32,
           recurrent_clip_min=-1,
           recurrent_clip_max=-1,
           dropout=0.0,
           recurrent_dropout=0.0,
           return_sequences=True))(wo_ind)  # closing restored to match the two identical blocks above
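# Hypothetical continuation of Example #4 (not in the original fragment), following the
# Dense/concatenate pattern of Example #3; the pooling layer and the layer sizes are
# assumptions.
from keras.layers import GlobalMaxPooling1D
wo_ind = Dense(100, activation='relu')(wo_ind)
output = concatenate([texture, wo_ind])
output = GlobalMaxPooling1D()(output)
output = Dense(1, activation='sigmoid')(output)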
Example #5
embeddings_sequences = embeddings_layer(inputs)
graphconv = GraphConv(filters=64,
                      neighbors_ix_mat=q_mat_layer1,
                      num_neighbors=12,
                      activation='relu')
self_attention = SeqSelfAttention(attention_activation='relu',
                                  name='self_attention')(embeddings_sequences)
conv1 = Conv1D(filters=128,
               kernel_size=3,
               strides=1,
               padding='same',
               activation='relu',
               name='conv1')(self_attention)
ind_rnn_layer_1 = IndRNN(200,
                         recurrent_clip_min=-1,
                         recurrent_clip_max=-1,
                         dropout=0.7,
                         recurrent_dropout=0.0,
                         return_sequences=True)(conv1)
output = IndRNN(100,
                recurrent_clip_min=-1,
                recurrent_clip_max=-1,
                dropout=0.7,
                recurrent_dropout=0.0,
                return_sequences=True)(ind_rnn_layer_1)

print(output)

dropout = Dropout(0.5)(output)
primary_caps = PrimaryCap(dropout,
                          dim_vector=dim_capsule1,
                          n_channels=3,
                          kernel_size=9,
                          strides=2,
                          padding='valid')  # kernel_size/strides/padding assumed to close the truncated call
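# The fragment ends at the primary capsules; in the CapsNet-Keras style this snippet appears
# to follow, they would typically feed a routing capsule layer and a Length output. Everything
# below is an assumption, including the import path, n_class and the layer arguments.
from capsulelayers import CapsuleLayer, Length
digit_caps = CapsuleLayer(num_capsule=n_class,
                          dim_vector=16,
                          num_routing=3,
                          name='digit_caps')(primary_caps)
output = Length(name='capsnet_output')(digit_caps)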
Example #6

# Head of the truncated Embedding call reconstructed from the identical definition in Example #2.
embeddings_layer = Embedding(len(word_index) + 1,
                             embedding_dim,
                             input_length=max_sequence_length,
                             trainable=True)

inputs = Input(shape=(max_sequence_length, ), dtype='int32', name='input')
embeddings_sequences = embeddings_layer(inputs)
selfattention = SeqSelfAttention(
    attention_activation='relu')(embeddings_sequences)
# ind_rnn_layer_1 = Bidirectional(IndRNN(250,
#                  recurrent_clip_min=-1,
#                  recurrent_clip_max=-1,
#                  dropout=0.0,
#                  recurrent_dropout=0.0,
#                  return_sequences=True))(selfattention)
output = Bidirectional(
    IndRNN(125,
           recurrent_clip_min=-1,
           recurrent_clip_max=-1,
           dropout=0.0,
           recurrent_dropout=0.0,
           return_sequences=False))(embeddings_sequences)
# ind_rnn_layer_1 = Bidirectional(IndRNN(250,)

print(output)

# output=Dense(128,activation='sigmoid')(output)
#
# output=Dense(64,activation='sigmoid')(output)
# output=Dense(32,activation='sigmoid')(output)
output = Dense(1, activation='sigmoid')(output)
model = Model(inputs=inputs, outputs=[output])
model.summary()
model.compile(loss='binary_crossentropy',
              optimizer=Adam(0.0001),
              metrics=['accuracy'])  # metrics assumed to close the truncated call
Example #7

# The opening of this fragment was lost; judging from Merge([left_branch, right_branch]) below,
# these arguments plausibly belong to a Convolution1D layer added to a right_branch Sequential
# model whose earlier layers (and input shape) are not recoverable here. right_branch.add and
# Convolution1D are assumptions; only the keyword arguments are original.
right_branch.add(Convolution1D(nb_filter=nb_filter,
                               filter_length=filter_length,
                               border_mode='valid',
                               activation='relu',
                               subsample_length=1))

left_branch = Sequential()
left_branch.add(Embedding(len(word_index) + 1,
                          EMBEDDING_DIM,
                          input_length=MAX_SEQUENCE_LENGTH))

# left_branch.add(LSTM(250,return_sequences=False))
left_branch.add(IndRNN(250,
                       recurrent_clip_min=-1,
                       recurrent_clip_max=-1,
                       dropout=0.0,
                       recurrent_dropout=0.0,
                       return_sequences=True))
left_branch.add(IndRNN(250,
                       recurrent_clip_min=-1,
                       recurrent_clip_max=-1,
                       dropout=0.0,
                       recurrent_dropout=0.0,
                       return_sequences=False))


merged = Merge([left_branch, right_branch], mode='dot', output_shape=lambda x: x[0])

final_model = Sequential()
final_model.add(merged)
final_model.add(Dense(1))
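# Hypothetical completion (not in the original fragment): a sigmoid activation on the single
# output unit plus the binary-crossentropy compile used by the other examples.
from keras.layers import Activation
final_model.add(Activation('sigmoid'))
final_model.compile(loss='binary_crossentropy',
                    optimizer='adam',
                    metrics=['accuracy'])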