# --- Word-level encoder ---------------------------------------------------
# RNN encoder over word inputs with an attention head; hyperparameters are
# bundled into an RNNModelParams value object.
params = RNNModelParams(
    layers_size=LAYERS_SIZE,
    spatial_dropout=SPATIAL_DROPOUT,
    recurrent_dropout=RECURRENT_DROPOUT,
    dropout_dense=DROPOUT_DENSE,
    dense_encoder_size=DENSE,
)
word_encoder = RNNModel(
    inputs=[input],
    features=features,
    features_to_dense=[],
    output=None,
    params=params,
    attention=True,
)
word_encoder.build()
word_encoder.model().summary()

# --- Sentence-level model -------------------------------------------------
# The word encoder is applied independently to each of the 3 word sequences
# (shape: 3 x MAX_LEN) via TimeDistributed; a bidirectional LSTM plus an
# attention layer then aggregates the per-sequence encodings into a single
# representation for the 2-way softmax classifier.
input_model = Input(shape=(3, MAX_LEN), name='input_1')
review_word_enc = TimeDistributed(word_encoder.model())(input_model)
l_lstm_sent = Bidirectional(
    LSTM(200, recurrent_dropout=0.2, return_sequences=True)
)(review_word_enc)
l_att_sent = Attention()(l_lstm_sent)
preds = Dense(2, activation='softmax', name='output_1')(l_att_sent)

model = Model(inputs=input_model, outputs=[preds])
# NOTE(review): the source chunk is truncated mid-call at this point; the
# original compile() may carry further kwargs (e.g. metrics) — confirm
# against the full file before relying on this exact call.
model.compile(loss='categorical_crossentropy', optimizer='adam')
# --- Word-level encoder (pretrained-ELMo variant) -------------------------
# Same RNN-with-attention word encoder as the base variant, but fed a
# pretrained ELMo input (passed both as the model input and directly via
# input_direct) and with no extra features.
params = RNNModelParams(
    layers_size=LAYERS_SIZE,
    spatial_dropout=SPATIAL_DROPOUT,
    recurrent_dropout=RECURRENT_DROPOUT,
    dropout_dense=DROPOUT_DENSE,
    dense_encoder_size=DENSE,
)
word_encoder = RNNModel(
    inputs=[pretrained_elmo_input],
    input_direct=pretrained_elmo_input,
    features=[],
    features_to_dense=[],
    output=None,
    params=params,
    attention=True,
)
word_encoder.build()
word_encoder.model().summary()

# --- Sentence-level model -------------------------------------------------
# Apply the word encoder to each of the 3 word sequences (shape: 3 x MAX_LEN)
# via TimeDistributed, then aggregate with a bidirectional LSTM followed by
# attention, feeding a 2-way softmax classifier.
input_model = Input(shape=(3, MAX_LEN), name='input_1')
review_word_enc = TimeDistributed(word_encoder.model())(input_model)
l_lstm_sent = Bidirectional(
    LSTM(200, recurrent_dropout=0.2, return_sequences=True)
)(review_word_enc)
l_att_sent = Attention()(l_lstm_sent)
preds = Dense(2, activation='softmax', name='output_1')(l_att_sent)

model = Model(inputs=input_model, outputs=[preds])
# NOTE(review): the source chunk is truncated mid-call at this point; the
# original compile() may carry further kwargs (e.g. metrics) — confirm
# against the full file before relying on this exact call.
model.compile(loss='categorical_crossentropy', optimizer='adam')