# Example #1
# 0
# Assemble the feature set and hyper-parameters, then wire up the RNN model.
features: List[Feature] = [manual_features, glove_features, elmo_features]

params = RNNModelParams(
    layers_size=LAYERS_SIZE,
    spatial_dropout=SPATIAL_DROPOUT,
    recurrent_dropout=RECURRENT_DROPOUT,
    dropout_dense=DROPOUT_DENSE,
    dense_encoder_size=DENSE,
)

# Three-class softmax head; `input` and `text_input` are defined earlier
# in the file (NOTE(review): `input` shadows the builtin — defined elsewhere,
# so it cannot be renamed here).
output_layer = Dense(3, activation='softmax', name="output")

model = RNNModel(
    inputs=[input, text_input],
    features=features,
    output=output_layer,
    params=params,
    attention=ATTENTION,
)

model.build()
model.compile()

# Materialize the train/dev inputs once. tqdm only wraps the iterator with a
# progress bar, so list(...) replaces the identity comprehension
# `[value for value in ...]` (ruff C416 / PERF402) — same result, less noise.
X = list(tqdm(sem_eval_dataset.iterate_train_x(max_len=MAX_LEN,
                                               one_hot=True)))
X_text = list(tqdm(sem_eval_dataset.iterate_train_x(max_len=MAX_LEN,
                                                    one_hot=False)))

X_val = list(tqdm(sem_eval_dataset_dev.iterate_x(max_len=MAX_LEN,
                                                 one_hot=True)))
# Example #2
# 0
params = RNNModelParams(layers_size=LAYERS_SIZE,
                        spatial_dropout=SPATIAL_DROPOUT,
                        recurrent_dropout=RECURRENT_DROPOUT,
                        dropout_dense=DROPOUT_DENSE,
                        dense_encoder_size=DENSE)

# Word-level encoder: no output head (output=None) so it can be embedded as a
# sub-model inside the sentence-level network below.
word_encoder = RNNModel(inputs=[input],
                        features=features,
                        features_to_dense=[],
                        output=None,
                        params=params,
                        attention=True)

word_encoder.build()
word_encoder.model().summary()

# Hierarchical setup: each sample is 3 sequences of MAX_LEN tokens
# (presumably 3 sentences per review — confirm against the data pipeline).
input_model = Input(shape=(
    3,
    MAX_LEN,
), name='input_1')
# Run the word encoder over each of the 3 sequences independently.
review_word_enc = TimeDistributed(word_encoder.model())(input_model)
l_lstm_sent = Bidirectional(
    LSTM(200, recurrent_dropout=0.2, return_sequences=True))(review_word_enc)
l_att_sent = Attention()(l_lstm_sent)
# Binary softmax head over the attended sentence representation.
preds = Dense(2, activation='softmax', name='output_1')(l_att_sent)

model = Model(inputs=input_model, outputs=[preds])

# NOTE(review): the original line was truncated after `loss=...,`; the
# optimizer/metrics below are the conventional completion for a categorical
# softmax head — confirm against the original notebook.
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Variant of the hierarchical model above, but the word encoder is fed from a
# pre-trained ELMo input instead of manual/GloVe/ELMo feature layers.
params = RNNModelParams(layers_size=LAYERS_SIZE,
                        spatial_dropout=SPATIAL_DROPOUT,
                        recurrent_dropout=RECURRENT_DROPOUT,
                        dropout_dense=DROPOUT_DENSE,
                        dense_encoder_size=DENSE)

# No feature layers (features=[]) and no output head (output=None): the
# encoder consumes `pretrained_elmo_input` directly and is used as a sub-model.
word_encoder = RNNModel(inputs=[pretrained_elmo_input],
                        input_direct=pretrained_elmo_input,
                        features=[],
                        features_to_dense=[],
                        output=None,
                        params=params,
                        attention=True)

word_encoder.build()
word_encoder.model().summary()

# Each sample is 3 sequences of MAX_LEN tokens (presumably 3 sentences per
# review — confirm against the data pipeline).
input_model = Input(shape=(
    3,
    MAX_LEN,
), name='input_1')
# Apply the word encoder to each of the 3 sequences independently.
review_word_enc = TimeDistributed(word_encoder.model())(input_model)
l_lstm_sent = Bidirectional(
    LSTM(200, recurrent_dropout=0.2, return_sequences=True))(review_word_enc)
l_att_sent = Attention()(l_lstm_sent)
# Binary softmax head over the attended sentence representation.
preds = Dense(2, activation='softmax', name='output_1')(l_att_sent)

model = Model(inputs=input_model, outputs=[preds])
model.compile(loss='categorical_crossentropy',