Example #1
Y_validation_ec = pd.get_dummies(validation["environment_cleaness"])[[-2, -1, 0, 1]].values
Y_validation_dp = pd.get_dummies(validation["dish_portion"])[[-2, -1, 0, 1]].values
Y_validation_dt = pd.get_dummies(validation["dish_taste"])[[-2, -1, 0, 1]].values
Y_validation_dl = pd.get_dummies(validation["dish_look"])[[-2, -1, 0, 1]].values
Y_validation_dr = pd.get_dummies(validation["dish_recommendation"])[[-2, -1, 0, 1]].values
Y_validation_ooe = pd.get_dummies(validation["others_overall_experience"])[[-2, -1, 0, 1]].values
Y_validation_owta = pd.get_dummies(validation["others_willing_to_consume_again"])[[-2, -1, 0, 1]].values
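pd.get_dummies one-hot encodes a label column, and indexing the result with [[-2, -1, 0, 1]] pins the column order to the four AI Challenger sentiment labels (-2 not mentioned, -1 negative, 0 neutral, 1 positive), so each target matrix lines up with the model's 4-unit softmax output. A quick illustration:

import pandas as pd

labels = pd.Series([1, -2, 0, -1])
print(pd.get_dummies(labels)[[-2, -1, 0, 1]].astype(int).values)
# [[0 0 0 1]
#  [1 0 0 0]
#  [0 0 1 0]
#  [0 1 0 0]]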

list_tokenized_validation = tokenizer.texts_to_sequences(X_validation)
input_validation = sequence.pad_sequences(list_tokenized_validation, maxlen=maxlen)

list_tokenized_total = tokenizer.texts_to_sequences(X_total)
input_total = sequence.pad_sequences(list_tokenized_total, maxlen=maxlen)
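texts_to_sequences maps each (already word-segmented) review to integer word IDs from the fitted tokenizer, and pad_sequences left-pads or truncates every sequence to maxlen so the model sees fixed-shape input. A toy illustration:

from keras.preprocessing import sequence
from keras.preprocessing.text import Tokenizer

t = Tokenizer()
t.fit_on_texts(["bad food", "bad service"])  # word_index: bad=1, food=2, service=3
print(sequence.pad_sequences(t.texts_to_sequences(["bad food"]), maxlen=4))
# [[0 0 1 2]]  -- padding is prepended by default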

print("model1")
model1 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_ltc.hdf5"
retrain_path = model_dir_retrain + "model_ltc_{epoch:02d}.hdf5"
model1.load_weights(file_path)
checkpoint = ModelCheckpoint(retrain_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model1.fit(input_total, Y_total_ltc, batch_size=batch_size, epochs=epochs,
                     validation_data=(input_validation, Y_validation_ltc), callbacks=callbacks_list, verbose=2)
del model1
del history
gc.collect()
K.clear_session()
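Metrics is not shown in these snippets; for this competition it is presumably a Keras callback that reports the macro F1 score on the validation set after each epoch. A minimal sketch under that assumption (self.validation_data is populated by fit() in the Keras versions this code targets):

import numpy as np
from keras.callbacks import Callback
from sklearn.metrics import f1_score

class Metrics(Callback):
    def on_epoch_end(self, epoch, logs=None):
        val_x, val_y = self.validation_data[0], self.validation_data[1]
        pred = np.argmax(self.model.predict(val_x), axis=1)
        true = np.argmax(val_y, axis=1)
        print(" - val_macro_f1: %.4f" % f1_score(true, pred, average="macro"))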

print("model2")
model2 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
Example #2

for word, i in word_index.items():
    # Copy the pretrained vector for every word the word2vec model covers;
    # out-of-vocabulary words keep their all-zero row.
    if word in w2_model:
        embeddings_matrix[i] = w2_model[word]
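w2_model and embeddings_matrix are created earlier in the original script; presumably w2_model is a pretrained gensim word2vec model and embeddings_matrix starts as a zero matrix with one row per tokenizer index. A hypothetical setup (the file name is an assumption):

import numpy as np
from gensim.models import KeyedVectors

w2_model = KeyedVectors.load_word2vec_format("word2vec.bin", binary=True)
embeddings_matrix = np.zeros((len(word_index) + 1, w2_model.vector_size))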

submit = pd.read_csv(
    "ai_challenger_sentiment_analysis_testa_20180816/sentiment_analysis_testa.csv")
submit_prob = pd.read_csv(
    "ai_challenger_sentiment_analysis_testa_20180816/sentiment_analysis_testa.csv")

model1 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
model1.load_weights(model_dir + "model_ltc_02.hdf5")
submit["location_traffic_convenience"] = list(
    map(getClassification, model1.predict(input_validation)))
submit_prob["location_traffic_convenience"] = list(
    model1.predict(input_validation))
del model1
gc.collect()
K.clear_session()
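getClassification is not defined in this snippet; it presumably maps a 4-way softmax probability vector back to the label set {-2, -1, 0, 1}, mirroring the column order used for the targets above. A minimal sketch under that assumption:

import numpy as np

def getClassification(probs):
    # index 0..3 corresponds to the one-hot column order [-2, -1, 0, 1]
    return [-2, -1, 0, 1][int(np.argmax(probs))]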

model2 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
model2.load_weights(model_dir + "model_ldfbd_02.hdf5")
submit["location_distance_from_business_district"] = list(
    map(getClassification, model2.predict(input_validation)))
submit_prob["location_distance_from_business_district"] = list(
    model2.predict(input_validation))
Example #3
Y_validation_dr = pd.get_dummies(
    validation["dish_recommendation"])[[-2, -1, 0, 1]].values
Y_validation_ooe = pd.get_dummies(
    validation["others_overall_experience"])[[-2, -1, 0, 1]].values
Y_validation_owta = pd.get_dummies(
    validation["others_willing_to_consume_again"])[[-2, -1, 0, 1]].values

list_tokenized_train = tokenizer.texts_to_sequences(X_train)
input_train = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)

list_tokenized_validation = tokenizer.texts_to_sequences(X_validation)
input_validation = sequence.pad_sequences(list_tokenized_validation,
                                          maxlen=maxlen)

print("model7")
model7 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_ssp_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model7.fit(input_train,
                     Y_train_ssp,
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_data=(input_validation, Y_validation_ssp),
                     callbacks=callbacks_list,
                     verbose=2)
del model7
del history
gc.collect()
K.clear_session()
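Every example calls TextClassifier().model(...) without showing it; the snippets only constrain its interface (a pretrained embedding matrix, the input length, the vocabulary, and 4 output classes). A minimal compatible sketch, not the authors' actual architecture:

from keras.layers import Bidirectional, Dense, Embedding, GRU, Input
from keras.models import Model

class TextClassifier:
    def model(self, embeddings_matrix, maxlen, word_index, num_class):
        inp = Input(shape=(maxlen,))
        x = Embedding(len(word_index) + 1, embeddings_matrix.shape[1],
                      weights=[embeddings_matrix], input_length=maxlen,
                      trainable=False)(inp)
        x = Bidirectional(GRU(64))(x)                    # sentence encoder
        out = Dense(num_class, activation="softmax")(x)  # 4 sentiment classes
        m = Model(inputs=inp, outputs=out)
        m.compile(loss="categorical_crossentropy", optimizer="adam",
                  metrics=["accuracy"])
        return m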
Example #4
Y_validation_dr = pd.get_dummies(
    validation["dish_recommendation"])[[-2, -1, 0, 1]].values
Y_validation_ooe = pd.get_dummies(
    validation["others_overall_experience"])[[-2, -1, 0, 1]].values
Y_validation_owta = pd.get_dummies(
    validation["others_willing_to_consume_again"])[[-2, -1, 0, 1]].values

list_tokenized_train = tokenizer.texts_to_sequences(X_train)
input_train = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)

list_tokenized_validation = tokenizer.texts_to_sequences(X_validation)
input_validation = sequence.pad_sequences(list_tokenized_validation,
                                          maxlen=maxlen)

print("model19")
model19 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_ooe_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model19.fit(input_train,
                      Y_train_ooe,
                      batch_size=batch_size,
                      epochs=epochs,
                      validation_data=(input_validation, Y_validation_ooe),
                      callbacks=callbacks_list,
                      verbose=2)
del model19
del history
gc.collect()
K.clear_session()
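Because file_path contains {epoch:02d}, ModelCheckpoint writes one weight file per epoch (model_ooe_01.hdf5, model_ooe_02.hdf5, ...); the prediction code in Example #2 later loads one of these by name. A hypothetical way to restore the latest checkpoint (choosing the epoch with the best validation F1 would be better):

import glob

ckpts = sorted(glob.glob(model_dir + "model_ooe_*.hdf5"))
model19 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
model19.load_weights(ckpts[-1])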
Example #5
Y_validation_dr = pd.get_dummies(
    validation["dish_recommendation"])[[-2, -1, 0, 1]].values
Y_validation_ooe = pd.get_dummies(
    validation["others_overall_experience"])[[-2, -1, 0, 1]].values
Y_validation_owta = pd.get_dummies(
    validation["others_willing_to_consume_again"])[[-2, -1, 0, 1]].values

list_tokenized_train = tokenizer.texts_to_sequences(X_train)
input_train = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)

list_tokenized_validation = tokenizer.texts_to_sequences(X_validation)
input_validation = sequence.pad_sequences(list_tokenized_validation,
                                          maxlen=maxlen)

print("model11")
model11 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_ed_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model11.fit(input_train,
                      Y_train_ed,
                      batch_size=batch_size,
                      epochs=epochs,
                      validation_data=(input_validation, Y_validation_ed),
                      callbacks=callbacks_list,
                      verbose=2)
del model11
del history
gc.collect()
K.clear_session()
print("model5")
model5 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_swa_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model5.fit(input_train, Y_train_swa, batch_size=batch_size, epochs=epochs,
                     validation_data=(input_validation, Y_validation_swa), callbacks=callbacks_list, verbose=2)
del model5
del history
gc.collect()
K.clear_session()
Example #6
print("model6")
model6 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_spc_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model6.fit(input_train,
                     Y_train_spc,
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_data=(input_validation, Y_validation_spc),
                     callbacks=callbacks_list,
                     verbose=2)
del model6
del history
gc.collect()
K.clear_session()
Example #7
Y_validation_dr = pd.get_dummies(
    validation["dish_recommendation"])[[-2, -1, 0, 1]].values
Y_validation_ooe = pd.get_dummies(
    validation["others_overall_experience"])[[-2, -1, 0, 1]].values
Y_validation_owta = pd.get_dummies(
    validation["others_willing_to_consume_again"])[[-2, -1, 0, 1]].values

list_tokenized_train = tokenizer.texts_to_sequences(X_train)
input_train = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)

list_tokenized_validation = tokenizer.texts_to_sequences(X_validation)
input_validation = sequence.pad_sequences(list_tokenized_validation,
                                          maxlen=maxlen)

print("model5")
model5 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_swa_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model5.fit(input_train,
                     Y_train_swa,
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_data=(input_validation, Y_validation_swa),
                     callbacks=callbacks_list,
                     verbose=2)
del model5
del history
gc.collect()
K.clear_session()
Example #8

Y_validation_dr = pd.get_dummies(
    validation["dish_recommendation"])[[-2, -1, 0, 1]].values
Y_validation_ooe = pd.get_dummies(
    validation["others_overall_experience"])[[-2, -1, 0, 1]].values
Y_validation_owta = pd.get_dummies(
    validation["others_willing_to_consume_again"])[[-2, -1, 0, 1]].values

list_tokenized_train = tokenizer.texts_to_sequences(X_train)
input_train = sequence.pad_sequences(list_tokenized_train, maxlen=maxlen)

list_tokenized_validation = tokenizer.texts_to_sequences(X_validation)
input_validation = sequence.pad_sequences(list_tokenized_validation,
                                          maxlen=maxlen)

print("model3")
model3 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_letf_{epoch:02d}.hdf5"
checkpoint = ModelCheckpoint(file_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
history = model3.fit(input_train,
                     Y_train_letf,
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_data=(input_validation, Y_validation_letf),
                     callbacks=callbacks_list,
                     verbose=2)
del model3
del history
gc.collect()
K.clear_session()
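The per-aspect blocks above differ only in the target arrays and the file name prefix, so they could be expressed once as a loop. A hypothetical consolidation (extend the list with the remaining aspect columns):

aspects = [
    ("swa", Y_train_swa, Y_validation_swa),
    ("letf", Y_train_letf, Y_validation_letf),
]
for name, y_train, y_val in aspects:
    model = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
    checkpoint = ModelCheckpoint(model_dir + "model_" + name + "_{epoch:02d}.hdf5",
                                 verbose=2, save_weights_only=True)
    model.fit(input_train, y_train, batch_size=batch_size, epochs=epochs,
              validation_data=(input_validation, y_val),
              callbacks=[checkpoint, Metrics()], verbose=2)
    del model
    gc.collect()
    K.clear_session()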