Code example #1
0
# NOTE(review): fragment of a larger retraining script — `pd`, `validation`,
# `tokenizer`, `sequence`, `X_validation`, `X_total`, `Y_total_ltc`,
# `Y_validation_ltc`, `maxlen`, `batch_size`, `epochs`, `model_dir`,
# `model_dir_retrain`, `embeddings_matrix`, `word_index`, `TextClassifier`,
# `Metrics`, `ModelCheckpoint`, `gc`, and `K` are all defined outside this excerpt.

# One-hot encode the per-aspect sentiment labels for the validation set.
# Column selection [-2, -1, 0, 1] fixes the column order of the dummies;
# presumably these are the four AI Challenger sentiment classes
# (-2/-1/0/1) — TODO confirm against the dataset documentation.
Y_validation_dl = pd.get_dummies(validation["dish_look"])[[-2, -1, 0, 1]].values
Y_validation_dr = pd.get_dummies(validation["dish_recommendation"])[[-2, -1, 0, 1]].values
Y_validation_ooe = pd.get_dummies(validation["others_overall_experience"])[[-2, -1, 0, 1]].values
Y_validation_owta = pd.get_dummies(validation["others_willing_to_consume_again"])[[-2, -1, 0, 1]].values

# Convert validation texts to padded integer sequences of length `maxlen`.
list_tokenized_validation = tokenizer.texts_to_sequences(X_validation)
input_validation = sequence.pad_sequences(list_tokenized_validation, maxlen=maxlen)

# Same preprocessing for the full training set.
list_tokenized_total = tokenizer.texts_to_sequences(X_total)
input_total = sequence.pad_sequences(list_tokenized_total, maxlen=maxlen)

# --- Model 1: location_traffic_convenience ("ltc") ---
print("model1")
# Build a fresh 4-class classifier and warm-start from previously saved weights.
model1 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_ltc.hdf5"
# Checkpoint pattern saves one weight file per epoch (epoch number in the name).
retrain_path = model_dir_retrain + "model_ltc_{epoch:02d}.hdf5"
model1.load_weights(file_path)
checkpoint = ModelCheckpoint(retrain_path, verbose=2, save_weights_only=True)
metrics = Metrics()
callbacks_list = [checkpoint, metrics]
# Continue training on the full dataset, validating against the held-out split.
history = model1.fit(input_total, Y_total_ltc, batch_size=batch_size, epochs=epochs,
                     validation_data=(input_validation, Y_validation_ltc), callbacks=callbacks_list, verbose=2)
# Free the model and the Keras/TensorFlow session before building the next
# model, to avoid accumulating graphs/memory across the per-aspect models.
del model1
del history
gc.collect()
K.clear_session()

# --- Model 2: location_distance_from_business_district ("ldfbd") ---
# NOTE(review): this section is truncated in the excerpt — the fit/cleanup
# steps that presumably mirror model1's are not visible here.
print("model2")
model2 = TextClassifier().model(embeddings_matrix, maxlen, word_index, 4)
file_path = model_dir + "model_ldfbd.hdf5"
retrain_path = model_dir_retrain + "model_ldfbd_{epoch:02d}.hdf5"
model2.load_weights(file_path)
Code example #2
0
                # NOTE(review): fragment starts mid-loop — the `for`/`if`
                # matching the `else:` below (presumably `if word in w2_model:`)
                # is outside this excerpt. Looks up the word's embedding vector.
                embedding_vector = w2_model[word]
            else:
                embedding_vector = None
            if embedding_vector is not None:
                # Fill row i of the embedding matrix; rows for out-of-vocabulary
                # words are left at their initial values.
                embeddings_matrix[i] = embedding_vector

        # Two copies of the test-set CSV: `submit` will hold discrete class
        # predictions, `submit_prob` the raw probability vectors.
        submit = pd.read_csv(
            "ai_challenger_sentiment_analysis_testa_20180816/sentiment_analysis_testa.csv"
        )
        submit_prob = pd.read_csv(
            "ai_challenger_sentiment_analysis_testa_20180816/sentiment_analysis_testa.csv"
        )

        # --- Model 1: location_traffic_convenience ---
        # Build a 4-class classifier and load the epoch-02 retrained weights.
        model1 = TextClassifier().model(embeddings_matrix, maxlen, word_index,
                                        4)
        model1.load_weights(model_dir + "model_ltc_02.hdf5")
        # NOTE(review): predictions run on `input_validation` even though the
        # test-a CSV was loaded above — confirm this is intentional (the name
        # may simply refer to the padded test sequences in this script).
        # `getClassification` presumably maps a 4-way probability vector to a
        # class label — TODO confirm its definition.
        submit["location_traffic_convenience"] = list(
            map(getClassification, model1.predict(input_validation)))
        submit_prob["location_traffic_convenience"] = list(
            model1.predict(input_validation))
        # Release the model and clear the Keras session before the next aspect.
        del model1
        gc.collect()
        K.clear_session()

        # --- Model 2: location_distance_from_business_district ---
        model2 = TextClassifier().model(embeddings_matrix, maxlen, word_index,
                                        4)
        model2.load_weights(model_dir + "model_ldfbd_02.hdf5")
        submit["location_distance_from_business_district"] = list(
            map(getClassification, model2.predict(input_validation)))
        submit_prob["location_distance_from_business_district"] = list(
            model2.predict(input_validation))