# Example 1: train the emotion classifier.
import os

# tf.keras import paths are assumed here; the standalone keras package
# exposes the same names.
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.callbacks import ModelCheckpoint

# load_emotion_data, get_model_emotions and the hyperparameters
# (sequence_length, embedding_size, batch_size, epochs) are assumed to be
# defined in the project's own data/config modules.
X_train, X_test, y_train, y_test, vocab = load_emotion_data()

vocab_size = len(vocab)

print("Vocab size:", vocab_size)

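# Pad (or truncate) every tokenized sample to a fixed sequence_length so the
# inputs have a uniform shape for the embedding layer.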
X_train = sequence.pad_sequences(X_train, maxlen=sequence_length)
X_test = sequence.pad_sequences(X_test, maxlen=sequence_length)

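# One-hot encode the integer emotion labels to match categorical_crossentropy.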
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

print("X_train.shape:", X_train.shape)
print("X_test.shape:", X_test.shape)

print("y_train.shape:", y_train.shape)
print("y_test.shape:", y_test.shape)

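# Rebuild the network and resume training from a previously saved checkpoint.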
model = get_model_emotions(vocab_size, sequence_length=sequence_length, embedding_size=embedding_size)
model.load_weights("results/model_v1_0.68_0.73.h5")
model.compile(loss="categorical_crossentropy", optimizer="rmsprop", metrics=["accuracy"])

if not os.path.isdir("results"):
    os.mkdir("results")

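# Save only the best model (by validation loss) after each epoch; newer Keras
# versions report "val_accuracy" rather than "val_acc", so the filename
# pattern may need adjusting.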
checkpointer = ModelCheckpoint("results/model_v1_{val_loss:.2f}_{val_acc:.2f}.h5", save_best_only=True, verbose=1)

model.fit(X_train, y_train,
          epochs=epochs,
          validation_data=(X_test, y_test),
          batch_size=batch_size,
          callbacks=[checkpointer])
# Example 2: classify the emotion of a single piece of text.
# with sr.Microphone() as source:
#     print("Say something!")

#     index = random.randint(0,len(Questionlist)-1)
#     c=Questionlist[index]
#     os.system("mpg321 "+ c)

#     #player.play_song("question.mp3")

#     audio = r.listen(source)

print("Loading vocab2int")
vocab2int = pickle.load(open("data/vocab2int.pickle", "rb"))

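# Rebuild the same architecture and load the best saved weights for inference.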
model = get_model_emotions(len(vocab2int),
                           sequence_length=sequence_length,
                           embedding_size=embedding_size)
model.load_weights("results/model_v1_0.59_0.76.h5")

if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(
        description="Emotion classifier using text")
    parser.add_argument("text", type=str, help="The text you want to analyze")

    args = parser.parse_args()

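    # Clean and tokenize the input text, map it to vocabulary indices, and
    # pad it to the sequence length used during training.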
    text = tokenize_words(clean_text(args.text), vocab2int)
    x = pad_sequences([text], maxlen=sequence_length)
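    # Note: predict_classes() exists only on Sequential models and was removed
    # in newer TensorFlow/Keras releases, where model.predict(x).argmax(axis=-1)
    # is the equivalent.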
    prediction = model.predict_classes(x)[0]