# Sentiment-classification training script (Keras): loads a cached text corpus,
# builds 5-class one-hot labels, fits a tokenizer, splits train/test, then
# resumes/continues training a saved model with checkpointing + TensorBoard.
#
# NOTE(review): this chunk was whitespace-mangled onto one line; reformatted
# here without changing any code tokens. The final model.fit(...) call is
# truncated in this chunk (ends at `batch_size=512,`) — the remaining
# arguments (epochs, callbacks, validation data) are outside the visible file.

# Load the cached corpus; allow_pickle=True implies the .npy holds Python
# objects (strings) — only safe for trusted, locally produced files.
all_texts = np.load('text_cache.npy', allow_pickle=True)

# One-hot encode labels into 5 sentiment classes.
# NOTE(review): `sentiments` is not defined in this chunk — presumably loaded
# earlier in the file; verify.
categorical_sentiments = to_categorical(sentiments, num_classes=5)

# Vocabulary capped at 300k words. oov_token=None means out-of-vocabulary
# words are silently dropped from sequences rather than mapped to an <OOV>
# index — confirm this is intended.
tokenizer = Tokenizer(num_words=300000, oov_token=None)
tokenizer.fit_on_texts(all_texts)

# NOTE(review): the split uses `texts` but the tokenizer above was fit on
# `all_texts`. If `texts` is not defined elsewhere this raises NameError;
# if it is a different corpus, the tokenizer vocabulary and the training
# data disagree. Likely both should be the same variable — verify.
X_train, X_test, Y_train, Y_test = train_test_split(texts, categorical_sentiments, test_size=0.2)

# Persist only the training split; X_test/Y_test are never saved here, so the
# held-out set cannot be reproduced across runs (train_test_split is unseeded).
np.save("text_train.npy", X_train)
np.save("sentiment_train.npy", Y_train)

models = Models()

# Per-run TensorBoard log directory, timestamped to avoid collisions.
logdir = "logs/scalars/" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = TensorBoard(log_dir=logdir)

# Checkpoint filename template: epoch number is interpolated by ModelCheckpoint.
filepath = "savedModel2/saved-model3-{epoch:02d}.h5"
filepath2 = "return.h5"

# Load a base model and hand it to the model builder.
# NOTE(review): build_myModel's return value is ignored; it presumably stores
# the built model on `models.model` — confirm.
model = load_model(filepath2)
models.build_myModel(embeddings, model)
model = models.model

# Resume from a prior checkpoint if one exists.
# NOTE(review): this silently DISCARDS the model built just above from
# `return.h5` whenever the checkpoint file is present — confirm that is the
# intended precedence.
if os.path.isfile("savedModel/saved-model3-25.h5"):
    model = load_model("savedModel/saved-model3-25.h5")

# Keep only the best checkpoint by (training) loss; monitoring 'loss' rather
# than 'val_loss' means checkpoints track fit to the training data.
checkpoint = ModelCheckpoint(filepath, monitor='loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint, tensorboard_callback]

# Train on the first 100k samples; texts are tokenized and padded/truncated
# to length 150 on the fly.
# NOTE(review): call is truncated here in this chunk — trailing arguments
# (epochs, callbacks=callbacks_list, ...) are not visible.
model.fit(pad_sequences(tokenizer.texts_to_sequences(X_train[:100000]), maxlen=150), Y_train[:100000], batch_size=512,