reduce_learning_rate = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                         patience=5, verbose=1)
callbacks = [csv_logger, model_checkpoint, reduce_learning_rate]

history = model.fit_generator(
    generator=generator.flow(mode='train'),
    steps_per_epoch=int(num_training_samples / batch_size),
    epochs=num_epochs,
    verbose=1,
    callbacks=callbacks,
    validation_data=generator.flow(mode='validation'),
    validation_steps=int(num_validation_samples / batch_size))

model.save(os.path.join('../trained_models/nuswide', 'weights_final.hdf5'))

# List all metrics recorded in the training history
print(history.history.keys())

# Summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
# plt.show()
plt.savefig('../results/acc_val_acc.png')

# Summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
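Both training snippets assume that csv_logger and model_checkpoint were created earlier in the script. A minimal sketch of how they might be defined with the standard Keras CSVLogger and ModelCheckpoint callbacks; the log and checkpoint paths below are placeholders, not the project's actual paths:

from keras.callbacks import CSVLogger, ModelCheckpoint

# Placeholder paths -- substitute the paths used elsewhere in the project.
csv_logger = CSVLogger('../results/training_log.csv', append=False)
model_checkpoint = ModelCheckpoint(
    '../trained_models/nuswide/weights.{epoch:02d}-{val_loss:.2f}.hdf5',
    monitor='val_loss',
    verbose=1,
    save_best_only=True)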
reduce_learning_rate = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                         patience=5, verbose=1)
callbacks = [csv_logger, model_checkpoint, reduce_learning_rate]

history = model.fit_generator(
    generator=generator.flow(mode='train'),
    steps_per_epoch=num_training_samples // batch_size,
    epochs=num_epochs,
    verbose=1,
    callbacks=callbacks,
    validation_data=generator.flow(mode='validation'),
    validation_steps=num_validation_samples // batch_size)

model.save('trained_models/image_text/weights_final.hdf5')

# List all metrics recorded in the training history
print(history.history.keys())

# Summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
# plt.show()
plt.savefig('./results/acc_val_acc.png')

plt.cla()    # clear the current axes
plt.clf()    # clear all axes of the current figure
plt.close()  # close the current figure
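Note that fit_generator is deprecated in tf.keras (TensorFlow 2.x), where model.fit accepts Python generators directly. A hedged sketch of the equivalent call, assuming the same generator, callbacks, and hyperparameters as above:

# Sketch only: reuses the names defined in the snippet above.
history = model.fit(
    generator.flow(mode='train'),
    steps_per_epoch=num_training_samples // batch_size,
    epochs=num_epochs,
    verbose=1,
    callbacks=callbacks,
    validation_data=generator.flow(mode='validation'),
    validation_steps=num_validation_samples // batch_size)

With recent tf.keras versions the history keys are 'accuracy' and 'val_accuracy' instead of 'acc' and 'val_acc', so the plotting code would need the same rename.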