import time

import torch
import torch.nn as nn
from tqdm import tqdm

# DataLoader, RNN, config, and the train/evaluate/epoch_time helpers are
# defined elsewhere in this project.


def main(dl: DataLoader, model: RNN):
    prev_best = 0
    patience = 0
    decay = 0
    lr = config.lr
    # optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = nn.BCEWithLogitsLoss()

    for epoch in tqdm(range(config.max_epochs)):
        start_time = time.time()
        train_loss, train_acc = train(dl.train_examples, model, optimizer,
                                      criterion)
        dev_loss, dev_acc = evaluate(dl.dev_examples, model, criterion)
        end_time = time.time()

        epoch_mins, epoch_secs = epoch_time(start_time, end_time)
        print(
            f'Epoch: {epoch + 1:02} | Epoch Time: {epoch_mins}m {epoch_secs}s')
        print(f'\tTrain Loss: {train_loss:.3f} | Train Acc: {train_acc:.2f}')
        print(f'\t Dev Loss: {dev_loss:.3f} | Dev Acc: {dev_acc:.2f}')

        if dev_acc <= prev_best:
            patience += 1
            if patience == 3:
                # Halve the learning rate; note that the optimizer is rebuilt
                # as SGD here even though training starts with Adam.
                lr *= 0.5
                optimizer = torch.optim.SGD(model.parameters(), lr=lr)
                tqdm.write(
                    'Dev accuracy did not increase in 3 epochs, halving the learning rate'
                )
                patience = 0
                decay += 1
        else:
            prev_best = dev_acc
            print('Save the best model')
            model.save()

        if decay >= 3:
            print('Load the best model')
            model.load()
            print('Evaluating model on test set')
            test_loss, test_acc = evaluate(dl.test_examples, model, criterion)
            print(
                f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc * 100:.2f}%'
            )
            break
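# The epoch_time helper called above is not shown in this section; the
# following is a minimal sketch consistent with how it is used (an
# assumption, not necessarily the project's actual implementation).
def epoch_time(start_time, end_time):
    # Split a wall-clock interval in seconds into whole minutes and seconds.
    elapsed = end_time - start_time
    mins = int(elapsed / 60)
    secs = int(elapsed - mins * 60)
    return mins, secs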
import json

import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping

# model and imdb (the data plus its tokenizer) are defined elsewhere in this
# project.

if __name__ == '__main__':
    # Available devices can be listed with
    # tf.config.list_physical_devices("GPU")  # or "CPU"
    with tf.device("/GPU:0"):
        # Model training
        model.fit(
            imdb.X_train,
            imdb.Y_train,
            batch_size=512,
            epochs=10,
            validation_split=0.2,
            callbacks=[EarlyStopping(patience=2, verbose=1)],
        )

        # Run model on the test set
        accr = model.evaluate(imdb.X_test, imdb.Y_test)
        print('Test set\n  Loss: {:0.4f}\n  Accuracy: {:0.2f}'.format(
            accr[0], accr[1] * 100))

        # Save to HDF5 (model.save stores the full model: architecture,
        # weights, and optimizer state, not weights alone)
        model.save("../model/tf/weights.h5")
        print("Saved model to disk")

        # Save the model architecture as JSON
        model_json = model.to_json()
        with open("../model/tf/model.json", "w") as file:
            file.write(model_json)

        # Save the tokenizer as JSON
        tokenizer_json = imdb.tokenizer.to_json()
        with open("../model/tf/tokenizer.json", 'w', encoding='utf-8') as file:
            file.write(json.dumps(tokenizer_json, ensure_ascii=True))
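# A sketch of restoring the artifacts saved above for inference. The paths
# mirror the save code; the sample text and maxlen are assumptions and must
# match whatever preprocessing was used at training time.
import json

from tensorflow.keras.models import model_from_json
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import tokenizer_from_json

with open("../model/tf/model.json") as f:
    model = model_from_json(f.read())
model.load_weights("../model/tf/weights.h5")

with open("../model/tf/tokenizer.json", encoding="utf-8") as f:
    # json.load undoes the json.dumps wrapper applied at save time,
    # yielding the tokenizer's own JSON string.
    tokenizer = tokenizer_from_json(json.load(f))

seqs = tokenizer.texts_to_sequences(["a surprisingly touching film"])
padded = pad_sequences(seqs, maxlen=500)  # maxlen=500 is an assumption
print(model.predict(padded))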