# NOTE(review): This appears to be a Jupyter-notebook cell ("# In[441]:" is
# visible inside it) that was flattened onto a single physical line during
# extraction. Because the line starts with '#', Python treats the ENTIRE
# line as a comment: none of the embedded statements (data_iter/eval_iter
# setup, tls.training_loop, tls.load_checkpoint, the dataGenerator test-set
# construction) actually execute in this form.
# NOTE(review): The line also ends mid-expression — `test_acc =
# tls.evaluate(model,` is truncated before its argument list closes — so the
# original cell cannot be reconstructed from this view alone. Restore the
# line breaks from the source notebook rather than guessing the missing
# arguments.
# The intended flow (as far as the visible text shows) seems to be:
#   1. build batched train / train-eval / validation iterators,
#   2. run tls.training_loop for num_epochs with checkpointing to
#      save_model_path (cuda=True),
#   3. reload the best checkpoint via tls.load_checkpoint,
#   4. build a test iterator from data_path + 'test.txt' and evaluate.
# (Presumed from fragment names only — confirm against the original notebook.)
# Train the model training_iter = data_iter(training_set, batch_size) train_eval_iter = eval_iter(training_set[:256], batch_size) validation_iter = eval_iter(validation_set, batch_size) total_batches = int(len(training_set) / batch_size) #total_batches = 100 # In[441]: tls.training_loop(batch_size, total_batches, hyper_params['alphabet_size'], hyper_params['l0'], num_epochs, model, loss, optimizer, training_iter, validation_iter, train_eval_iter, save_model_path, comet, cuda=True) # Loading best model and calculating accuracy on test set tls.load_checkpoint(model, save_model_path) test_set = dataGenerator(data_path + 'test.txt', test=True, max_length=hyper_params['l0']) test_iter = eval_iter(test_set, batch_size) test_acc = tls.evaluate(model,
# NOTE(review): Like the line above, this is a notebook cell flattened onto
# one physical line. Only `print(model)` executes; everything after the
# first '#' is a single trailing comment at runtime, so the training loop,
# checkpoint reload, and test evaluation embedded in it are dead text in
# this form.
# NOTE(review): This variant differs from the other flattened cell: it uses
# bare variables (alphabet_size, l0, save_path) instead of
# hyper_params[...] / save_model_path, and builds the test set with
# dataGeneratorTest(list_subword_without_end, ...) instead of
# dataGenerator(..., test=True) — presumably an older/newer revision of the
# same pipeline; verify which one is current before restoring.
# NOTE(review): The line is truncated mid-call (`test_acc =
# tls.evaluate(model,`); recover the full statement and the original line
# breaks from the source notebook — do not reconstruct by guessing.
print(model) # Train the model training_iter = data_iter(training_set, batch_size) train_eval_iter = eval_iter(training_set[:256], batch_size) validation_iter = eval_iter(validation_set, batch_size) total_batches = int(len(training_set) / batch_size) tls.training_loop(batch_size, total_batches, alphabet_size, l0, num_epochs, model, loss, optimizer, training_iter, validation_iter, train_eval_iter, save_path, comet, cuda=True) # Loading best model and calculating accuracy on test set tls.load_checkpoint(model, save_path) test_set = dataGeneratorTest(list_subword_without_end, file_name=data_path + 'test.txt', max_length=l0) test_iter = eval_iter(test_set, batch_size) test_acc = tls.evaluate(model,