        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]

# Optimizer and Scheduler
optimizer = torch.optim.AdamW(optimizer_parameters, lr=3e-5)
num_training_steps = int(
    len(train_dataset) / config.TRAIN_BATCH_SIZE * config.EPOCHS)
scheduler = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=0,
    num_training_steps=num_training_steps)

# Training loop
best_micro = 0
for epoch in range(config.EPOCHS):
    engine.train(epoch, model, training_loader, device, optimizer, scheduler)
    outputs, targets = engine.validation(epoch, model, testing_loader, device)
    outputs = np.array(outputs) >= 0.5  # threshold predicted probabilities at 0.5

    accuracy = metrics.accuracy_score(targets, outputs)
    f1_score_micro = metrics.f1_score(targets, outputs, average='micro')
    f1_score_macro = metrics.f1_score(targets, outputs, average='macro')
    print(f"Accuracy Score = {accuracy}")
    print(f"F1 Score (Micro) = {f1_score_micro}")
    print(f"F1 Score (Macro) = {f1_score_macro}")

    # Keep only the checkpoint with the best micro-averaged F1 so far
    if f1_score_micro > best_micro:
        torch.save(model.state_dict(), config.MODEL_PATH)
        best_micro = f1_score_micro
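The `engine` module referenced above is not shown in this snippet. As a rough sketch of what its `train` and `validation` helpers could look like for this multi-label setup, the code below assumes a BCEWithLogitsLoss objective, a model that takes `(ids, mask, token_type_ids)`, and batches keyed by `'ids'`, `'mask'`, `'token_type_ids'`, and `'targets'`; all of these names are assumptions, not the author's actual implementation.

# Hypothetical sketch of engine.train / engine.validation for the loop above.
# Batch keys, the model signature, and the BCEWithLogitsLoss objective are
# assumptions for a typical multi-label BERT fine-tuning setup.
import torch
import torch.nn as nn

def train(epoch, model, loader, device, optimizer, scheduler):
    model.train()
    loss_fn = nn.BCEWithLogitsLoss()
    for batch in loader:
        ids = batch['ids'].to(device)
        mask = batch['mask'].to(device)
        token_type_ids = batch['token_type_ids'].to(device)
        targets = batch['targets'].to(device, dtype=torch.float)

        optimizer.zero_grad()
        logits = model(ids, mask, token_type_ids)
        loss = loss_fn(logits, targets)
        loss.backward()
        optimizer.step()
        scheduler.step()  # linear warmup schedule is stepped per batch

def validation(epoch, model, loader, device):
    model.eval()
    all_outputs, all_targets = [], []
    with torch.no_grad():
        for batch in loader:
            ids = batch['ids'].to(device)
            mask = batch['mask'].to(device)
            token_type_ids = batch['token_type_ids'].to(device)
            targets = batch['targets'].to(device, dtype=torch.float)

            logits = model(ids, mask, token_type_ids)
            # Return sigmoid probabilities; the caller thresholds them at 0.5
            all_outputs.extend(torch.sigmoid(logits).cpu().numpy().tolist())
            all_targets.extend(targets.cpu().numpy().tolist())
    return all_outputs, all_targets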
          train_loader, model, train_dir, ord_loss, optimizer, epoch, logger,
          PRINT_FREQ, BETA=BETA, GAMMA=GAMMA, ORD_NUM=80.0)
    validation(device, val_loader, model, ord_loss, val_dir, epoch, logger,
               PRINT_FREQ, BETA=BETA, GAMMA=GAMMA, ORD_NUM=80.0)

    # Save a checkpoint of the full model object after every epoch
    checkpoint_filename = os.path.join(
        output_dir, 'checkpoint-{}.pth.tar'.format(str(epoch)))
    torch.save(model, checkpoint_filename)
    epochbar.update(1)
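Because `torch.save(model, ...)` pickles the entire model object rather than just its `state_dict`, reloading one of these per-epoch checkpoints only requires `torch.load`, provided the original model class is importable. The path below is illustrative, not one produced by the script.

# Minimal sketch of reloading one of the per-epoch checkpoints saved above.
# 'checkpoint-5.pth.tar' is a hypothetical path; the model class definition
# must be importable when unpickling a full-model checkpoint.
import torch

checkpoint_path = 'checkpoint-5.pth.tar'
# On recent PyTorch releases, loading a pickled full model may additionally
# require passing weights_only=False to torch.load.
model = torch.load(checkpoint_path, map_location='cpu')
model.eval()  # switch to inference mode before running validation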