def train(train_loader: Any, model: Any, criterion: Any, optimizer: Any,
          epoch: int, lr_scheduler: Any) -> None:
    """Run a single training epoch.

    Iterates over `train_loader` for at most `opt.TRAIN.STEPS_PER_EPOCH`
    batches, performing forward/backward passes and optimizer steps, while
    tracking loss, F2 score and batch timing. Progress is reported every
    `opt.TRAIN.PRINT_FREQ` steps through the module logger.

    Args:
        train_loader: iterable of (input, target) batches.
        model: network being trained (moved to GPU inputs via `.cuda()`).
        criterion: loss function applied to (output, target).
        optimizer: optimizer whose `step()` is called each batch.
        epoch: current epoch index, used for logging only.
        lr_scheduler: scheduler; `batch_step()` is invoked per batch if present.
    """
    logger.info(f'epoch {epoch}')
    batch_time = AverageMeter()
    losses = AverageMeter()
    avg_score = AverageMeter()

    model.train()
    num_steps = min(len(train_loader), opt.TRAIN.STEPS_PER_EPOCH)
    # Use the logger for consistency with the rest of the reporting
    # (original used a bare print here).
    logger.info(f'total batches: {len(train_loader)}')

    end = time.time()
    for i, (input_, target) in enumerate(train_loader):
        # Break on the precomputed cap rather than re-reading the option.
        if i >= num_steps:
            break

        # compute output
        output = model(input_.cuda())
        loss = criterion(output, target.cuda())

        # get metric: binarize predictions at 0.5 on CPU to match `target`
        predict = (output.detach() > 0.5).type(torch.FloatTensor)
        avg_score.update(F_score(predict, target).item())

        # compute gradient and do SGD step
        # `.item()` replaces the deprecated `.data.item()` access.
        losses.update(loss.item(), input_.size(0))
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Some schedulers step per batch (e.g. cyclic LR) rather than per epoch.
        if hasattr(lr_scheduler, 'batch_step'):
            lr_scheduler.batch_step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % opt.TRAIN.PRINT_FREQ == 0:
            logger.info(f'{epoch} [{i}/{num_steps}]\t'
                        f'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                        f'loss {losses.val:.4f} ({losses.avg:.4f})\t'
                        f'F2 {avg_score.val:.4f} ({avg_score.avg:.4f})')

    logger.info(f' * average accuracy on train {avg_score.avg:.4f}')
def validate(val_loader: Any, model: Any, epoch: int) -> Tuple[float, float]:
    """Evaluate the model on the validation set.

    Runs inference over `val_loader`, then sweeps candidate thresholds over
    [0.05, 0.15] to maximize the F2 metric, and returns the pair
    (best_score, best_threshold).
    """
    logger.info('validate()')

    raw_predicts, raw_targets = inference(val_loader, model)
    predicts = torch.tensor(raw_predicts)
    targets = torch.tensor(raw_targets)

    best_score = 0.0
    best_thresh = 0.0
    for candidate in tqdm(np.linspace(0.05, 0.15, 33), disable=IN_KERNEL):
        current = F_score(predicts, targets, threshold=candidate).item()
        if current > best_score:
            best_score = current
            best_thresh = candidate

    logger.info(f'{epoch} F2 {best_score:.4f} threshold {best_thresh:.4f}')
    logger.info(f' * F2 on validation {best_score:.4f}')
    return best_score, best_thresh