def test_model(test_loader, net):
    net.eval()
    device = params['device']
    batch_size = params['batch_size']
    test_loss = 0
    test_acc = 0
    test_iou = {}
    with torch.no_grad():
        for batch_index, (img, target) in enumerate(test_loader):
            img, target = img.to(device), target.to(device)
            # torchvision's DeepLab models return a dict; other nets return the tensor directly
            if model_version == 'deeplab':
                output = net(img)['out']
            else:
                output = net(img)
            target = target.long()
            loss = criterion(output, target).item()
            test_loss += loss
            pred = aux.get_predicted_image(output)
            # Detach the tensors and move them to CPU to compute the metrics
            output, target, pred = output.detach().cpu(), target.detach().cpu(), pred.detach().cpu()
            # Compute the number of correct predictions in the batch
            test_accuracy = metrics.calculate_accuracy(output, target)
            test_acc += test_accuracy
            # Accumulate per-class IoU over batches
            iou_inds = metrics.calculate_iou(pred, target)
            for key in iou_inds:
                if key not in test_iou:
                    test_iou[key] = iou_inds[key]
                else:
                    test_iou[key] += iou_inds[key]
    # Average the summed metrics over the number of batches
    test_loss = test_loss / (len(test_loader.dataset) / batch_size)
    test_acc = 100 * (test_acc / (len(test_loader.dataset) / batch_size))
    test_iou = metrics.convert_batched_iou(test_iou, (len(test_loader.dataset) / batch_size))
    mIoU = metrics.get_mIoU(test_iou)
    mIoU_desc = metrics.miou_to_string(test_iou)
    return test_loss, test_acc, mIoU, mIoU_desc
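# Note on the averaging above: dividing by len(test_loader.dataset) / batch_size
# matches the true batch count only when the dataset size is an exact multiple
# of batch_size (e.g. with drop_last=True). A minimal sketch of a batch-count
# alternative; average_over_batches is a hypothetical helper, not project API:
def average_over_batches(metric_sum, loader):
    # len(loader) is the number of batches actually yielded,
    # including a possible short final batch.
    return metric_sum / len(loader)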
def val_one_epoch(val_loader, net):
    net.eval()
    device = params['device']
    batch_size = params['batch_size']
    val_loss = 0
    val_acc = 0
    val_iou = {}
    with torch.no_grad():
        for batch_index, (img, target) in enumerate(val_loader):
            img, target = img.to(device), target.to(device)
            output = net(img)
            target = target.long()
            loss = criterion(output, target).item()
            val_loss += loss
            pred = aux.get_predicted_image(output)
            # Detach the tensors and move them to CPU to compute the metrics
            output, target, pred = output.detach().cpu(), target.detach().cpu(), pred.detach().cpu()
            # Compute the number of correct predictions in the batch
            val_accuracy = metrics.calculate_accuracy(output, target)
            val_acc += val_accuracy
            # Accumulate per-class IoU over batches
            iou_inds = metrics.calculate_iou(pred, target)
            for key in iou_inds:
                if key not in val_iou:
                    val_iou[key] = iou_inds[key]
                else:
                    val_iou[key] += iou_inds[key]
    # Average loss and accuracy across all batches
    val_loss = val_loss / (len(val_loader.dataset) / batch_size)
    val_acc = 100 * (val_acc / (len(val_loader.dataset) / batch_size))
    val_iou = metrics.convert_batched_iou(val_iou, (len(val_loader.dataset) / batch_size))
    mIoU = metrics.get_mIoU(val_iou)
    mIoU_desc = metrics.miou_to_string(val_iou)
    return val_loss, val_acc, mIoU, mIoU_desc
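# aux.get_predicted_image and metrics.calculate_accuracy are project helpers
# defined elsewhere. A minimal sketch, assuming the usual semantic-segmentation
# conventions of output shaped (N, C, H, W) and target shaped (N, H, W); the
# *_sketch names are hypothetical stand-ins, not the project's API:
import torch

def get_predicted_image_sketch(output):
    # Per-pixel class = index of the channel (feature map) with the highest score.
    return output.argmax(dim=1)

def calculate_accuracy_sketch(output, target):
    # Fraction of pixels whose predicted class matches the target label.
    pred = output.argmax(dim=1)
    return (pred == target).float().mean().item()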
def train_one_epoch(train_loader, net, optimizer, criterion, hparams):
    # Activate the train=True flag inside the model
    net.train()
    device = hparams['device']
    batch_size = hparams['batch_size']
    train_loss, train_accs = 0, 0
    train_iou = {}
    times_per_step_iteration = []
    times_per_metric_iteration = []
    times_per_iteration = []
    for batch_index, (img, target) in enumerate(train_loader):
        # Start the overall timer for this iteration
        start_total.record()
        img, target = img.to(device), target.to(device)
        optimizer.zero_grad()
        # Start the timer for the forward/backward step
        start.record()
        output = net(img)
        target = target.long()
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        pred = aux.get_predicted_image(output)
        # Stop the step timer
        end.record()
        torch.cuda.synchronize()
        times_per_step_iteration.append(start.elapsed_time(end))
        # Start the timer for the metrics
        start.record()
        # Detach the tensors and move them to CPU to compute the metrics
        output, target, pred = output.detach().cpu(), target.detach().cpu(), pred.detach().cpu()
        train_loss += loss.item()
        # calculate_accuracy takes the per-pixel argmax over the class dimension:
        # each pixel's class is the feature map index holding the highest value
        train_accuracy = metrics.calculate_accuracy(output, target)
        train_accs += train_accuracy
        # Accumulate per-class IoU over batches
        iou_inds = metrics.calculate_iou(pred, target)
        for key in iou_inds:
            if key not in train_iou:
                train_iou[key] = iou_inds[key]
            else:
                train_iou[key] += iou_inds[key]
        # Stop the metrics timer
        end.record()
        torch.cuda.synchronize()
        times_per_metric_iteration.append(start.elapsed_time(end))
        # Stop the overall timer
        end_total.record()
        torch.cuda.synchronize()
        times_per_iteration.append(start_total.elapsed_time(end_total))
    avg_time_taken = sum(times_per_iteration) / len(times_per_iteration)
    avg_time_step_taken = sum(times_per_step_iteration) / len(times_per_step_iteration)
    avg_time_metrics_taken = sum(times_per_metric_iteration) / len(times_per_metric_iteration)
    # elapsed_time() reports milliseconds, so scale by 1e-3 to print seconds
    print('Average time spent total: {:.02f}s'.format(avg_time_taken * 1e-3))
    print('Average time spent by steps: {:.02f}s'.format(avg_time_step_taken * 1e-3))
    print('Average time spent by metrics: {:.02f}s'.format(avg_time_metrics_taken * 1e-3))
    print('Average time spent by data load: {:.02f}s'.format(
        (avg_time_taken - avg_time_step_taken - avg_time_metrics_taken) * 1e-3))
    # Average the summed metrics over the number of batches
    train_loss = train_loss / (len(train_loader.dataset) / batch_size)
    train_accs = 100 * (train_accs / (len(train_loader.dataset) / batch_size))
    train_iou = metrics.convert_batched_iou(train_iou, (len(train_loader.dataset) / batch_size))
    mIoU = metrics.get_mIoU(train_iou)
    mIoU_desc = metrics.miou_to_string(train_iou)
    return train_loss, train_accs, mIoU, mIoU_desc
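# train_one_epoch relies on four CUDA timing events (start, end, start_total,
# end_total) that are not defined in this section. A minimal sketch of how they
# would be created; defining them at module scope is an assumption about this
# script, not shown in the source:
import torch

start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
start_total = torch.cuda.Event(enable_timing=True)
end_total = torch.cuda.Event(enable_timing=True)
# start.elapsed_time(end) returns milliseconds, which is why the prints
# above multiply by 1e-3 to report seconds.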