def train(model, args, device, writer, optimizer, data_loader, epoch):
    """Run one training epoch over `data_loader`.

    Tracks loss/accuracy per batch, accumulates a confusion matrix, and every
    `args.log_interval` batches derives ppv/sensitivity from it, flushes the
    tracker to TensorBoard and prints running stats.

    Returns:
        (metrics, writer_step): the populated MetricTracker and the last
        TensorBoard global step used.
    """
    model.train()
    criterion = nn.CrossEntropyLoss(reduction='mean')
    metric_ftns = ['loss', 'correct', 'total', 'accuracy', 'sens', 'ppv']
    metrics = MetricTracker(*metric_ftns, writer=writer, mode='train')
    metrics.reset()
    cm = torch.zeros(args.classes, args.classes)
    # Bug fix: pre-bind writer_step so the `return` below cannot raise
    # NameError when data_loader is empty (the original only assigned it
    # inside the loop body).
    writer_step = (epoch - 1) * len(data_loader)
    for batch_idx, input_tensors in enumerate(data_loader):
        input_data, target = input_tensors[0].to(device), input_tensors[1].to(
            device)
        # Forward
        output = model(input_data)
        loss = criterion(output, target)
        correct, total, acc = accuracy(output, target)
        update_confusion_matrix(cm, output, target)
        metrics.update_all_metrics({
            'correct': correct,
            'total': total,
            'loss': loss.item(),
            'accuracy': acc
        })
        # Backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # TensorBoard global step for this batch.
        writer_step = (epoch - 1) * len(data_loader) + batch_idx
        if ((batch_idx + 1) % args.log_interval == 0):
            # Derive ppv/sensitivity from the confusion counts accumulated
            # since the last log point, then restart the bucket.
            ppv, sens = update_confusion_calc(cm)
            metrics.update_all_metrics({'sens': sens, 'ppv': ppv})
            cm = torch.zeros(args.classes, args.classes)
            metrics.write_tb(writer_step)
            num_samples = batch_idx * args.batch_size
            print_stats(args, epoch, num_samples, data_loader, metrics)
    return metrics, writer_step
def validation(args, model, testloader, epoch, writer):
    """Evaluate `model` on `testloader` for one epoch (no gradients).

    Accumulates a confusion matrix over all batches, logs per-batch metrics
    to the tracker, then computes sensitivity and positive predictive value
    from the full-epoch confusion matrix.

    Returns:
        (val_metrics, confusion_matrix)
    """
    model.eval()
    criterion = nn.CrossEntropyLoss(reduction='mean')
    metric_ftns = [
        'loss', 'correct', 'total', 'accuracy', 'ppv', 'sensitivity'
    ]
    val_metrics = MetricTracker(*metric_ftns, writer=writer, mode='val')
    val_metrics.reset()
    confusion_matrix = torch.zeros(args.class_dict, args.class_dict)
    with torch.no_grad():
        for batch_idx, input_tensors in enumerate(testloader):
            input_data, target = input_tensors
            if (args.cuda):
                input_data = input_data.cuda()
                target = target.cuda()
            output = model(input_data)
            loss = criterion(output, target)
            correct, total, acc = accuracy(output, target)
            _, pred = torch.max(output, 1)
            # Bug fix: this line was computed twice verbatim in the original;
            # once is enough.
            num_samples = batch_idx * args.batch_size + 1
            for t, p in zip(target.cpu().view(-1), pred.cpu().view(-1)):
                confusion_matrix[t.long(), p.long()] += 1
            val_metrics.update_all_metrics(
                {
                    'correct': correct,
                    'total': total,
                    'loss': loss.item(),
                    'accuracy': acc
                },
                writer_step=(epoch - 1) * len(testloader) + batch_idx)
    print_summary(args, epoch, num_samples, val_metrics, mode="Validation")
    s = sensitivity(confusion_matrix.numpy())
    ppv = positive_predictive_value(confusion_matrix.numpy())
    print(f" s {s} ,ppv {ppv}")
    # NOTE: batch_idx here is the last batch index from the loop above, so
    # the epoch-level sensitivity/ppv land on the epoch's final writer step.
    val_metrics.update('sensitivity', s,
                       writer_step=(epoch - 1) * len(testloader) + batch_idx)
    val_metrics.update('ppv', ppv,
                       writer_step=(epoch - 1) * len(testloader) + batch_idx)
    print('Confusion Matrix\n{}'.format(confusion_matrix.cpu().numpy()))
    return val_metrics, confusion_matrix
def train(args, model, trainloader, optimizer, epoch):
    """Run one training epoch using the class-weighted loss.

    Prints per-batch stats, the epoch wall-clock time and a summary, and
    returns the populated MetricTracker.
    """
    start_time = time.time()
    model.train()
    train_metrics = MetricTracker(*METRICS_TRACKED, mode='train')
    # Per-class loss weights; the third class is up-weighted.
    w2 = torch.Tensor([1.0, 1.0, 1.5])
    if args.cuda:
        model.cuda()
        w2 = w2.cuda()
    train_metrics.reset()
    # Diagnostic counters only — never reported, kept for parity with the
    # original ("JUST FOR CHECK").
    counter_batches = 0
    counter_covid = 0
    for step, batch in enumerate(trainloader):
        optimizer.zero_grad()
        inputs, labels = batch
        counter_batches += 1
        if args.cuda:
            inputs = inputs.cuda()
            labels = labels.cuda()
        logits = model(inputs)
        loss, covid_count = weighted_loss(logits, labels, w2)
        counter_covid += covid_count
        loss.backward()
        optimizer.step()
        correct, total, acc = accuracy(logits, labels)
        precision_mean, recall_mean = precision_score(logits, labels)
        num_samples = step * args.batch_size + 1
        tb_step = (epoch - 1) * len(trainloader) + step
        train_metrics.update_all_metrics(
            {
                'correct': correct,
                'total': total,
                'loss': loss.item(),
                'accuracy': acc,
                'precision_mean': precision_mean,
                'recall_mean': recall_mean
            },
            writer_step=tb_step)
        print_stats(args, epoch, num_samples, trainloader, train_metrics)
    print("--- %s seconds ---" % (time.time() - start_time))
    print_summary(args, epoch, num_samples, train_metrics, mode="Training")
    return train_metrics
def validation(args, model, testloader, epoch):
    """Evaluate `model` for one epoch with the class-weighted loss.

    Returns:
        (val_metrics, confusion_matrix)
    """
    model.eval()
    val_metrics = MetricTracker(*METRICS_TRACKED, mode='val')
    val_metrics.reset()
    # Same per-class weights as training; third class up-weighted.
    w2 = torch.Tensor([1.0, 1.0, 1.5])
    #w_full = torch.Tensor([1.456,1.0,15.71])
    if args.cuda:
        w2 = w2.cuda()
    confusion_matrix = torch.zeros(args.classes, args.classes)
    with torch.no_grad():
        for step, batch in enumerate(testloader):
            inputs, labels = batch
            if args.cuda:
                inputs = inputs.cuda()
                labels = labels.cuda()
            logits = model(inputs)
            loss, counter = weighted_loss(logits, labels, w2)
            correct, total, acc = accuracy(logits, labels)
            precision_mean, recall_mean = precision_score(logits, labels)
            num_samples = step * args.batch_size + 1
            _, preds = torch.max(logits, 1)
            # Accumulate the confusion matrix over the whole epoch.
            for t, p in zip(labels.cpu().view(-1), preds.cpu().view(-1)):
                confusion_matrix[t.long(), p.long()] += 1
            val_metrics.update_all_metrics(
                {
                    'correct': correct,
                    'total': total,
                    'loss': loss.item(),
                    'accuracy': acc,
                    'precision_mean': precision_mean,
                    'recall_mean': recall_mean
                },
                writer_step=(epoch - 1) * len(testloader) + step)
    print_summary(args, epoch, num_samples, val_metrics, mode="Validation")
    print('Confusion Matrix\n {}'.format(confusion_matrix.cpu().numpy()))
    return val_metrics, confusion_matrix
def train(args, model, trainloader, optimizer, epoch, writer, log):
    """Run one cross-entropy training epoch.

    Accumulates a full-epoch confusion matrix, prints per-batch stats and an
    epoch summary, and returns the populated MetricTracker.

    NOTE(review): `log` is accepted but never used, and the 'ppv' /
    'sensitivity' trackers are created but never updated (they are only
    printed) — presumably intentional in the original; confirm with callers.
    """
    model.train()
    criterion = nn.CrossEntropyLoss(reduction='mean')
    metric_ftns = [
        'loss', 'correct', 'total', 'accuracy', 'ppv', 'sensitivity'
    ]
    train_metrics = MetricTracker(*metric_ftns, writer=writer, mode='train')
    train_metrics.reset()
    confusion_matrix = torch.zeros(args.class_dict, args.class_dict)
    for step, batch in enumerate(trainloader):
        optimizer.zero_grad()
        inputs, labels = batch
        if args.cuda:
            inputs = inputs.cuda()
            labels = labels.cuda()
        logits = model(inputs)
        loss = criterion(logits, labels)
        loss.backward()
        optimizer.step()
        correct, total, acc = accuracy(logits, labels)
        pred = torch.argmax(logits, dim=1)
        num_samples = step * args.batch_size + 1
        train_metrics.update_all_metrics(
            {
                'correct': correct,
                'total': total,
                'loss': loss.item(),
                'accuracy': acc
            },
            writer_step=(epoch - 1) * len(trainloader) + step)
        print_stats(args, epoch, num_samples, trainloader, train_metrics)
        # Fold this batch's predictions into the epoch confusion matrix.
        for t, p in zip(labels.cpu().view(-1), pred.cpu().view(-1)):
            confusion_matrix[t.long(), p.long()] += 1
    s = sensitivity(confusion_matrix.numpy())
    ppv = positive_predictive_value(confusion_matrix.numpy())
    print(f" s {s} ,ppv {ppv}")
    print_summary(args, epoch, num_samples, train_metrics, mode="Training")
    return train_metrics
def val(args, model, data_loader, epoch, writer, device):
    """Evaluate `model` on `data_loader` without gradients.

    Accumulates loss/accuracy per batch plus a confusion matrix, then derives
    ppv/sensitivity from the full-epoch matrix.

    NOTE(review): `epoch` is accepted but unused here — kept for signature
    parity with the matching train loop.

    Returns:
        (metrics, cm): the populated MetricTracker and the confusion matrix.
    """
    model.eval()
    criterion = nn.CrossEntropyLoss(reduction='mean')
    metric_ftns = ['loss', 'correct', 'total', 'accuracy', 'ppv', 'sens']
    metrics = MetricTracker(*metric_ftns, writer=writer, mode='val')
    metrics.reset()
    cm = torch.zeros(args.classes, args.classes)
    with torch.no_grad():
        for batch_idx, input_tensors in enumerate(data_loader):
            # Bug fix: the original called torch.cuda.empty_cache()
            # unconditionally, which is pointless (and can error on some
            # builds) when no CUDA device exists. Note that per-batch cache
            # flushes also force synchronization and slow validation down.
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            input_data, target = input_tensors[0].to(
                device), input_tensors[1].to(device)
            # Forward
            output = model(input_data)
            loss = criterion(output, target)
            correct, total, acc = accuracy(output, target)
            update_confusion_matrix(cm, output, target)
            # Update the metrics record
            metrics.update_all_metrics({
                'correct': correct,
                'total': total,
                'loss': loss.item(),
                'accuracy': acc
            })
    # Epoch-level ppv/sensitivity from the accumulated confusion matrix.
    ppv, sens = update_confusion_calc(cm)
    metrics.update_all_metrics({'sens': sens, 'ppv': ppv})
    return metrics, cm
def train(args, model, trainloader, optimizer, epoch, writer):
    """Run one plain cross-entropy training epoch.

    Logs loss/accuracy per batch to the tracker, prints running stats and a
    final summary, and returns the populated MetricTracker.
    """
    model.train()
    criterion = nn.CrossEntropyLoss(reduction='mean')
    tracked = ['loss', 'correct', 'total', 'accuracy']
    train_metrics = MetricTracker(*tracked, writer=writer, mode='train')
    train_metrics.reset()
    for step, batch in enumerate(trainloader):
        optimizer.zero_grad()
        inputs, labels = batch
        if args.cuda:
            inputs = inputs.cuda()
            labels = labels.cuda()
        logits = model(inputs)
        loss = criterion(logits, labels)
        loss.backward()
        optimizer.step()
        correct, total, acc = accuracy(logits, labels)
        num_samples = step * args.batch_size + 1
        tb_step = (epoch - 1) * len(trainloader) + step
        train_metrics.update_all_metrics(
            {
                'correct': correct,
                'total': total,
                'loss': loss.item(),
                'accuracy': acc
            },
            writer_step=tb_step)
        print_stats(args, epoch, num_samples, trainloader, train_metrics)
    print_summary(args, epoch, num_samples, train_metrics, mode="Training")
    return train_metrics