import torch
from tqdm import tqdm

import metrics     # project-local module providing the Dice losses/metrics
import data_utils  # project-local module, used by the satellite-image snippets below

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def val(model, val_loader, epoch, logger):
    model.eval()
    val_loss = 0
    val_dice0 = 0
    val_dice1 = 0
    val_dice2 = 0
    with torch.no_grad():
        for data, target in val_loader:
            data, target = data.float(), target.float()
            data, target = data.to(device), target.to(device)
            output = model(data)

            loss = metrics.DiceMeanLoss()(output, target)
            dice0 = metrics.dice(output, target, 0)
            dice1 = metrics.dice(output, target, 1)
            dice2 = metrics.dice(output, target, 2)

            val_loss += float(loss)
            val_dice0 += float(dice0)
            val_dice1 += float(dice1)
            val_dice2 += float(dice2)

    # average the per-batch values over the whole loader
    val_loss /= len(val_loader)
    val_dice0 /= len(val_loader)
    val_dice1 /= len(val_loader)
    val_dice2 /= len(val_loader)

    logger.scalar_summary('val_loss', val_loss, epoch)
    logger.scalar_summary('val_dice0', val_dice0, epoch)
    logger.scalar_summary('val_dice1', val_dice1, epoch)
    logger.scalar_summary('val_dice2', val_dice2, epoch)
    print('\nVal set: Average loss: {:.6f}\tdice0: {:.6f}\tdice1: {:.6f}\tdice2: {:.6f}\n'.format(
        val_loss, val_dice0, val_dice1, val_dice2))
def train(model, train_loader, optimizer, epoch, logger):
    print("=======Epoch:{}=======".format(epoch))
    model.train()
    train_loss = 0
    train_dice0 = 0
    train_dice1 = 0
    train_dice2 = 0
    for idx, (data, target) in tqdm(enumerate(train_loader), total=len(train_loader)):
        # the loader yields one volume of patches per item; drop the dummy batch dim
        data = torch.squeeze(data, dim=0)
        target = torch.squeeze(target, dim=0)
        data, target = data.float(), target.float()
        data, target = data.to(device), target.to(device)

        output = model(data)
        optimizer.zero_grad()
        # alternative criteria, kept for reference:
        # loss = nn.CrossEntropyLoss()(output, target)
        # loss = metrics.SoftDiceLoss()(output, target)
        # loss = nn.MSELoss()(output, target)
        loss = metrics.DiceMeanLoss()(output, target)
        # loss = metrics.WeightDiceLoss()(output, target)
        # loss = metrics.CrossEntropy()(output, target)
        loss.backward()
        optimizer.step()

        # accumulate plain floats so the autograd graph is freed each step
        # (accumulating the loss tensor itself would keep every graph alive)
        train_loss += loss.item()
        train_dice0 += float(metrics.dice(output, target, 0))
        train_dice1 += float(metrics.dice(output, target, 1))
        train_dice2 += float(metrics.dice(output, target, 2))

    train_loss /= len(train_loader)
    train_dice0 /= len(train_loader)
    train_dice1 /= len(train_loader)
    train_dice2 /= len(train_loader)

    print('Train Epoch: {}\tLoss: {:.4f}\tdice0: {:.4f}\tdice1: {:.4f}\tdice2: {:.4f}'.format(
        epoch, train_loss, train_dice0, train_dice1, train_dice2))
    logger.scalar_summary('train_loss', train_loss, epoch)
    logger.scalar_summary('train_dice0', train_dice0, epoch)
    logger.scalar_summary('train_dice1', train_dice1, epoch)
    logger.scalar_summary('train_dice2', train_dice2, epoch)
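# --- Usage sketch (not part of the original snippets) ----------------------
# A minimal driver showing how the train()/val() pair above is typically
# wired together. The optimizer choice, learning rate, num_epochs, and the
# checkpoint path are illustrative assumptions, not names defined above.
import torch.optim as optim

def run_training(model, train_loader, val_loader, logger, num_epochs=50):
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    for epoch in range(1, num_epochs + 1):
        train(model, train_loader, optimizer, epoch, logger)
        val(model, val_loader, epoch, logger)
        # checkpoint after every epoch (path is an arbitrary choice)
        torch.save(model.state_dict(), 'checkpoint_epoch{}.pth'.format(epoch))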
def train(model, train_loader, optimizer, epoch, logger):
    model.train()
    train_loss = 0
    train_dice0 = 0
    train_dice1 = 0
    train_dice2 = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        data = torch.squeeze(data, dim=0)
        target = torch.squeeze(target, dim=0)
        data, target = data.float(), target.float()
        data, target = data.to(device), target.to(device)

        output = model(data)
        optimizer.zero_grad()
        # alternative criteria, kept for reference:
        # loss = nn.CrossEntropyLoss()(output, target)
        # loss = metrics.SoftDiceLoss()(output, target)
        # loss = nn.MSELoss()(output, target)
        loss = metrics.DiceMeanLoss()(output, target)
        # loss = metrics.WeightDiceLoss()(output, target)
        # loss = metrics.CrossEntropy()(output, target)
        loss.backward()
        optimizer.step()

        # note: these are overwritten every iteration, so only the last
        # batch's values reach the tensorboard summaries below
        train_loss = loss.item()
        train_dice0 = float(metrics.dice(output, target, 0))
        train_dice1 = float(metrics.dice(output, target, 1))
        train_dice2 = float(metrics.dice(output, target, 2))

        print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tdice0: {:.6f}\tdice1: {:.6f}\tdice2: {:.6f}\tT: {:.6f}\tP: {:.6f}\tTP: {:.6f}'.format(
            epoch, batch_idx, len(train_loader), 100. * batch_idx / len(train_loader),
            loss.item(), train_dice0, train_dice1, train_dice2,
            metrics.T(output, target), metrics.P(output, target), metrics.TP(output, target)))

    logger.scalar_summary('train_loss', train_loss, epoch)
    logger.scalar_summary('train_dice0', train_dice0, epoch)
    logger.scalar_summary('train_dice1', train_dice1, epoch)
    logger.scalar_summary('train_dice2', train_dice2, epoch)
def val(model, val_loader, epoch, logger):
    model.eval()
    val_loss = 0
    val_dice0 = 0
    val_dice1 = 0
    val_dice2 = 0
    with torch.no_grad():
        for idx, (data, target) in tqdm(enumerate(val_loader), total=len(val_loader)):
            data = torch.squeeze(data, dim=0)
            target = torch.squeeze(target, dim=0)
            data, target = data.float(), target.float()
            data, target = data.to(device), target.to(device)
            output = model(data)

            loss = metrics.DiceMeanLoss()(output, target)
            dice0 = metrics.dice(output, target, 0)
            dice1 = metrics.dice(output, target, 1)
            dice2 = metrics.dice(output, target, 2)

            val_loss += float(loss)
            val_dice0 += float(dice0)
            val_dice1 += float(dice1)
            val_dice2 += float(dice2)

    val_loss /= len(val_loader)
    val_dice0 /= len(val_loader)
    val_dice1 /= len(val_loader)
    val_dice2 /= len(val_loader)

    logger.scalar_summary('val_loss', val_loss, epoch)
    logger.scalar_summary('val_dice0', val_dice0, epoch)
    logger.scalar_summary('val_dice1', val_dice1, epoch)
    logger.scalar_summary('val_dice2', val_dice2, epoch)
    print('Val performance: Average loss: {:.4f}\tdice0: {:.4f}\tdice1: {:.4f}\tdice2: {:.4f}\n'.format(
        val_loss, val_dice0, val_dice1, val_dice2))
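# --- Illustrative sketch (an assumption, not the project's metrics module) --
# metrics.dice(output, target, idx) is called throughout the snippets above
# but not defined here. A common implementation is the soft Dice coefficient
# over one channel of a one-hot prediction/label volume, roughly:
def soft_dice_per_class(output, target, idx, eps=1e-5):
    # output, target: (N, C, D, H, W) tensors; idx selects the class channel
    pred = output[:, idx]
    true = target[:, idx]
    intersection = (pred * true).sum()
    return (2.0 * intersection + eps) / (pred.sum() + true.sum() + eps)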
def validation(valid_loader, model, criterion, logger, epoch_num):
    """Run one validation pass.

    Args:
        valid_loader: DataLoader over the validation set
        model: network under evaluation
        criterion: loss function
        logger: tensorboard-style summary writer
        epoch_num: current epoch index (used for the logging step)

    Returns:
        dict with the average validation loss and accuracy
    """
    # logging accuracy and loss
    valid_acc = metrics.MetricTracker()
    valid_loss = metrics.MetricTracker()

    # guard against loaders shorter than print_freq
    log_iter = max(1, len(valid_loader) // logger.print_freq)

    # switch to evaluate mode
    model.eval()

    # Iterate over data. Variable(..., volatile=True) is the deprecated
    # pre-0.4 idiom; the no_grad() context replaces it.
    with torch.no_grad():
        for idx, data in enumerate(tqdm(valid_loader, desc='validation')):
            # get the inputs
            inputs = data['sat_img'].cuda() if torch.cuda.is_available() else data['sat_img']
            labels = data['map_img'].cuda() if torch.cuda.is_available() else data['map_img']

            # forward
            # prob_map = model(inputs)  # last activation was a sigmoid
            # outputs = (prob_map > 0.3).float()
            outputs = model(inputs)
            outputs = torch.sigmoid(outputs)  # F.sigmoid is deprecated

            loss = criterion(outputs, labels)

            valid_acc.update(metrics.dice_coeff(outputs, labels), outputs.size(0))
            valid_loss.update(loss.item(), outputs.size(0))  # loss.data[0] is deprecated

            # tensorboard logging
            if idx % log_iter == 0:
                step = (epoch_num * logger.print_freq) + (idx // log_iter)

                # log accuracy and loss
                info = {'loss': valid_loss.avg, 'accuracy': valid_acc.avg}
                for tag, value in info.items():
                    logger.scalar_summary(tag, value, step)

                # log the sample images
                log_img = [
                    data_utils.show_tensorboard_image(data['sat_img'], data['map_img'],
                                                      outputs, as_numpy=True),
                ]
                logger.image_summary('valid_images', log_img, step)

    print('Validation Loss: {:.4f} Acc: {:.4f}'.format(valid_loss.avg, valid_acc.avg))
    print()

    return {'valid_loss': valid_loss.avg, 'valid_acc': valid_acc.avg}
def train(train_loader, model, criterion, optimizer, scheduler, logger, epoch_num):
    """Run one training epoch.

    Args:
        train_loader: DataLoader over the training set
        model: network being trained
        criterion: loss function
        optimizer: optimizer updating the model parameters
        scheduler: learning-rate scheduler, stepped once per epoch
        logger: tensorboard-style summary writer
        epoch_num: current epoch index (used for the logging step)

    Returns:
        dict with the average training loss and accuracy
    """
    # logging accuracy and loss
    train_acc = metrics.MetricTracker()
    train_loss = metrics.MetricTracker()

    # guard against loaders shorter than print_freq
    log_iter = max(1, len(train_loader) // logger.print_freq)

    # switch to training mode (validation leaves the model in eval mode)
    model.train()

    # iterate over data
    for idx, data in enumerate(tqdm(train_loader, desc="training")):
        # get the inputs; the deprecated Variable wrapper is no longer needed
        inputs = data['sat_img'].cuda() if torch.cuda.is_available() else data['sat_img']
        labels = data['map_img'].cuda() if torch.cuda.is_available() else data['map_img']

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward
        # prob_map = model(inputs)  # last activation was a sigmoid
        # outputs = (prob_map > 0.3).float()
        outputs = model(inputs)
        outputs = torch.sigmoid(outputs)  # F.sigmoid is deprecated

        loss = criterion(outputs, labels)

        # backward
        loss.backward()
        optimizer.step()

        train_acc.update(metrics.dice_coeff(outputs, labels), outputs.size(0))
        train_loss.update(loss.item(), outputs.size(0))  # loss.data[0] is deprecated

        # tensorboard logging
        if idx % log_iter == 0:
            step = (epoch_num * logger.print_freq) + (idx // log_iter)

            # log accuracy and loss
            info = {'loss': train_loss.avg, 'accuracy': train_acc.avg}
            for tag, value in info.items():
                logger.scalar_summary(tag, value, step)

            # log weights, biases, and gradients
            for tag, value in model.named_parameters():
                tag = tag.replace('.', '/')
                logger.histo_summary(tag, value.data.cpu().numpy(), step)
                logger.histo_summary(tag + '/grad', value.grad.data.cpu().numpy(), step)

            # log the sample images
            log_img = [
                data_utils.show_tensorboard_image(data['sat_img'], data['map_img'],
                                                  outputs, as_numpy=True),
            ]
            logger.image_summary('train_images', log_img, step)

    # step the scheduler after the optimizer updates (PyTorch >= 1.1 ordering;
    # calling it at the top of the epoch is the old pre-1.1 convention)
    scheduler.step()

    print('Training Loss: {:.4f} Acc: {:.4f}'.format(train_loss.avg, train_acc.avg))
    print()

    return {'train_loss': train_loss.avg, 'train_acc': train_acc.avg}
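# --- Illustrative sketch (an assumption, not the project's metrics module) --
# MetricTracker is used in the two functions above but not defined in these
# snippets. Its usage (update(value, n) and .avg) matches the common
# "AverageMeter" pattern, so a plausible implementation looks like:
class MetricTracker(object):
    """Tracks a running average, weighted by the number of samples per update."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count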