import torch

import bioloss


def test_dice(args, epoch, model, testLoader, optimizer, testF, weights):
    model.eval()
    test_loss = 0
    incorrect = 0
    criterion = bioloss.dice_loss()
    with torch.no_grad():
        for data, target in testLoader:
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            loss = criterion(output, target)
            # dice_loss appears to return the Dice coefficient, so 1 - loss is the error fraction
            test_loss += loss.item()
            incorrect += (1. - loss.item())
    test_loss /= len(testLoader)  # loss function already averages over batch size
    nTotal = len(testLoader)
    err = 100. * incorrect / nTotal
    print('\nTest set: Average Dice Coeff: {:.4f}, Error: {}/{} ({:.0f}%)\n'.format(
        test_loss, incorrect, nTotal, err))
    testF.write('{},{},{}\n'.format(epoch, test_loss, err))
    testF.flush()
    return err
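# bioloss itself is not included in this file. As a minimal sketch only, a
# criterion compatible with the call pattern above (criterion = bioloss.dice_loss();
# loss = criterion(output, target)) could look like the class below. The class name
# mirrors the usage, but the smoothing constant and the choice to return the soft
# Dice coefficient (rather than 1 - Dice) are assumptions, not the real bioloss code.
class dice_loss_sketch(torch.nn.Module):
    def __init__(self, smooth=1e-5):
        super().__init__()
        self.smooth = smooth

    def forward(self, output, target):
        # flatten both tensors and compute the soft Dice coefficient
        output = output.contiguous().view(-1).float()
        target = target.contiguous().view(-1).float()
        intersection = (output * target).sum()
        # a training loss would typically minimize 1 - dice; these variants
        # report the returned value as the coefficient itself
        return (2. * intersection + self.smooth) / (
            output.sum() + target.sum() + self.smooth)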
def train_dice(args, epoch, model, trainLoader, optimizer, trainF, weights):
    model.train()
    nProcessed = 0
    nTrain = len(trainLoader.dataset)
    criterion = bioloss.dice_loss()
    for batch_idx, (data, target) in enumerate(trainLoader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        nProcessed += len(data)
        # loss appears to be the Dice coefficient, so the error is 1 - loss
        err = 100. * (1. - loss.item())
        partialEpoch = epoch + batch_idx / len(trainLoader) - 1
        print('Train Epoch: {:.2f} [{}/{} ({:.0f}%)]\tLoss: {:.8f}\tError: {:.8f}'.format(
            partialEpoch, nProcessed, nTrain, 100. * batch_idx / len(trainLoader),
            loss.item(), err))
        trainF.write('{},{},{}\n'.format(partialEpoch, loss.item(), err))
        trainF.flush()
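# A hedged sketch of how train_dice above might be driven for one epoch, assuming
# an `args` namespace with a `cuda` flag and already-constructed `model`,
# `trainLoader`, and `trainF` objects; the optimizer choice and hyperparameters
# are illustrative, not taken from this file.
def run_one_epoch_sketch(args, model, trainLoader, trainF):
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.99)
    train_dice(args, 1, model, trainLoader, optimizer, trainF, weights=None)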
def test_dice(args, epoch, model, testLoader, optimizer, testF):
    model.eval()
    test_dice = 0
    incorrect = 0
    with torch.no_grad():
        for batch_idx, batch in enumerate(testLoader):
            data, target, sample_id = batch
            # flatten the single-sample target to match the network output (added by Chao)
            target = target[0, :, :, :].view(-1)
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            # dice_loss here appears to return the Dice coefficient directly
            dice = bioloss.dice_loss(output, target).item()
            test_dice += dice
            incorrect += (1. - dice)
    nTotal = len(testLoader)
    test_dice /= nTotal  # loss function already averages over batch size
    err = 100. * incorrect / nTotal
    print('\nFor testing: Epoch: {}\tAverage Dice Coeff: {:.4f}\tError: {:.4f}\n'.format(
        epoch, test_dice, err))
    testF.write('{},{},{}\n'.format(epoch, test_dice, err))
    testF.flush()
    return test_dice
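# The target[0, :, :, :].view(-1) step above assumes the loader yields a
# single-sample batch of shape (1, D, H, W); flattening gives one label per
# voxel. A tiny illustration with hypothetical dimensions:
def flatten_target_example():
    target = torch.zeros(1, 2, 3, 4)    # (N=1, D, H, W) batch
    flat = target[0, :, :, :].view(-1)  # shape (24,): one entry per voxel
    assert flat.shape == (2 * 3 * 4,)
    return flat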
def test_dice(args, epoch, model, testLoader, optimizer, testF, weights):
    model.eval()
    test_loss = 0
    incorrect = 0
    with torch.no_grad():  # inference only; gradients are not needed
        for data, target in testLoader:
            data = data.float()
            target = target.long()
            if args.cuda:
                data, target = data.cuda(), target.cuda()
            output = model(data)
            loss = bioloss.dice_loss(output, target).item()
            test_loss += loss
            incorrect += (1. - loss)
    test_loss /= len(testLoader)  # loss function already averages over batch size
    nTotal = len(testLoader)
    err = 100. * incorrect / nTotal
    print('\nTest set: Average Dice Coeff: {:.4f}, Error: {}/{} ({:.0f}%)\n'.format(
        test_loss, incorrect, nTotal, err))
    testF.write('{},{},{}\n'.format(epoch, test_loss, err))
    testF.flush()
    return err
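# Rather than casting dtypes on every batch inside test_dice, the conversion can
# be done once per sample in the dataset. A hedged sketch, wrapping a hypothetical
# base dataset whose items come back as (data, target) pairs:
class CastingDatasetSketch(torch.utils.data.Dataset):
    def __init__(self, base):
        self.base = base

    def __len__(self):
        return len(self.base)

    def __getitem__(self, idx):
        data, target = self.base[idx]
        return data.float(), target.long()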
def train_dice(args, epoch, model, trainLoader, optimizer, trainF):
    model.train()
    nProcessed = 0
    nTrain = len(trainLoader.dataset)
    for batch_idx, batch in enumerate(trainLoader):
        data, target, sample_id = batch
        # flatten the single-sample target to match the network output (added by Chao)
        target = target[0, :, :, :].view(-1)
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = bioloss.dice_loss(output, target)
        loss.backward()
        optimizer.step()
        nProcessed += len(data)
        # loss appears to be the dice coefficient here, so the error is 1 - loss (noted by Chao)
        err = 100. * (1. - loss.item())
        print('\nFor training: Epoch: {}\tdice_coefficient: {:.4f}\tError: {:.4f}\n'.format(
            epoch, loss.item(), err))
        trainF.write('{},{},{}\n'.format(epoch, loss.item(), err))
        trainF.flush()
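import os

# A hedged sketch of the epoch loop these functions imply, assuming an `args`
# namespace with `cuda`, `nEpochs`, and `save` attributes; the CSV file names and
# optimizer settings are illustrative, and the calls target the last-defined
# train_dice/test_dice variants above (their signatures differ slightly).
def main_sketch(args, model, trainLoader, testLoader):
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    trainF = open(os.path.join(args.save, 'train.csv'), 'w')
    testF = open(os.path.join(args.save, 'test.csv'), 'w')
    for epoch in range(1, args.nEpochs + 1):
        train_dice(args, epoch, model, trainLoader, optimizer, trainF)
        test_dice(args, epoch, model, testLoader, optimizer, testF, weights=None)
    trainF.close()
    testF.close()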