Example #1
def evaluate(args, model, criterion, val_loader):
    """Run the model over val_loader and return average loss, Jaccard and Dice."""
    model.eval()
    losses = AverageMeter()
    jaccars = AverageMeter()
    dices = AverageMeter()
    eva = Evaluation()

    for i, (images, labels) in enumerate(val_loader):
        if args.cuda:
            images = images.cuda()
            labels = labels.cuda()
            criterion = criterion.cuda()

        images = Variable(images)
        labels = Variable(labels)
        outputs = model(images)
        loss = criterion(outputs, labels)
        losses.update(loss.data.cpu().numpy())

        jacc_index = eva.jaccard_similarity_coefficient(
            outputs.cpu().data.numpy().squeeze(),
            labels.cpu().data.numpy())
        dice_index = eva.dice_coefficient(outputs.cpu().data.numpy().squeeze(),
                                          labels.cpu().data.numpy())
        jaccars.update(jacc_index)
        dices.update(dice_index)

    return losses.avg, jaccars.avg, dices.avg
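
Both evaluate() variants on this page rely on an AverageMeter helper that is not shown here. A minimal sketch of the kind of running-average tracker the calls to update() and .avg assume (the project's own class may differ):

class AverageMeter:
    """Tracks the running average of the values passed to update()."""

    def __init__(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, value, n=1):
        # Accumulate n observations of value and refresh the average.
        self.sum += float(value) * n
        self.count += n
        self.avg = self.sum / self.count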
Example #2
def evaluate(args, model, criterion, val_loader):
    """Variant of evaluate() that flattens outputs and labels before scoring."""
    model.eval()
    losses = AverageMeter()
    jaccars = AverageMeter()
    dices = AverageMeter()
    eva = Evaluation()

    for i, (images, labels) in enumerate(val_loader):
        if args.cuda:
            images = images.cuda()
            labels = labels.cuda()
            criterion = criterion.cuda()

        images = Variable(images)
        labels = Variable(labels)
        outputs = model(images)

        # Flatten predictions and targets so the criterion and the metric
        # helpers see matching 1 x N vectors.
        outputs = outputs.view(1, -1)
        labels = labels.view(1, -1)

        jacc_index = eva.jaccard_similarity_coefficient(
            outputs.cpu().data.numpy().squeeze(),
            labels.cpu().data.numpy())
        dice_index = eva.dice_coefficient(outputs.cpu().data.numpy().squeeze(),
                                          labels.cpu().data.numpy())

        # Only the criterion value is accumulated here; the Jaccard term is
        # not subtracted from the loss.
        loss = criterion(outputs, labels)

        losses.update(loss.data.cpu().numpy())
        jaccars.update(jacc_index)
        dices.update(dice_index)

    return losses.avg, jaccars.avg, dices.avg
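
The Evaluation helper used above is project code that is not reproduced on this page. As a rough, hypothetical sketch of what the two metrics compute on flattened NumPy arrays (thresholding the network outputs at 0.5; the actual class may differ):

import numpy as np

def jaccard_similarity_coefficient(pred, target, eps=1e-7):
    # Binarize the prediction and compute |A ∩ B| / |A ∪ B|.
    pred = (pred > 0.5).astype(np.float64).ravel()
    target = target.astype(np.float64).ravel()
    intersection = (pred * target).sum()
    union = pred.sum() + target.sum() - intersection
    return (intersection + eps) / (union + eps)

def dice_coefficient(pred, target, eps=1e-7):
    # Dice = 2 |A ∩ B| / (|A| + |B|).
    pred = (pred > 0.5).astype(np.float64).ravel()
    target = target.astype(np.float64).ravel()
    intersection = (pred * target).sum()
    return (2.0 * intersection + eps) / (pred.sum() + target.sum() + eps)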
Example #3
class Experiments():
    def __init__(self):

        self.eval = Evaluation()
        self.board = Dashboad(8098)

    def error_hist(self, gtdir, resdir, imgprefix, plot=False):
        listGTFiles = [
            k.split('/')[-1].split('.')[0]
            for k in glob.glob(os.path.join(gtdir, '*.bmp'))
        ]

        filename_jacc = dict()
        filename_dice = dict()
        filename_sens = dict()
        filename_spec = dict()
        for currfile in tqdm(listGTFiles):
            if currfile.count('_') == 2:
                continue
            gt = np.array(Image.open(os.path.join(gtdir,
                                                  currfile + '.bmp'))) / 255
            res = np.array(
                Image.open(
                    os.path.join(resdir, currfile + '_' + imgprefix + '.bmp')))
            # Binarize the prediction, then divide out-of-place so the uint8
            # array is promoted to float instead of failing on in-place division.
            res[res > 10] = 255
            res = res / 255

            jacc_index = self.eval.jaccard_similarity_coefficient(
                gt.squeeze(), res.squeeze())
            dice = self.eval.dice_coefficient(gt.squeeze(), res.squeeze())
            spec, sens, _ = self.eval.specificity_sensitivity(
                gt.squeeze(), res.squeeze())
            filename_jacc[currfile] = jacc_index
            filename_dice[currfile] = dice
            filename_sens[currfile] = sens
            filename_spec[currfile] = spec
        if plot:
            self.board.metric_bar(list(filename_jacc.values()),
                                  'Jaccard_' + imgprefix,
                                  nbins=20)
            self.board.metric_bar(list(filename_dice.values()),
                                  'Dice_' + imgprefix,
                                  nbins=20)
            self.board.metric_bar(list(filename_sens.values()),
                                  'Sens_' + imgprefix,
                                  nbins=20)
            self.board.metric_bar(list(filename_spec.values()),
                                  'Spec_' + imgprefix,
                                  nbins=20)

        return filename_jacc, filename_dice, filename_sens, filename_spec

    def get_failure_cases(self, args, threshold=0.5):
        list_dics = []

        for m in args.methods:
            result, _, _, _ = self.error_hist(args.gtdir,
                                              args.resdir,
                                              m,
                                              plot=False)
            list_dics.append(result)

        # Drop the images with a Jaccard above the threshold (iterate over a
        # copy so entries can be deleted while looping).
        for d in list_dics:
            for k, v in list(d.items()):
                if v > threshold:
                    del d[k]

        # Find the failure cases common between all methods
        common_failures = set.intersection(*tuple(
            set(d.keys()) for d in list_dics))
        return common_failures

    # TODO Remove val argument in function
    def make_grid(self, args, val=True, selected_filenames=None):
        bordersize = 2
        # 240 x 320 images plus the 2 px border on each side
        batch = np.empty((0, 3, 244, 324))
        num2sample = 60
        if selected_filenames is not None:
            filenames = list(selected_filenames)
            num2sample = len(filenames)
        else:
            if val:
                filenames = [
                    k.split('.')[-2].split('/')[-1]
                    for k in glob.glob(os.path.join(args.imgdir, "val_*"))
                ]
            elif args.test:
                filenames = [
                    k.split('.')[-2].split('/')[-1]
                    for k in glob.glob(os.path.join(args.imgdir, "test_*.bmp"))
                ]
            else:
                filenames = [
                    k.split('.')[-2].split('/')[-1]
                    for k in glob.glob(os.path.join(args.imgdir, "*.bmp"))
                ]

        train_ind = np.random.choice(np.arange(0, len(filenames)),
                                     num2sample,
                                     replace=False)
        for i in range(train_ind.shape[0]):
            currfile = filenames[train_ind[i]]
            im = np.array(
                Image.open(os.path.join(args.imgdir,
                                        currfile + ".bmp")).convert('RGB'))
            # Applies a border on the top of the image
            im = cv2.copyMakeBorder(im,
                                    top=bordersize,
                                    bottom=bordersize,
                                    left=bordersize,
                                    right=bordersize,
                                    borderType=cv2.BORDER_CONSTANT,
                                    value=[255, 0, 0])
            im = cv2.putText(im,
                             currfile, (10, 20),
                             fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                             fontScale=0.75,
                             color=(255, 255, 255),
                             thickness=2)
            im = im.transpose((2, 0, 1))
            batch = np.append(batch, im[np.newaxis, :, :, :], axis=0)

            if val:
                im = np.array(
                    Image.open(os.path.join(args.gtdir,
                                            currfile + ".bmp")).convert('L'))
                im = np.repeat(im[:, :, np.newaxis], 3, axis=2)
                im = cv2.copyMakeBorder(im,
                                        top=bordersize,
                                        bottom=bordersize,
                                        left=bordersize,
                                        right=bordersize,
                                        borderType=cv2.BORDER_CONSTANT,
                                        value=[0, 255, 0])
                im = im.transpose((2, 0, 1))
                batch = np.append(batch, im[np.newaxis, :, :, :], axis=0)

            for m in args.methods:
                res = np.array(
                    Image.open(
                        os.path.join(args.resdir, currfile + "_" + m +
                                     ".bmp")).convert('L'))
                res = np.repeat(res[:, :, np.newaxis], 3, axis=2)
                res = cv2.copyMakeBorder(res,
                                         top=bordersize,
                                         bottom=bordersize,
                                         left=bordersize,
                                         right=bordersize,
                                         borderType=cv2.BORDER_CONSTANT,
                                         value=[0, 255, 200])

                # Writes the name of the models.
                res = cv2.putText(res,
                                  m, (10, 20),
                                  fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                  fontScale=0.75,
                                  color=(255, 255, 255),
                                  thickness=2)
                res = res.transpose((2, 0, 1))
                batch = np.append(batch, res[np.newaxis, :, :, :], axis=0)

        return batch
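
A hypothetical usage of the Experiments class above, assuming an args object that carries the gtdir, resdir, imgdir, methods and test fields the methods read (the 'unet' prefix below is only an example, not a documented method name):

exp = Experiments()

# Per-image metric dictionaries (and optional histograms) for one method.
jacc, dice, sens, spec = exp.error_hist(args.gtdir, args.resdir, 'unet', plot=True)

# Images on which every method stays below the Jaccard threshold, rendered
# as a grid of bordered, labelled tiles.
failures = exp.get_failure_cases(args, threshold=0.5)
batch = exp.make_grid(args, val=True, selected_filenames=failures)
print(batch.shape)  # (N, 3, 244, 324)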
Example #4
def train(args, model):
    #     board = Dashboad(args.visdom_port) #visdom
    tr_losses = AverageMeter()
    tLoader, vLoader = load_data(args)

    criterion = nn.BCELoss()
    #     criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          weight_decay=args.weight_decay,
                          momentum=0.99)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)

    for epoch in range(1, args.num_epochs + 1):
        scheduler.step()
        if epoch == 1:
            tr_loss, _, _ = evaluate(args, model, criterion, tLoader)
            vl_loss, vl_jacc, vl_dice = evaluate(args, model, criterion,
                                                 vLoader)

            # Draw the loss curves
            win = None
            #             win = board.loss_curves([tr_loss, vl_loss], epoch, win=win) #visdom
            print('[Initial TrainLoss: {0:.4f}]'
                  '\t[Initial ValidationLoss: {1:.4f}]'
                  '\t[Initial ValidationJaccard: {2:.4f}]'
                  '\t[Initial ValidationDice: {3:.4f}]'.format(
                      tr_loss, vl_loss, vl_jacc, vl_dice))
            print(
                '----------------------------------------------------------------------------------------------------'
                '--------------')

        for step, (images, labels) in enumerate(tLoader):
            model.train(True)
            if args.cuda:
                images = images.cuda()
                labels = labels.cuda()
                criterion = criterion.cuda()

            inputs = Variable(images)
            targets = Variable(labels)

            optimizer.zero_grad()
            outputs = model(inputs)

            eva = Evaluation()
            output_np = outputs.cpu().data.numpy().squeeze()
            targets_np = targets.cpu().data.numpy()

            # dice is a plain NumPy float here, so subtracting it only shifts
            # the reported loss value; gradients still come from the BCE term.
            dice = eva.dice_coefficient(output_np, targets_np)

            crit = criterion(outputs, targets)
            loss = crit - dice

            print('bceloss : ', crit.data)
            print('bceloss - dice : ', loss.data)

            loss.backward()
            optimizer.step()

            tr_losses.update(loss.data.cpu().numpy())

        if epoch % args.log_step == 0:
            vl_loss, vl_jacc, vl_dice = evaluate(args, model, criterion,
                                                 vLoader)
            print('[Epoch: {0:02}/{1:02}]'
                  '\t[TrainLoss: {2:.4f}]'
                  '\t[ValidationLoss: {3:.4f}]'
                  '\t[ValidationJaccard: {4:.4f}]'
                  '\t[ValidationDice: {5:.4f}]'.format(epoch, args.num_epochs,
                                                       tr_losses.avg, vl_loss,
                                                       vl_jacc, vl_dice),
                  end='')

            filename = "weights/{0}-{1:02}.pth".format(args.model, epoch)
            torch.save(model.state_dict(), filename)
            print('  [Snapshot]')
        else:
            vl_loss, vl_jacc, vl_dice = evaluate(args, model, criterion,
                                                 vLoader)
            print('[Epoch: {0:02}/{1:02}]'
                  '\t[TrainLoss: {2:.4f}]'
                  '\t[ValidationLoss: {3:.4f}]'
                  '\t[ValidationJaccard: {4:.4f}]'
                  '\t[ValidationDice: {5:.4f}]'.format(epoch, args.num_epochs,
                                                       tr_losses.avg, vl_loss,
                                                       vl_jacc, vl_dice))
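
In the training loop above the Dice term is a NumPy scalar, so no gradient flows through it. If a differentiable Dice penalty is actually wanted, one common alternative (a sketch only, not the code used here) is a soft Dice loss computed on the output tensor itself:

def soft_dice_loss(outputs, targets, eps=1e-7):
    # Differentiable Dice on probabilities: 1 - 2|A.B| / (|A| + |B|).
    outputs = outputs.view(-1)
    targets = targets.view(-1)
    intersection = (outputs * targets).sum()
    return 1.0 - (2.0 * intersection + eps) / (outputs.sum() + targets.sum() + eps)

# e.g. inside the loop:
# loss = criterion(outputs, targets) + soft_dice_loss(outputs, targets)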