def main():
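    # For each sample, segment the image with Veksler superpixels at several
    # patch sizes (a Felzenszwalb variant is kept commented out below), keep
    # only segment boundaries close to the ground-truth glass contour, and
    # accumulate boundary recall/precision per patch size.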
    glassLabel = 255
    thicknessForEvaluation = 20
    objects=('bank', 'bottle', 'bucket', 'glass', 'wineglass')
    basePath = '/media/2Tb/transparentBases/fixedOnTable/base/'

    base = utils.TODBase(basePath, objects)
    patchSizes = (5, 10, 20, 40, 100, 200, 400)
    fks = (10, 20, 50, 100, 200, 400, 800, 1600)

    allSamples = base.getSamples()
    evaluator = utils.Evaluator(len(patchSizes))
#    evaluator = utils.Evaluator(len(fks))
    for sample in allSamples:
        print(sample.imageFilename)
 
        groundTruthMask = sample.getGlassMask()
        if groundTruthMask is None:
            continue
        [groundTruthDT, groundTruthEdgelsCount] = utils.mask2dtAndEdgelsCount(groundTruthMask)
        glassMask = groundTruthMask.copy()
        glassContours, hierarchy = cv2.findContours(glassMask, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)
        glassContoursImage = np.zeros(glassMask.shape, np.uint8)
        cv2.drawContours(glassContoursImage, glassContours, -1, (255), thicknessForEvaluation)

        image = sample.getImage()
        for idx, size in enumerate(patchSizes):
            segmentation = computeVeksler(image, size)
#        for idx, k in enumerate(fks):
#            segmentation = computeFelzenszwalb(image, k)
            minSegmentation = cv2.erode(segmentation.astype(np.float32), None)
            maxSegmentation = cv2.dilate(segmentation.astype(np.float32), None)
            boundaries = 255 * (maxSegmentation - minSegmentation != 0).astype(np.uint8)
            boundaries[glassContoursImage == 0] = 0
            boundariesEdgelsCount = len(np.nonzero(boundaries == 255)[0])
            boundariesDT = cv2.distanceTransform(~boundaries, cv2.cv.CV_DIST_L1, cv2.cv.CV_DIST_MASK_PRECISE)

            [recallEdgels, precisionEdgels] = utils.computeRecallPrecisionEdgels(groundTruthDT, boundariesDT)
            evaluator.addObservation(idx, recallEdgels, groundTruthEdgelsCount, precisionEdgels, boundariesEdgelsCount)
        evaluator.computeStatistics()
        evaluator.printStatistics()


    evaluator.plot('veksler')
#    evaluator.plot('felz')
    utils.plt.show()
Example #2
def main(args):
    num_classes = 1
    size = [192, 192]  # size of images
    thresh = 0.5

    if args.out_dir:
        os.makedirs(args.out_dir, exist_ok=True)

    in_ = eddl.Input([3, size[0], size[1]])
    out = SegNet(in_, num_classes)
    out_sigm = eddl.Sigmoid(out)
    net = eddl.Model([in_], [out_sigm])
    eddl.build(net, eddl.adam(0.0001), ["cross_entropy"],
               ["mean_squared_error"],
               eddl.CS_GPU([1]) if args.gpu else eddl.CS_CPU())
    eddl.summary(net)
    eddl.setlogfile(net, "skin_lesion_segmentation")

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
        ecvl.AugMirror(0.5),
        ecvl.AugFlip(0.5),
        ecvl.AugRotate([-180, 180]),
        ecvl.AugAdditivePoissonNoise([0, 10]),
        ecvl.AugGammaContrast([0.5, 1.5]),
        ecvl.AugGaussianBlur([0, 0.8]),
        ecvl.AugCoarseDropout([0, 0.3], [0.02, 0.05], 0.5)
    ])
    validation_augs = ecvl.SequentialAugmentationContainer(
        [ecvl.AugResizeDim(size)])
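    # The three augmentation slots map to the training, validation and test
    # splits; the test slot is left as None (no augmentation).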
    dataset_augs = ecvl.DatasetAugmentations(
        [training_augs, validation_augs, None])

    print("Reading dataset")
    d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs)
    x = Tensor([args.batch_size, d.n_channels_, size[0], size[1]])
    y = Tensor([args.batch_size, d.n_channels_gt_, size[0], size[1]])
    num_samples_train = len(d.GetSplit())
    num_batches_train = num_samples_train // args.batch_size
    d.SetSplit(ecvl.SplitType.validation)
    num_samples_validation = len(d.GetSplit())
    num_batches_validation = num_samples_validation // args.batch_size
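    # Passed to eddl.train_batch to select which samples of the loaded batch
    # are used (here, the whole batch).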
    indices = list(range(args.batch_size))

    evaluator = utils.Evaluator()
    print("Starting training")
    for e in range(args.epochs):
        print("Epoch {:d}/{:d} - Training".format(e + 1, args.epochs),
              flush=True)
        d.SetSplit(ecvl.SplitType.training)
        eddl.reset_loss(net)
        s = d.GetSplit()
        random.shuffle(s)
        d.split_.training_ = s
        d.ResetAllBatches()
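        # Stream training batches; images and masks are scaled to [0, 1]
        # before each train step.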
        for b in range(num_batches_train):
            print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format(
                e + 1, args.epochs, b + 1, num_batches_train),
                  end="",
                  flush=True)
            d.LoadBatch(x, y)
            x.div_(255.0)
            y.div_(255.0)
            tx, ty = [x], [y]
            eddl.train_batch(net, tx, ty, indices)
            eddl.print_loss(net, b)
            print()

        print("Saving weights")
        eddl.save(net, "isic_segmentation_checkpoint_epoch_%s.bin" % e, "bin")

        d.SetSplit(ecvl.SplitType.validation)
        evaluator.ResetEval()
        print("Epoch %d/%d - Evaluation" % (e + 1, args.epochs), flush=True)
        for b in range(num_batches_validation):
            n = 0
            print("Epoch {:d}/{:d} (batch {:d}/{:d}) ".format(
                e + 1, args.epochs, b + 1, num_batches_validation),
                  end="",
                  flush=True)
            d.LoadBatch(x, y)
            x.div_(255.0)
            y.div_(255.0)
            eddl.forward(net, [x])
            output = eddl.getOutput(out_sigm)
            for k in range(args.batch_size):
                img = output.select([str(k)])
                gt = y.select([str(k)])
                img_np = np.array(img, copy=False)
                gt_np = np.array(gt, copy=False)
                iou = evaluator.BinaryIoU(img_np, gt_np, thresh=thresh)
                print("- IoU: %.6g " % iou, end="", flush=True)
                if args.out_dir:
                    # C++ BinaryIoU modifies image as a side effect
                    img_np[img_np >= thresh] = 1
                    img_np[img_np < thresh] = 0
                    img_t = ecvl.TensorToView(img)
                    img_t.colortype_ = ecvl.ColorType.GRAY
                    img_t.channels_ = "xyc"
                    img.mult_(255.)
                    # orig_img
                    orig_img = x.select([str(k)])
                    orig_img.mult_(255.)
                    orig_img_t = ecvl.TensorToImage(orig_img)
                    orig_img_t.colortype_ = ecvl.ColorType.BGR
                    orig_img_t.channels_ = "xyc"

                    tmp, labels = ecvl.Image.empty(), ecvl.Image.empty()
                    ecvl.CopyImage(img_t, tmp, ecvl.DataType.uint8)
                    ecvl.ConnectedComponentsLabeling(tmp, labels)
                    ecvl.CopyImage(labels, tmp, ecvl.DataType.uint8)
                    contours = ecvl.FindContours(tmp)
                    ecvl.CopyImage(orig_img_t, tmp, ecvl.DataType.uint8)
                    tmp_np = np.array(tmp, copy=False)
                    for cseq in contours:
                        for c in cseq:
                            tmp_np[c[0], c[1], 0] = 0
                            tmp_np[c[0], c[1], 1] = 0
                            tmp_np[c[0], c[1], 2] = 255
                    filename = d.samples_[d.GetSplit()[n]].location_[0]
                    head, tail = os.path.splitext(os.path.basename(filename))
                    bname = "%s.png" % head
                    output_fn = os.path.join(args.out_dir, bname)
                    ecvl.ImWrite(output_fn, tmp)
                    if e == 0:
                        gt_t = ecvl.TensorToView(gt)
                        gt_t.colortype_ = ecvl.ColorType.GRAY
                        gt_t.channels_ = "xyc"
                        gt.mult_(255.)
                        gt_filename = d.samples_[d.GetSplit()[n]].label_path_
                        gt_fn = os.path.join(args.out_dir,
                                             os.path.basename(gt_filename))
                        ecvl.ImWrite(gt_fn, gt_t)
                n += 1
            print()
        print("MIoU: %.6g" % evaluator.MeanMetric())
Example #3
def main(args):
    num_classes = 1
    size = [192, 192]  # size of images
    thresh = 0.5

    if args.out_dir:
        os.makedirs(args.out_dir, exist_ok=True)

    in_ = eddl.Input([3, size[0], size[1]])
    out = SegNet(in_, num_classes)
    out_sigm = eddl.Sigmoid(out)
    net = eddl.Model([in_], [out_sigm])
    eddl.build(net, eddl.adam(0.0001), ["cross_entropy"],
               ["mean_squared_error"],
               eddl.CS_GPU([1]) if args.gpu else eddl.CS_CPU())
    eddl.summary(net)
    eddl.setlogfile(net, "skin_lesion_segmentation_inference")

    if not os.path.exists(args.ckpts):
        raise RuntimeError('Checkpoint "{}" not found'.format(args.ckpts))
    eddl.load(net, args.ckpts, "bin")

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
    ])
    test_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
    ])
    dataset_augs = ecvl.DatasetAugmentations([training_augs, None, test_augs])

    print("Reading dataset")
    d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs)
    x = Tensor([args.batch_size, d.n_channels_, size[0], size[1]])
    y = Tensor([args.batch_size, d.n_channels_gt_, size[0], size[1]])
    print("Testing")
    d.SetSplit(ecvl.SplitType.test)
    num_samples_test = len(d.GetSplit())
    num_batches_test = num_samples_test // args.batch_size

    evaluator = utils.Evaluator()
    evaluator.ResetEval()
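    # Run the test split once: per-image binary IoU, plus optional saving of
    # the thresholded predictions (with contours overlaid) and ground-truth
    # masks.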
    for b in range(num_batches_test):
        n = 0
        print("Batch {:d}/{:d} ".format(b + 1, num_batches_test),
              end="",
              flush=True)
        d.LoadBatch(x, y)
        x.div_(255.0)
        y.div_(255.0)
        eddl.forward(net, [x])
        output = eddl.getOutput(out_sigm)
        for k in range(args.batch_size):
            img = output.select([str(k)])
            gt = y.select([str(k)])
            img_np, gt_np = np.array(img, copy=False), np.array(gt, copy=False)
            iou = evaluator.BinaryIoU(img_np, gt_np, thresh=thresh)
            print("- IoU: %.6g " % iou, end="", flush=True)
            if args.out_dir:
                # C++ BinaryIoU modifies image as a side effect
                img_np[img_np >= thresh] = 1
                img_np[img_np < thresh] = 0
                img_t = ecvl.TensorToView(img)
                img_t.colortype_ = ecvl.ColorType.GRAY
                img_t.channels_ = "xyc"
                img.mult_(255.)
                # orig_img
                orig_img = x.select([str(k)])
                orig_img.mult_(255.)
                orig_img_t = ecvl.TensorToImage(orig_img)
                orig_img_t.colortype_ = ecvl.ColorType.BGR
                orig_img_t.channels_ = "xyc"

                tmp, labels = ecvl.Image.empty(), ecvl.Image.empty()
                ecvl.CopyImage(img_t, tmp, ecvl.DataType.uint8)
                ecvl.ConnectedComponentsLabeling(tmp, labels)
                ecvl.CopyImage(labels, tmp, ecvl.DataType.uint8)
                contours = ecvl.FindContours(tmp)
                ecvl.CopyImage(orig_img_t, tmp, ecvl.DataType.uint8)
                tmp_np = np.array(tmp, copy=False)
                for cseq in contours:
                    for c in cseq:
                        tmp_np[c[0], c[1], 0] = 0
                        tmp_np[c[0], c[1], 1] = 0
                        tmp_np[c[0], c[1], 2] = 255

                filename = d.samples_[d.GetSplit()[n]].location_[0]
                head, tail = os.path.splitext(os.path.basename(filename))
                bname = "%s.png" % head
                output_fn = os.path.join(args.out_dir, bname)
                ecvl.ImWrite(output_fn, tmp)

                gt_t = ecvl.TensorToView(gt)
                gt_t.colortype_ = ecvl.ColorType.GRAY
                gt_t.channels_ = "xyc"
                gt.mult_(255.)
                gt_filename = d.samples_[d.GetSplit()[n]].label_path_
                gt_fn = os.path.join(args.out_dir,
                                     os.path.basename(gt_filename))
                ecvl.ImWrite(gt_fn, gt_t)
            n += 1
        print()
    print("MIoU: %.6g" % evaluator.MeanMetric())
Example #4
def main(args):
    num_classes = 1
    size = [512, 512]  # size of images
    thresh = 0.5
    best_dice = -1

    if args.out_dir:
        os.makedirs(args.out_dir, exist_ok=True)

    in_ = eddl.Input([1, size[0], size[1]])
    out = SegNetBN(in_, num_classes)
    out_sigm = eddl.Sigmoid(out)
    net = eddl.Model([in_], [out_sigm])
    eddl.build(net, eddl.adam(0.0001), ["cross_entropy"],
               ["mean_squared_error"],
               eddl.CS_GPU([1], mem='low_mem') if args.gpu else eddl.CS_CPU())
    eddl.summary(net)
    eddl.setlogfile(net, "pneumothorax_segmentation_training")

    if args.ckpts and os.path.exists(args.ckpts):
        print("Loading checkpoints '{}'".format(args.ckpts))
        eddl.load(net, args.ckpts, 'bin')

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
        ecvl.AugMirror(0.5),
        ecvl.AugRotate([-10, 10]),
        ecvl.AugBrightness([0, 30]),
        ecvl.AugGammaContrast([0, 3]),
    ])
    validation_augs = ecvl.SequentialAugmentationContainer(
        [ecvl.AugResizeDim(size)])
    dataset_augs = ecvl.DatasetAugmentations(
        [training_augs, validation_augs, None])

    print("Reading dataset")
    d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs,
                       ecvl.ColorType.GRAY)
    # Prepare tensors which store batch
    x = Tensor([args.batch_size, d.n_channels_, size[0], size[1]])
    y = Tensor([args.batch_size, d.n_channels_gt_, size[0], size[1]])

    # Retrieve indices of images with a black ground truth,
    # i.e. those not included in any split
    train_split = d.GetSplit(ecvl.SplitType.training)
    val_split = d.GetSplit(ecvl.SplitType.validation)
    test_split = d.GetSplit(ecvl.SplitType.test)
    all_split = set(train_split + val_split + test_split)

    images_list = set(range(len(d.samples_)))

    # Obtain images with black ground truth
    black_images = images_list - all_split

    # Enlarge the training set by 25% using samples with black ground truth.
    num_samples_train = math.floor(len(train_split) * 1.25)
    num_batches_train = num_samples_train // args.batch_size

    # Enlarge the validation set by 25% using samples with black ground truth.
    num_samples_validation = math.floor(len(val_split) * 1.25)
    num_batches_validation = num_samples_validation // args.batch_size

    black_images = list(black_images)
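    # The last of the black-mask images pad the validation split; the rest pad
    # the training split.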
    black_training = black_images[0:-(num_samples_validation - len(val_split))]
    black_validation = black_images[-(num_samples_validation -
                                      len(val_split)):]
    indices = list(range(args.batch_size))

    evaluator = utils.Evaluator()
    print("Starting training")
    for e in range(args.epochs):
        print("Epoch {:d}/{:d} - Training".format(e + 1, args.epochs),
              flush=True)
        d.SetSplit(ecvl.SplitType.training)
        eddl.reset_loss(net)
        s = d.GetSplit()
        random.shuffle(s)
        d.split_.training_ = s
        random.shuffle(black_training)

        d.ResetAllBatches()
        # Indices to track mask and black vector in PneumothoraxLoadBatch
        m_i = 0
        b_i = 0
        for i, b in enumerate(range(num_batches_train)):
            d, images, labels, _, m_i, b_i = PneumothoraxLoadBatch(
                d, black_training, m_i, b_i)
            x, y = fill_tensors(images, labels, x, y)
            x.div_(255.0)
            y.div_(255.0)
            eddl.train_batch(net, [x], [y], indices)
            if i % args.log_interval == 0:
                print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format(
                    e + 1, args.epochs, b + 1, num_batches_train),
                      end="",
                      flush=True)
                eddl.print_loss(net, b)
                print()

        d.SetSplit(ecvl.SplitType.validation)
        evaluator.ResetEval()
        print("Epoch %d/%d - Evaluation" % (e + 1, args.epochs), flush=True)
        m_i = 0
        b_i = 0
        for b in range(num_batches_validation):
            n = 0
            print("Epoch {:d}/{:d} (batch {:d}/{:d}) ".format(
                e + 1, args.epochs, b + 1, num_batches_validation),
                  end="",
                  flush=True)
            d, images, labels, names, m_i, b_i = PneumothoraxLoadBatch(
                d, black_validation, m_i, b_i)
            x, y = fill_tensors(images, labels, x, y)
            x.div_(255.0)
            y.div_(255.0)
            eddl.forward(net, [x])
            output = eddl.getOutput(out_sigm)

            # Compute Dice metric and optionally save the output images
            for k in range(args.batch_size):
                pred = output.select([str(k)])
                gt = y.select([str(k)])
                pred_np = np.array(pred, copy=False)
                gt_np = np.array(gt, copy=False)
                # DiceCoefficient modifies image as a side effect
                dice = evaluator.DiceCoefficient(pred_np, gt_np, thresh=thresh)
                print("- Dice: {:.6f} ".format(dice), end="", flush=True)

                if args.out_dir:
                    # Save original image fused together with prediction and
                    # ground truth
                    pred_np *= 255
                    pred_ecvl = ecvl.TensorToImage(pred)
                    pred_ecvl.colortype_ = ecvl.ColorType.GRAY
                    pred_ecvl.channels_ = "xyc"
                    ecvl.ResizeDim(pred_ecvl, pred_ecvl, (1024, 1024),
                                   ecvl.InterpolationType.nearest)

                    filename_gt = names[n + 1]
                    gt_ecvl = ecvl.ImRead(filename_gt,
                                          ecvl.ImReadMode.GRAYSCALE)

                    filename = names[n]

                    # Image as BGR
                    img_ecvl = ecvl.ImRead(filename)
                    ecvl.Stack([img_ecvl, img_ecvl, img_ecvl], img_ecvl)
                    img_ecvl.channels_ = "xyc"
                    img_ecvl.colortype_ = ecvl.ColorType.BGR
                    image_np = np.array(img_ecvl, copy=False)
                    pred_np = np.array(pred_ecvl, copy=False)
                    gt_np = np.array(gt_ecvl, copy=False)

                    pred_np = pred_np.squeeze()
                    gt_np = gt_np.squeeze()
                    # Prediction summed in R channel
                    image_np[:, :, -1] = np.where(pred_np == 255, pred_np,
                                                  image_np[:, :, -1])
                    # Ground truth summed in G channel
                    image_np[:, :, 1] = np.where(gt_np == 255, gt_np,
                                                 image_np[:, :, 1])

                    n += 2
                    head, tail = os.path.splitext(os.path.basename(filename))
                    bname = "{}.png".format(head)
                    filepath = os.path.join(args.out_dir, bname)
                    ecvl.ImWrite(filepath, img_ecvl)

            print()

        mean_dice = evaluator.MeanMetric()
        if mean_dice > best_dice:
            print("Saving weights")
            eddl.save(
                net, "pneumothorax_segnetBN_adam_lr_0.0001_"
                "loss_ce_size_512_{}.bin".format(e + 1), "bin")
            best_dice = mean_dice
        print("Mean Dice Coefficient: {:.6g}".format(mean_dice))
Example #5
def main():
    # SET THE PARAMETERS
    parser = argparse.ArgumentParser()
    parser.add_argument('--lr', type=float, default=1e-3,
                        help='Initial learning rate (default: 1e-3)')
    parser.add_argument('--epochs', type=int, default=100,
                        help='Maximum number of epochs (default: 100)')
    parser.add_argument('--patience', type=int, default=10,
                        help='lr scheduler patience (default: 10)')
    parser.add_argument('--batch', type=int, default=4,
                        help='Batch size (default: 4)')
    parser.add_argument('--name', type=str, default='Prueba',
                        help='Name of the current test (default: Prueba)')

    parser.add_argument('--load_model', type=str, default='best_acc',
                        help='Weights to load (default: best_acc)')
    parser.add_argument('--test', action='store_false', default=True,
                        help='Only test the model')
    parser.add_argument('--resume', action='store_true', default=False,
                        help='Continue training a model')
    parser.add_argument('--load_path', type=str, default=None,
                        help='Name of the folder with the pretrained model')
    parser.add_argument('--ft', action='store_true', default=False,
                        help='Fine-tune a model')
    parser.add_argument('--psFactor', type=float, default=1,
                        help='Patch size scaling factor (default: 1)')

    parser.add_argument('--gpu', type=str, default='0',
                        help='GPU(s) to use (default: 0)')
    args = parser.parse_args()

    training = args.test
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    if args.ft:
        args.resume = True

    args.patch_size = [int(128*args.psFactor), int(128*args.psFactor), int(96*args.psFactor)]
    args.num_classes = 2

    # PATHS AND DIRS
    save_path = os.path.join('TRAIN', args.name)
    out_path = os.path.join(save_path, 'Val')
    load_path = save_path
    if args.load_path is not None:
        load_path = os.path.join('TRAIN/', args.load_path)

    root = '../../Data/Heart'
    train_file = 'train_paths.csv'
    test_file = 'val_paths.csv'

    if not os.path.exists(save_path):
        os.makedirs(save_path)
        os.makedirs(out_path)

    # SEEDS
    np.random.seed(12345)
    torch.manual_seed(12345)

    cudnn.deterministic = False
    cudnn.benchmark = True

    # CREATE THE NETWORK ARCHITECTURE
    model = GNet(num_classes=args.num_classes, backbone='xception')
    print('---> Number of params: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))
    model = model.cuda()

    optimizer = optim.Adam(model.parameters(), lr=args.lr,
                           weight_decay=1e-5, amsgrad=True)

    model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
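    # Apex AMP opt level O1: weights stay in FP32, eligible ops run in FP16.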

    annealing = optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, verbose=True, patience=args.patience, threshold=0.001,
        factor=0.5, threshold_mode="abs")

    criterion = utils.segmentation_loss(alpha=1)
    metrics = utils.Evaluator(args.num_classes)

    # LOAD A MODEL IF NEEDED (TESTING OR CONTINUE TRAINING)
    args.epoch = 0
    best_acc = 0
    if args.resume or not training:
        name = 'epoch_' + args.load_model + '.pth.tar'
        checkpoint = torch.load(
            os.path.join(load_path, name),
            map_location=lambda storage, loc: storage)
        args.epoch = checkpoint['epoch']
        best_acc = checkpoint['best_acc']
        args.lr = checkpoint['lr']

        print('Loading model and optimizer from epoch {}.'.format(args.epoch))

        amp.load_state_dict(checkpoint['amp'])
        model.load_state_dict(checkpoint['state_dict'], strict=(not args.ft))
        if not args.ft:
            optimizer.load_state_dict(checkpoint['optimizer'])

    # DATALOADERS
    train_loader = Read_data.MRIdataset(True, train_file, root,
                                        args.patch_size)
    val_loader = Read_data.MRIdataset(True, test_file, root, args.patch_size,
                                      val=True)
    test_loader = Read_data.MRIdataset(False, test_file, root, args.patch_size)

    train_loader = DataLoader(train_loader, shuffle=True, sampler=None,
                              batch_size=args.batch, num_workers=10)
    val_loader = DataLoader(val_loader, shuffle=False, sampler=None,
                            batch_size=args.batch * 2, num_workers=10)
    test_loader = DataLoader(test_loader, shuffle=False, sampler=None,
                             batch_size=1, num_workers=0)

    # TRAIN THE MODEL
    is_best = True
    if training:
        torch.cuda.empty_cache()
        out_file = open(os.path.join(save_path, 'progress.csv'), 'a+')

        for epoch in range(args.epoch + 1, args.epochs + 1):
            args.epoch = epoch
            lr = utils.get_lr(optimizer)
            print('--------- Starting Epoch {} --> {} ---------'.format(
                epoch, time.strftime("%H:%M:%S")))
            print('Learning rate:', lr)

            train_loss = train(args, model, train_loader, optimizer, criterion)
            val_loss, acc = val(args, model, val_loader, criterion, metrics)

            acc = acc.item()
            out_file.write('{},{},{},{}\n'.format(
                args.epoch, train_loss, val_loss, acc))
            out_file.flush()

            annealing.step(val_loss)
            save_graph(save_path)

            is_best = best_acc < acc
            best_acc = max(best_acc, acc)

            state = {
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'amp': amp.state_dict(),
                'loss': [train_loss, val_loss],
                'lr': lr,
                'acc': acc,
                'best_acc': best_acc}

            checkpoint = epoch % 20 == 0
            utils.save_epoch(state, save_path, epoch,
                             checkpoint=checkpoint, is_best=is_best)

            if lr <= (args.lr / (10 ** 3)):
                print('Stopping training: learning rate is too small')
                break
        out_file.close()

    # TEST THE MODEL
    if not is_best:
        checkpoint = torch.load(
            os.path.join(save_path, 'epoch_best_acc.pth.tar'),
            map_location=lambda storage, loc: storage)
        model.load_state_dict(checkpoint['state_dict'])
        args.epoch = checkpoint['epoch']
        print('Testing epoch with best dice ({}: acc {})'.format(
            args.epoch, checkpoint['acc']))

    test(args, model, test_loader, out_path, test_file)
Example #6
def evaluate(expression: str, at: Union[float, List[float]]) -> List[float]:
    evaluator = utils.Evaluator(expression, at)
    return evaluator.results
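# A hypothetical call, assuming utils.Evaluator(expression, at) evaluates the
# expression at the given point(s) and exposes the values through .results:
#     evaluate("x ** 2 + 1", [0.0, 1.0, 2.0])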
Example #7
def main(args):
    num_classes = 1
    size = [512, 512]  # size of images
    thresh = 0.5

    if args.out_dir:
        os.makedirs(args.out_dir, exist_ok=True)

    in_ = eddl.Input([1, size[0], size[1]])
    out = SegNetBN(in_, num_classes)
    out_sigm = eddl.Sigmoid(out)
    net = eddl.Model([in_], [out_sigm])
    eddl.build(
        net,
        eddl.adam(0.0001),
        ["cross_entropy"],
        ["mean_squared_error"],
        eddl.CS_GPU([1]) if args.gpu else eddl.CS_CPU()
    )
    eddl.summary(net)
    eddl.setlogfile(net, "pneumothorax_segmentation_inference")

    if not os.path.exists(args.ckpts):
        raise RuntimeError('Checkpoint "{}" not found'.format(args.ckpts))
    eddl.load(net, args.ckpts, "bin")

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
    ])
    test_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
    ])
    dataset_augs = ecvl.DatasetAugmentations([training_augs, None, test_augs])

    print("Reading dataset")
    d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs,
                       ecvl.ColorType.GRAY)
    x = Tensor([args.batch_size, d.n_channels_, size[0], size[1]])
    print("Testing")
    d.SetSplit(ecvl.SplitType.test)
    num_samples_test = len(d.GetSplit())
    num_batches_test = num_samples_test // args.batch_size

    evaluator = utils.Evaluator()
    evaluator.ResetEval()
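    # Inference only: threshold each predicted mask at `thresh` and write it
    # out as a PNG named after the source image.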
    for b in range(num_batches_test):
        n = 0
        print("Batch {:d}/{:d} ".format(
            b + 1, num_batches_test), end="", flush=True)
        d.LoadBatch(x)
        x.div_(255.0)
        eddl.forward(net, [x])
        if args.out_dir:
            output = eddl.getOutput(out_sigm)
            for k in range(args.batch_size):
                img = output.select([str(k)])
                img_I = ecvl.TensorToImage(img)
                img_I.colortype_ = ecvl.ColorType.GRAY
                img_I.channels_ = "xyc"
                ecvl.Threshold(img_I, img_I, thresh, 255)

                filename = d.samples_[d.GetSplit()[n]].location_[0]
                head, tail = os.path.splitext(os.path.basename(filename))
                bname = "{}.png".format(head)
                output_fn = os.path.join(args.out_dir, bname)
                ecvl.ImWrite(output_fn, img_I)

                n += 1
        print()
Example #8
    segmentationTemporaryFiles = [
        tempfile.mkstemp('.png', 'segmentation_')
        for scale in propagationScalings
    ]

    (segmentationListFD,
     segmentationListFilename) = tempfile.mkstemp('.txt',
                                                  'segmentationList_',
                                                  text=True)
    segmentationListFile = open(segmentationListFilename, 'w')
    for (fd, filename) in segmentationTemporaryFiles:
        segmentationListFile.write(filename + '\n')
    segmentationListFile.close()

    testFilenames = open(testListFilename, 'r').read().splitlines()

    areaEvaluator = utils.Evaluator(len(segmentationTemporaryFiles))
    boundaryEvaluator = utils.Evaluator(len(segmentationTemporaryFiles))
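    # One evaluator slot per propagation scale: area-based and boundary-based
    # statistics are accumulated separately.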

    for imageFilename in testFilenames:
        print(imageFilename)

        match = re.search('_(0*([1-9][0-9]*))\.', imageFilename)
        if match is None:
            match = re.search('_(0000(0))\.', imageFilename)
        assert match is not None, 'Cannot parse an image index'

        fullImageIndex = match.group(1)
        imageIndex = match.group(2)

        groundTruthFilename = imageFilename.replace(
            '/image_', '/' + groundTruthPrefix + '_')