# Common imports assumed by the snippets below (PyECVL / PyEDDL):
import os
import random
import sys

import numpy as np
import pyecvl.ecvl as ecvl
import pyeddl.eddl as eddl
from pyeddl.tensor import Tensor


def UNITOPatho(resolution=800, size=[224, 224], batch_size=32):
    # `size` and `batch_size` were free variables in the original snippet;
    # they are exposed as parameters here (the defaults are illustrative)
    if resolution == 800:
        #for 800 micron resolution images
        image_size = 1812
        ds_file = 'unitopath-public/800/unitopath-public-800.yml'
    elif resolution == 7000:
        #for 7000 micron resolution images
        image_size = 15855
        ds_file = 'unitopath-public/7000/unitopath-public-7000.yml'
    else:
        raise ValueError(f'Resolution must be 800 or 7000, got {resolution}')

    base_augmentation = []
    while image_size // 2 > size[0]:
        base_augmentation.append(ecvl.AugResizeDim([image_size // 2, image_size // 2]))
        image_size = image_size // 2
    base_augmentation.append(ecvl.AugResizeDim(size))

    # Augmentation examples
    training_augs = ecvl.SequentialAugmentationContainer(base_augmentation + [
        ecvl.AugMirror(.5),
        ecvl.AugFlip(.5),
        ecvl.AugRotate([-180, 180])
    ])

    # Augmentations for the training, validation and test sets
    dataset_augs = ecvl.DatasetAugmentations(
        [training_augs, None, None]
    )

    return ecvl.DLDataset(ds_file, batch_size, dataset_augs)
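
# A minimal usage sketch (not part of the original snippet): build the
# 800-micron UniToPatho dataset and load one batch. It assumes the
# unitopath-public YAML referenced above is available; the 224x224 size and
# batch size of 32 are illustrative, and the Tensor shapes follow the
# DLDataset pattern used in the examples below.
d = UNITOPatho(resolution=800, size=[224, 224], batch_size=32)
x = Tensor([32, d.n_channels_, 224, 224])
y = Tensor([32, len(d.classes_)])
d.LoadBatch(x, y)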
Example 2
def main(args):
    img = ecvl.ImRead(args.in_img)
    augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugRotate([-5, 5]),
        ecvl.AugMirror(.5),
        ecvl.AugFlip(.5),
        ecvl.AugGammaContrast([3, 5]),
        ecvl.AugAdditiveLaplaceNoise([0, 0.2 * 255]),
        ecvl.AugCoarseDropout([0, 0.55], [0.02, 0.1], 0.5),
        ecvl.AugAdditivePoissonNoise([0, 40]),
        ecvl.AugResizeDim([500, 500]),
    ])
    ecvl.AugmentationParam.SetSeed(0)
    augs.Apply(img)
    print("Executing ImageToTensor")
    t = ecvl.ImageToTensor(img)
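    # dividing and multiplying back by the same constant only exercises the
    # in-place tensor ops; the pixel values are left unchanged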
    t.div_(128)
    t.mult_(128)
    print("Executing TensorToImage")
    img = ecvl.TensorToImage(t)
    print("Executing TensorToView")
    ecvl.TensorToView(t)

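    # AUG_TXT is a module-level string, defined elsewhere in the original
    # script, that describes an augmentation chain in ECVL's text format;
    # AugmentationFactory.create() parses it into an augmentation container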
    _ = ecvl.AugmentationFactory.create(AUG_TXT)

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugRotate([-5, 5]),
        ecvl.AugAdditiveLaplaceNoise([0, 0.2 * 255]),
        ecvl.AugCoarseDropout([0, 0.55], [0.02, 0.1], 0),
        ecvl.AugAdditivePoissonNoise([0, 40]),
        ecvl.AugResizeDim([30, 30]),
    ])
    test_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim([30, 30]),
    ])
    ds_augs = ecvl.DatasetAugmentations([training_augs, None, test_augs])

    batch_size = 64
    print("Creating a DLDataset")
    d = ecvl.DLDataset(args.in_ds, batch_size, ds_augs, ecvl.ColorType.GRAY)
    print("Create x and y")
    x = Tensor(
        [batch_size, d.n_channels_, d.resize_dims_[0], d.resize_dims_[1]])
    y = Tensor([batch_size, len(d.classes_)])

    # Load a batch of d.batch_size_ images into x and corresponding labels
    # into y. Images are resized to the dimensions specified in the
    # augmentations chain
    print("Executing LoadBatch on training set")
    d.LoadBatch(x, y)

    # Change colortype and channels
    img = ecvl.TensorToImage(x)
    img.colortype_ = ecvl.ColorType.GRAY
    img.channels_ = "xyc"

    # Switch to Test split and load a batch of images
    print("Executing LoadBatch on test set")
    d.SetSplit(ecvl.SplitType.test)
    d.LoadBatch(x, y)
Example 3
def main(args):
    head, ext = os.path.splitext(os.path.basename(args.in_fn))
    img = ecvl.ImRead(args.in_fn)
    c = ecvl.SequentialAugmentationContainer([ecvl.AugFlip(0.5)])
    da = ecvl.DatasetAugmentations([c, None, None])
    # some output images should be flipped
    for i in range(10):
        da.Apply(ecvl.SplitType.training, img)
        out_fn = "%s_flip_%d%s" % (head, i, ext)
        print("writing", out_fn)
        ecvl.ImWrite(out_fn, img)
Example 4
def get_net(net_name='vgg16',
            in_size=[256, 256],
            num_classes=2,
            lr=1e-5,
            augs=False,
            gpus=[1],
            lsb=1,
            init=eddl.HeNormal,
            dropout=None,
            l2_reg=None):

    ## Network definition
    in_ = eddl.Input([3, in_size[0], in_size[1]])

    if net_name == 'vgg16':
        out = models.VGG16_promort(in_,
                                   num_classes,
                                   init=init,
                                   l2_reg=l2_reg,
                                   dropout=dropout)
    else:
        print('model %s not available' % net_name)
        sys.exit(-1)

    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.rmsprop(lr),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        #eddl.CS_GPU([1,1], mem="low_mem") if gpu else eddl.CS_CPU()
        eddl.CS_GPU(gpus, mem="low_mem", lsb=lsb) if gpus else eddl.CS_CPU()
        #eddl.CS_GPU(gpus, mem="low_mem") if gpus else eddl.CS_CPU()
    )

    eddl.summary(net)
    eddl.setlogfile(net, "promort_VGG16_classification")

    if augs:
        ## Set augmentations
        training_augs = ecvl.SequentialAugmentationContainer(
            [ecvl.AugMirror(.5),
             ecvl.AugFlip(.5),
             ecvl.AugRotate([-10, 10])])

        validation_augs = ecvl.SequentialAugmentationContainer([])

        dataset_augs = [training_augs, validation_augs, None]

    else:
        dataset_augs = [None, None, None]

    return net, dataset_augs
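
# A hypothetical follow-up (not in the original snippet): wrap the list
# returned by get_net() in ecvl.DatasetAugmentations and build a DLDataset,
# mirroring the other examples. The YAML path and batch size are placeholders.
net, dataset_augs = get_net(net_name='vgg16', augs=True, gpus=[1])
d = ecvl.DLDataset('promort.yml', 32, ecvl.DatasetAugmentations(dataset_augs))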
Example 5
def main(args):
    num_classes = 1
    size = [192, 192]  # size of images
    thresh = 0.5

    if args.out_dir:
        os.makedirs(args.out_dir, exist_ok=True)

    in_ = eddl.Input([3, size[0], size[1]])
    out = SegNet(in_, num_classes)
    out_sigm = eddl.Sigmoid(out)
    net = eddl.Model([in_], [out_sigm])
    eddl.build(net, eddl.adam(0.0001), ["cross_entropy"],
               ["mean_squared_error"],
               eddl.CS_GPU([1]) if args.gpu else eddl.CS_CPU())
    eddl.summary(net)
    eddl.setlogfile(net, "skin_lesion_segmentation")

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
        ecvl.AugMirror(0.5),
        ecvl.AugFlip(0.5),
        ecvl.AugRotate([-180, 180]),
        ecvl.AugAdditivePoissonNoise([0, 10]),
        ecvl.AugGammaContrast([0.5, 1.5]),
        ecvl.AugGaussianBlur([0, 0.8]),
        ecvl.AugCoarseDropout([0, 0.3], [0.02, 0.05], 0.5)
    ])
    validation_augs = ecvl.SequentialAugmentationContainer(
        [ecvl.AugResizeDim(size)])
    dataset_augs = ecvl.DatasetAugmentations(
        [training_augs, validation_augs, None])

    print("Reading dataset")
    d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs)
    x = Tensor([args.batch_size, d.n_channels_, size[0], size[1]])
    y = Tensor([args.batch_size, d.n_channels_gt_, size[0], size[1]])
    num_samples_train = len(d.GetSplit())
    num_batches_train = num_samples_train // args.batch_size
    d.SetSplit(ecvl.SplitType.validation)
    num_samples_validation = len(d.GetSplit())
    num_batches_validation = num_samples_validation // args.batch_size
    indices = list(range(args.batch_size))

    evaluator = utils.Evaluator()
    print("Starting training")
    for e in range(args.epochs):
        print("Epoch {:d}/{:d} - Training".format(e + 1, args.epochs),
              flush=True)
        d.SetSplit(ecvl.SplitType.training)
        eddl.reset_loss(net)
        s = d.GetSplit()
        random.shuffle(s)
        d.split_.training_ = s
        d.ResetAllBatches()
        for b in range(num_batches_train):
            print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format(
                e + 1, args.epochs, b + 1, num_batches_train),
                  end="",
                  flush=True)
            d.LoadBatch(x, y)
            x.div_(255.0)
            y.div_(255.0)
            tx, ty = [x], [y]
            eddl.train_batch(net, tx, ty, indices)
            eddl.print_loss(net, b)
            print()

        print("Saving weights")
        eddl.save(net, "isic_segmentation_checkpoint_epoch_%s.bin" % e, "bin")

        d.SetSplit(ecvl.SplitType.validation)
        evaluator.ResetEval()
        print("Epoch %d/%d - Evaluation" % (e + 1, args.epochs), flush=True)
        for b in range(num_batches_validation):
            n = 0
            print("Epoch {:d}/{:d} (batch {:d}/{:d}) ".format(
                e + 1, args.epochs, b + 1, num_batches_validation),
                  end="",
                  flush=True)
            d.LoadBatch(x, y)
            x.div_(255.0)
            y.div_(255.0)
            eddl.forward(net, [x])
            output = eddl.getOutput(out_sigm)
            for k in range(args.batch_size):
                img = output.select([str(k)])
                gt = y.select([str(k)])
                img_np = np.array(img, copy=False)
                gt_np = np.array(gt, copy=False)
                iou = evaluator.BinaryIoU(img_np, gt_np, thresh=thresh)
                print("- IoU: %.6g " % iou, end="", flush=True)
                if args.out_dir:
                    # C++ BinaryIoU modifies image as a side effect
                    img_np[img_np >= thresh] = 1
                    img_np[img_np < thresh] = 0
                    img_t = ecvl.TensorToView(img)
                    img_t.colortype_ = ecvl.ColorType.GRAY
                    img_t.channels_ = "xyc"
                    img.mult_(255.)
                    # orig_img
                    orig_img = x.select([str(k)])
                    orig_img.mult_(255.)
                    orig_img_t = ecvl.TensorToImage(orig_img)
                    orig_img_t.colortype_ = ecvl.ColorType.BGR
                    orig_img_t.channels_ = "xyc"

                    tmp, labels = ecvl.Image.empty(), ecvl.Image.empty()
                    ecvl.CopyImage(img_t, tmp, ecvl.DataType.uint8)
                    ecvl.ConnectedComponentsLabeling(tmp, labels)
                    ecvl.CopyImage(labels, tmp, ecvl.DataType.uint8)
                    contours = ecvl.FindContours(tmp)
                    ecvl.CopyImage(orig_img_t, tmp, ecvl.DataType.uint8)
                    tmp_np = np.array(tmp, copy=False)
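                    # draw the predicted contour on the original image in red
                    # (channels are in BGR order, so index 2 is the red plane)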
                    for cseq in contours:
                        for c in cseq:
                            tmp_np[c[0], c[1], 0] = 0
                            tmp_np[c[0], c[1], 1] = 0
                            tmp_np[c[0], c[1], 2] = 255
                    filename = d.samples_[d.GetSplit()[n]].location_[0]
                    head, tail = os.path.splitext(os.path.basename(filename))
                    bname = "%s.png" % head
                    output_fn = os.path.join(args.out_dir, bname)
                    ecvl.ImWrite(output_fn, tmp)
                    if e == 0:
                        gt_t = ecvl.TensorToView(gt)
                        gt_t.colortype_ = ecvl.ColorType.GRAY
                        gt_t.channels_ = "xyc"
                        gt.mult_(255.)
                        gt_filename = d.samples_[d.GetSplit()[n]].label_path_
                        gt_fn = os.path.join(args.out_dir,
                                             os.path.basename(gt_filename))
                        ecvl.ImWrite(gt_fn, gt_t)
                n += 1
            print()
        print("MIoU: %.6g" % evaluator.MeanMetric())
Example 6
def main(args):
    if not ecvl.ECVL_EDDL:
        print("No EDDL support - quitting")
        sys.exit(0)
    img = ecvl.ImRead(args.in_img)
    augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugCenterCrop(),  # Make image square
        ecvl.AugRotate([-5, 5]),
        ecvl.AugMirror(.5),
        ecvl.AugFlip(.5),
        ecvl.AugGammaContrast([3, 5]),
        ecvl.AugAdditiveLaplaceNoise([0, 0.2 * 255]),
        ecvl.AugCoarseDropout([0, 0.55], [0.02, 0.1], 0.5),
        ecvl.AugAdditivePoissonNoise([0, 40]),
        ecvl.AugResizeDim([500, 500]),
        ecvl.AugCenterCrop([224, 224]),
        ecvl.AugToFloat32(255),
        ecvl.AugNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    ecvl.AugmentationParam.SetSeed(0)
    print("Applying augmentations")
    augs.Apply(img)
    print("Executing ImageToTensor")
    t = ecvl.ImageToTensor(img)
    t.div_(128)
    t.mult_(128)
    print("Executing TensorToImage")
    img = ecvl.TensorToImage(t)
    print("Executing TensorToView")
    ecvl.TensorToView(t)

    print("Applying augmentations (from text)")
    newdeal_augs = ecvl.AugmentationFactory.create(AUG_TXT)
    newdeal_augs.Apply(img)

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugRotate([-5, 5]),
        ecvl.AugAdditiveLaplaceNoise([0, 0.2 * 255]),
        ecvl.AugCoarseDropout([0, 0.55], [0.02, 0.1], 0),
        ecvl.AugAdditivePoissonNoise([0, 40]),
        ecvl.AugResizeDim([30, 30]),
        ecvl.AugToFloat32(255),
        ecvl.AugNormalize(0.449, 0.226),  # ImageNet grayscale mean and std
    ])
    test_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim([30, 30]),
        ecvl.AugToFloat32(255),
        ecvl.AugNormalize(0.449, 0.226),  # ImageNet grayscale mean and std
    ])
    # number of augmentation containers must match number of dataset splits
    ds_augs = ecvl.DatasetAugmentations([training_augs, test_augs])

    batch_size = 64
    print("Creating a DLDataset")
    d = ecvl.DLDataset(args.in_ds, batch_size, ds_augs, ecvl.ColorType.GRAY)
    print("Create x and y")
    x = Tensor(
        [batch_size, d.n_channels_, d.resize_dims_[0], d.resize_dims_[1]])
    y = Tensor([batch_size, len(d.classes_)])

    # Load a batch of d.batch_size_ images into x and corresponding labels
    # into y. Images are resized to the dimensions specified in the
    # augmentations chain
    print("Executing LoadBatch on training set")
    d.LoadBatch(x, y)

    # Change colortype and channels
    img = ecvl.TensorToImage(x)
    img.colortype_ = ecvl.ColorType.GRAY
    img.channels_ = "xyc"

    # Switch to Test split and load a batch of images
    print("Executing LoadBatch on test set")
    d.SetSplit(ecvl.SplitType.test)
    d.LoadBatch(x, y)

    # Save some input images
    ecvl.ImWrite("mnist_batch.png", ecvl.MakeGrid(x, 8, False))
    ecvl.ImWrite("mnist_batch_normalized.png", ecvl.MakeGrid(x, 8, True))
Example 7
def main(args):
    num_classes = 8
    size = [224, 224]  # size of images

    in_ = eddl.Input([3, size[0], size[1]])
    out = VGG16(in_, num_classes)
    net = eddl.Model([in_], [out])
    eddl.build(net, eddl.sgd(0.001, 0.9), ["soft_cross_entropy"],
               ["categorical_accuracy"],
               eddl.CS_GPU([1]) if args.gpu else eddl.CS_CPU())
    eddl.summary(net)
    eddl.setlogfile(net, "skin_lesion_classification")

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
        ecvl.AugMirror(.5),
        ecvl.AugFlip(.5),
        ecvl.AugRotate([-180, 180]),
        ecvl.AugAdditivePoissonNoise([0, 10]),
        ecvl.AugGammaContrast([0.5, 1.5]),
        ecvl.AugGaussianBlur([0, 0.8]),
        ecvl.AugCoarseDropout([0, 0.3], [0.02, 0.05], 0.5)
    ])
    validation_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
    ])
    dataset_augs = ecvl.DatasetAugmentations(
        [training_augs, validation_augs, None])

    print("Reading dataset")
    d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs)
    x = Tensor([args.batch_size, d.n_channels_, size[0], size[1]])
    y = Tensor([args.batch_size, len(d.classes_)])
    num_samples_train = len(d.GetSplit())
    num_batches_train = num_samples_train // args.batch_size
    d.SetSplit(ecvl.SplitType.validation)
    num_samples_val = len(d.GetSplit())
    num_batches_val = num_samples_val // args.batch_size
    indices = list(range(args.batch_size))
    metric = eddl.getMetric("categorical_accuracy")

    print("Starting training")
    for e in range(args.epochs):
        print("Epoch {:d}/{:d} - Training".format(e + 1, args.epochs),
              flush=True)
        if args.out_dir:
            current_path = os.path.join(args.out_dir, "Epoch_%d" % e)
            for c in d.classes_:
                c_dir = os.path.join(current_path, c)
                os.makedirs(c_dir, exist_ok=True)
        d.SetSplit(ecvl.SplitType.training)
        eddl.reset_loss(net)
        total_metric = []
        s = d.GetSplit()
        random.shuffle(s)
        d.split_.training_ = s
        d.ResetAllBatches()
        for b in range(num_batches_train):
            print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format(
                e + 1, args.epochs, b + 1, num_batches_train),
                  end="",
                  flush=True)
            d.LoadBatch(x, y)
            x.div_(255.0)
            tx, ty = [x], [y]
            eddl.train_batch(net, tx, ty, indices)
            eddl.print_loss(net, b)
            print()

        print("Saving weights")
        eddl.save(net, "isic_classification_checkpoint_epoch_%s.bin" % e,
                  "bin")

        print("Epoch %d/%d - Evaluation" % (e + 1, args.epochs), flush=True)
        d.SetSplit(ecvl.SplitType.validation)
        for b in range(num_batches_val):
            n = 0
            print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format(
                e + 1, args.epochs, b + 1, num_batches_val),
                  end="",
                  flush=True)
            d.LoadBatch(x, y)
            x.div_(255.0)
            eddl.forward(net, [x])
            output = eddl.getOutput(out)
            sum_ = 0.0
            for k in range(args.batch_size):
                result = output.select([str(k)])
                target = y.select([str(k)])
                ca = metric.value(target, result)
                total_metric.append(ca)
                sum_ += ca
                if args.out_dir:
                    result_a = np.array(result, copy=False)
                    target_a = np.array(target, copy=False)
                    classe = np.argmax(result_a).item()
                    gt_class = np.argmax(target_a).item()
                    single_image = x.select([str(k)])
                    img_t = ecvl.TensorToView(single_image)
                    img_t.colortype_ = ecvl.ColorType.BGR
                    single_image.mult_(255.)
                    filename = d.samples_[d.GetSplit()[n]].location_[0]
                    head, tail = os.path.splitext(os.path.basename(filename))
                    bname = "%s_gt_class_%s.png" % (head, gt_class)
                    cur_path = os.path.join(current_path, d.classes_[classe],
                                            bname)
                    ecvl.ImWrite(cur_path, img_t)
                n += 1
            print("categorical_accuracy:", sum_ / args.batch_size)
        total_avg = sum(total_metric) / len(total_metric)
        print("Total categorical accuracy:", total_avg)
Example 8
def test_dataset():
    num_classes = 2
    size = [256, 256]  # size of images
    training_augs = ecvl.SequentialAugmentationContainer([
        #ecvl.AugResizeDim(size),
        ecvl.AugMirror(.5),
        ecvl.AugFlip(.5),
        ecvl.AugRotate([-180, 180]),
        #ecvl.AugAdditivePoissonNoise([0, 10]),
        #ecvl.AugGammaContrast([0.5, 1.5]),
        #ecvl.AugGaussianBlur([0, 0.8]),
        #ecvl.AugCoarseDropout([0, 0.3], [0.02, 0.05], 0.5),
    ])

    dataset_augs = [training_augs, None, None]

    in_ = eddl.Input([3, size[0], size[1]])
    out = VGG16(in_, num_classes)
    net = eddl.Model([in_], [out])
    eddl.build(
        net,
        eddl.rmsprop(1e-5),
        #eddl.sgd(0.001, 0.9),
        ["soft_cross_entropy"],
        ["categorical_accuracy"],
        eddl.CS_GPU([1], mem="low_mem")  # eddl.CS_CPU()
    )
    eddl.summary(net)

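    # CassandraDataset and cassandra_fit come from the DeepHealth Cassandra
    # data-loading plugin used by these examples; PlainTextAuthProvider and
    # getpass are presumably imported at the top of the original script
    # (from cassandra.auth and the standard library, respectively)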
    # Read Cassandra password
    try:
        from private_data import cass_pass
    except ImportError:
        cass_pass = getpass('Insert Cassandra password: ')
    ap = PlainTextAuthProvider(username='prom', password=cass_pass)
    cd = CassandraDataset(ap, ['cassandra_db'])

    # Flow 0: read rows from db, create splits and save everything
    # Level 0
    cd.init_listmanager(table='promort.ids_osk_0',
                        id_col='patch_id',
                        split_ncols=1,
                        num_classes=2,
                        metatable='promort.metadata_osk_0',
                        partition_cols=['sample_name', 'sample_rep', 'label'])
    cd.read_rows_from_db()
    cd.save_rows('/tmp/osk_0_rows.pckl')
    cd.init_datatable(table='promort.data_osk_0')
    cd.split_setup(batch_size=32,
                   split_ratios=[7, 2, 1],
                   max_patches=120000,
                   augs=[])
    cd.save_splits('/tmp/osk_0_splits.pckl')

    ## Flow 1: read saved rows, create and save splits
    #cd.load_rows('/tmp/rows.pckl')
    #cd.init_datatable(table='promort.data_osk_0')
    #cd.split_setup(batch_size=32, split_ratios=[7,2,1],
    #               max_patches=120000, augs=[])
    #cd.save_splits('/tmp/osk_0_splits.pckl')

    ## Flow 2: read saved splits
    #cd.load_splits('/tmp/osk_0_splits.pckl', batch_size=32, augs=[])

    ## fit generator
    cassandra_fit(cd, net, epochs=1)

    # change batch size
    cd.set_batchsize(16)
    cassandra_fit(cd, net, epochs=1)

    # change partitioning and balance
    cd.split_setup(max_patches=1000, split_ratios=[10, 1, 1], balance=[2, 1])
    cassandra_fit(cd, net, epochs=1)
Example 9
def main(args):
    batch_size = args.batch_size
    image_size = args.size, args.size
    thresh = 0.5

    if args.weights:
        os.makedirs(args.weights, exist_ok=True)

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(image_size,
                          ecvl.InterpolationType.cubic,
                          gt_interp=ecvl.InterpolationType.nearest),
        ecvl.AugMirror(.5),
        ecvl.AugFlip(.5),
        ecvl.AugRotate([-180, 180]),
        ecvl.AugAdditivePoissonNoise([0, 10]),
        ecvl.AugGammaContrast([0.5, 1.5]),
        ecvl.AugGaussianBlur([0, 0.8]),
        ecvl.AugCoarseDropout([0, 0.03], [0.02, 0.05], 0.25),
        ecvl.AugToFloat32(255, divisor_gt=255),
        ecvl.AugNormalize([0.6681, 0.5301, 0.5247],
                          [0.1337, 0.1480, 0.1595]),  # isic stats
    ])
    validation_test_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(image_size,
                          ecvl.InterpolationType.cubic,
                          gt_interp=ecvl.InterpolationType.nearest),
        ecvl.AugToFloat32(255, divisor_gt=255),
        ecvl.AugNormalize([0.6681, 0.5301, 0.5247],
                          [0.1337, 0.1480, 0.1595]),  # isic stats
    ])
    dataset_augs = ecvl.DatasetAugmentations(
        [training_augs, validation_test_augs, validation_test_augs])

    print('Reading dataset')
    d = ecvl.DLDataset(args.in_ds,
                       args.batch_size,
                       dataset_augs,
                       ctype=ecvl.ColorType.RGB)
    num_classes = len(d.classes_) or d.n_channels_gt_
    size = d.n_channels_, args.size, args.size

    if args.ckpts:
        net = eddl.import_net_from_onnx_file(args.ckpts, size)
    else:
        in_ = eddl.Input(size)
        out = Unet(in_, num_classes)
        out_sigm = eddl.Sigmoid(out)
        net = eddl.Model([in_], [out_sigm])

        # model_path = utils.DownloadModel(segmentation_zoo[args.model]['url'], f'{args.model}.onnx', 'model_onnx')
        # net = eddl.import_net_from_onnx_file(model_path, size)
        # eddl.removeLayer(net, segmentation_zoo[args.model]['to_remove'])
        # top = eddl.getLayer(net, segmentation_zoo[args.model]['top'])
        #
        # out = eddl.Sigmoid(eddl.Conv(top, num_classes, [3, 3], name='last_layer'))
        # data_input = eddl.getLayer(net, segmentation_zoo[args.model]['input'])  # input of the onnx
        # net = eddl.Model([data_input], [out])

    loss_name = 'binary_cross_entropy'
    metric_name = 'mean_squared_error'
    eddl.build(
        net, eddl.adam(args.learning_rate), [loss_name], [metric_name],
        eddl.CS_GPU(args.gpu, mem="low_mem") if args.gpu else eddl.CS_CPU(),
        True)
    out = eddl.getOut(net)[0]

    # if not args.ckpts:
    #     eddl.initializeLayer(net, "last_layer")

    eddl.summary(net)
    eddl.setlogfile(net, 'skin_lesion_segmentation')

    x = Tensor([args.batch_size, *size])
    y = Tensor([args.batch_size, d.n_channels_gt_, size[1], size[2]])

    miou = 0.
    if args.train:
        num_samples_train = len(d.GetSplit())
        num_batches_train = num_samples_train // args.batch_size
        num_samples_val = len(d.GetSplit(ecvl.SplitType.validation))
        num_batches_val = num_samples_val // args.batch_size
        evaluator = utils.Evaluator()

        print('Starting training')
        for e in range(args.epochs):
            d.SetSplit(ecvl.SplitType.training)
            eddl.reset_loss(net)
            s = d.GetSplit()
            random.shuffle(s)
            d.split_.training_ = s
            d.ResetAllBatches()
            for b in range(num_batches_train):
                d.LoadBatch(x, y)
                # x_ = x.select(["0"])
                # x_.normalize_(0, 1)
                # x_.mult_(255.)
                # x_.save(f'images/train_{e}_{b}.png')
                #
                # y_ = y.select(["0"])
                # # y_.mult_(255.)
                # y_.save(f'images/train_gt_{e}_{b}.png')

                eddl.train_batch(net, [x], [y])
                losses = eddl.get_losses(net)
                metrics = eddl.get_metrics(net)

                print(
                    f'Train - epoch [{e + 1}/{args.epochs}] - batch [{b + 1}/{num_batches_train}]'
                    f' - {loss_name}={losses[0]:.3f} - {metric_name}={metrics[0]:.3f}',
                    flush=True)

            d.SetSplit(ecvl.SplitType.validation)
            evaluator.ResetEval()
            eddl.reset_loss(net)

            for b in range(num_batches_val):
                n = 0
                print(
                    f'Validation - epoch [{e + 1}/{args.epochs}] - batch [{b + 1}/{num_batches_val}]'
                )
                d.LoadBatch(x, y)
                eddl.forward(net, [x])
                output = eddl.getOutput(out)
                for bs in range(args.batch_size):
                    img = output.select([str(bs)])
                    gt = y.select([str(bs)])
                    img_np = np.array(img, copy=False)
                    gt_np = np.array(gt, copy=False)
                    iou = evaluator.BinaryIoU(img_np, gt_np, thresh=thresh)
                    print(f' - IoU: {iou:.3f}', end="", flush=True)
                    if args.out_dir:
                        # C++ BinaryIoU modifies image as a side effect
                        img_np[img_np >= thresh] = 1
                        img_np[img_np < thresh] = 0
                        img_t = ecvl.TensorToView(img)
                        img_t.colortype_ = ecvl.ColorType.GRAY
                        img_t.channels_ = "xyc"
                        img.mult_(255.)
                        # orig_img
                        orig_img = x.select([str(bs)])
                        orig_img.mult_(255.)
                        orig_img_t = ecvl.TensorToImage(orig_img)
                        orig_img_t.colortype_ = ecvl.ColorType.BGR
                        orig_img_t.channels_ = "xyc"

                        tmp, labels = ecvl.Image.empty(), ecvl.Image.empty()
                        ecvl.CopyImage(img_t, tmp, ecvl.DataType.uint8)
                        ecvl.ConnectedComponentsLabeling(tmp, labels)
                        ecvl.CopyImage(labels, tmp, ecvl.DataType.uint8)
                        contours = ecvl.FindContours(tmp)
                        ecvl.CopyImage(orig_img_t, tmp, ecvl.DataType.uint8)
                        tmp_np = np.array(tmp, copy=False)
                        for cseq in contours:
                            for c in cseq:
                                tmp_np[c[0], c[1], 0] = 0
                                tmp_np[c[0], c[1], 1] = 0
                                tmp_np[c[0], c[1], 2] = 255
                        filename = d.samples_[d.GetSplit()[n]].location_[0]
                        head, tail = os.path.splitext(
                            os.path.basename(filename))
                        bname = "%s.png" % head
                        output_fn = os.path.join(args.out_dir, bname)
                        ecvl.ImWrite(output_fn, tmp)
                        if e == 0:
                            gt_t = ecvl.TensorToView(gt)
                            gt_t.colortype_ = ecvl.ColorType.GRAY
                            gt_t.channels_ = "xyc"
                            gt.mult_(255.)
                            gt_filename = d.samples_[d.GetSplit()
                                                     [n]].label_path_
                            gt_fn = os.path.join(args.out_dir,
                                                 os.path.basename(gt_filename))
                            ecvl.ImWrite(gt_fn, gt_t)
                    n += 1
                print()

            last_miou = evaluator.MIoU()
            print(
                f'Validation - epoch [{e + 1}/{args.epochs}] - Total MIoU: {last_miou:.3f}'
            )

            if last_miou > miou:
                miou = last_miou
                eddl.save_net_to_onnx_file(
                    net,
                    os.path.join(args.weights,
                                 f'isic_segm_{args.model}_epoch_{e + 1}.onnx'))
                print('Weights saved')
    elif args.test:
        evaluator = utils.Evaluator()
        evaluator.ResetEval()

        d.SetSplit(ecvl.SplitType.test)
        num_samples_test = len(d.GetSplit())
        num_batches_test = num_samples_test // batch_size
        for b in range(num_batches_test):
            n = 0
            print(f'Test - batch [{b + 1}/{num_batches_test}]')
            d.LoadBatch(x, y)
            eddl.forward(net, [x])
            output = eddl.getOutput(out)
            for bs in range(args.batch_size):
                img = output.select([str(bs)])
                gt = y.select([str(bs)])
                img_np, gt_np = np.array(img, copy=False), np.array(gt,
                                                                    copy=False)
                iou = evaluator.BinaryIoU(img_np, gt_np, thresh=thresh)
                print(f' - IoU: {iou:.3f}', end="", flush=True)
                if args.out_dir:
                    # C++ BinaryIoU modifies image as a side effect
                    img_np[img_np >= thresh] = 1
                    img_np[img_np < thresh] = 0
                    img_t = ecvl.TensorToView(img)
                    img_t.colortype_ = ecvl.ColorType.GRAY
                    img_t.channels_ = "xyc"
                    img.mult_(255.)
                    # orig_img
                    orig_img = x.select([str(bs)])
                    orig_img.mult_(255.)
                    orig_img_t = ecvl.TensorToImage(orig_img)
                    orig_img_t.colortype_ = ecvl.ColorType.BGR
                    orig_img_t.channels_ = "xyc"

                    tmp, labels = ecvl.Image.empty(), ecvl.Image.empty()
                    ecvl.CopyImage(img_t, tmp, ecvl.DataType.uint8)
                    ecvl.ConnectedComponentsLabeling(tmp, labels)
                    ecvl.CopyImage(labels, tmp, ecvl.DataType.uint8)
                    contours = ecvl.FindContours(tmp)
                    ecvl.CopyImage(orig_img_t, tmp, ecvl.DataType.uint8)
                    tmp_np = np.array(tmp, copy=False)
                    for cseq in contours:
                        for c in cseq:
                            tmp_np[c[0], c[1], 0] = 0
                            tmp_np[c[0], c[1], 1] = 0
                            tmp_np[c[0], c[1], 2] = 255
                    filename = d.samples_[d.GetSplit()[n]].location_[0]
                    head, tail = os.path.splitext(os.path.basename(filename))
                    bname = "%s.png" % head
                    output_fn = os.path.join(args.out_dir, bname)
                    ecvl.ImWrite(output_fn, tmp)

                    gt_t = ecvl.TensorToView(gt)
                    gt_t.colortype_ = ecvl.ColorType.GRAY
                    gt_t.channels_ = "xyc"
                    gt.mult_(255.)
                    gt_filename = d.samples_[d.GetSplit()[n]].label_path_
                    gt_fn = os.path.join(args.out_dir,
                                         os.path.basename(gt_filename))
                    ecvl.ImWrite(gt_fn, gt_t)
                n += 1
        miou = evaluator.MIoU()
        print(f'Test - Total MIoU: {miou:.3f}')
Example 10
def main(args):
    batch_size = args.batch_size
    image_size = args.size, args.size

    if args.weights:
        os.makedirs(args.weights, exist_ok=True)

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(image_size, ecvl.InterpolationType.cubic),
        ecvl.AugMirror(.5),
        ecvl.AugFlip(.5),
        ecvl.AugRotate([-180, 180]),
        ecvl.AugAdditivePoissonNoise([0, 10]),
        ecvl.AugGammaContrast([0.5, 1.5]),
        ecvl.AugGaussianBlur([0, 0.8]),
        ecvl.AugCoarseDropout([0, 0.03], [0.02, 0.05], 0.25),
        ecvl.AugToFloat32(255),
    ])
    validation_test_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(image_size),
        ecvl.AugToFloat32(255),
    ])
    dataset_augs = ecvl.DatasetAugmentations(
        [training_augs, validation_test_augs, validation_test_augs])

    print('Reading dataset')
    d = ecvl.DLDataset(args.in_ds,
                       args.batch_size,
                       dataset_augs,
                       ctype=ecvl.ColorType.RGB)
    num_classes = len(d.classes_)
    size = d.n_channels_, args.size, args.size

    if args.ckpts:
        net = eddl.import_net_from_onnx_file(args.ckpts, size)
    else:
        model_path = utils.DownloadModel(classification_zoo[args.model]['url'],
                                         f'{args.model}.onnx', 'model_onnx')
        net = eddl.import_net_from_onnx_file(model_path, size)
        eddl.removeLayer(net, classification_zoo[args.model]
                         ['to_remove'])  # remove last Linear of resnet
        top = eddl.getLayer(
            net,
            classification_zoo[args.model]['top'])  # get flatten of resnet

        out = eddl.Softmax(eddl.Dense(top, num_classes, True,
                                      'classifier'))  # true is for the bias
        data_input = eddl.getLayer(
            net, classification_zoo[args.model]['input'])  # input of the onnx
        net = eddl.Model([data_input], [out])

    eddl.build(
        net, eddl.adam(args.learning_rate), ['softmax_cross_entropy'],
        ['accuracy'],
        eddl.CS_GPU(args.gpu, mem="low_mem") if args.gpu else eddl.CS_CPU(),
        False)
    out = eddl.getOut(net)[0]

    if not args.ckpts:
        eddl.initializeLayer(net, "classifier")

    eddl.summary(net)
    eddl.setlogfile(net, 'skin_lesion_classification')

    x = Tensor([batch_size, *size])
    y = Tensor([batch_size, num_classes])

    metric_fn = eddl.getMetric('accuracy')
    best_accuracy = 0.
    if args.train:
        num_samples_train = len(d.GetSplit())
        num_batches_train = num_samples_train // args.batch_size
        num_samples_val = len(d.GetSplit(ecvl.SplitType.validation))
        num_batches_val = num_samples_val // args.batch_size

        print('Starting training')
        for e in range(args.epochs):
            if args.out_dir:
                current_path = os.path.join(args.out_dir, f'Epoch_{e}')
                for c in d.classes_:
                    c_dir = os.path.join(current_path, c)
                    os.makedirs(c_dir, exist_ok=True)
            d.SetSplit(ecvl.SplitType.training)
            eddl.reset_loss(net)
            s = d.GetSplit()
            random.shuffle(s)
            d.split_.training_ = s
            d.ResetAllBatches()
            for b in range(num_batches_train):
                d.LoadBatch(x, y)
                eddl.train_batch(net, [x], [y])
                losses = eddl.get_losses(net)
                metrics = eddl.get_metrics(net)

                print(
                    f'Train - epoch [{e + 1}/{args.epochs}] - batch [{b + 1}/{num_batches_train}]'
                    f' - loss={losses[0]:.3f} - accuracy={metrics[0]:.3f}',
                    flush=True)

            d.SetSplit(ecvl.SplitType.validation)
            values = np.zeros(num_batches_val)
            eddl.reset_loss(net)

            for b in range(num_batches_val):
                n = 0
                d.LoadBatch(x, y)
                eddl.forward(net, [x])
                output = eddl.getOutput(out)
                value = metric_fn.value(y, output)
                values[b] = value
                if args.out_dir:
                    for k in range(args.batch_size):
                        result = output.select([str(k)])
                        target = y.select([str(k)])
                        result_a = np.array(result, copy=False)
                        target_a = np.array(target, copy=False)
                        classe = np.argmax(result_a).item()
                        gt_class = np.argmax(target_a).item()
                        single_image = x.select([str(k)])
                        img_t = ecvl.TensorToView(single_image)
                        img_t.colortype_ = ecvl.ColorType.BGR
                        single_image.mult_(255.)
                        filename = d.samples_[d.GetSplit()[n]].location_[0]
                        head, tail = os.path.splitext(
                            os.path.basename(filename))
                        bname = '{}_gt_class_{}.png'.format(head, gt_class)
                        cur_path = os.path.join(current_path,
                                                d.classes_[classe], bname)
                        ecvl.ImWrite(cur_path, img_t)
                        n += 1

                print(
                    f'Validation - epoch [{e + 1}/{args.epochs}] - batch [{b + 1}/{num_batches_val}] -'
                    f' accuracy={np.mean(values[:b + 1] / batch_size):.3f}')

            last_accuracy = np.mean(values / batch_size)
            print(
                f'Validation - epoch [{e + 1}/{args.epochs}] - total accuracy={last_accuracy:.3f}'
            )
            if last_accuracy > best_accuracy:
                best_accuracy = last_accuracy
                print('Saving weights')
                eddl.save_net_to_onnx_file(
                    net,
                    f'isic_classification_{args.model}_epoch_{e + 1}.onnx')

    elif args.test:
        d.SetSplit(ecvl.SplitType.test)
        num_samples_test = len(d.GetSplit())
        num_batches_test = num_samples_test // batch_size
        values = np.zeros(num_batches_test)
        eddl.reset_loss(net)

        for b in range(num_batches_test):
            d.LoadBatch(x, y)
            eddl.forward(net, [x])
            output = eddl.getOutput(out)
            value = metric_fn.value(y, output)
            values[b] = value
            if args.out_dir:
                n = 0
                for k in range(args.batch_size):
                    result = output.select([str(k)])
                    target = y.select([str(k)])
                    result_a = np.array(result, copy=False)
                    target_a = np.array(target, copy=False)
                    classe = np.argmax(result_a).item()
                    gt_class = np.argmax(target_a).item()
                    single_image = x.select([str(k)])
                    img_t = ecvl.TensorToView(single_image)
                    img_t.colortype_ = ecvl.ColorType.BGR
                    single_image.mult_(255.)
                    filename = d.samples_[d.GetSplit()[n]].location_[0]
                    head, tail = os.path.splitext(os.path.basename(filename))
                    bname = "%s_gt_class_%s.png" % (head, gt_class)
                    cur_path = os.path.join(args.out_dir, d.classes_[classe],
                                            bname)
                    ecvl.ImWrite(cur_path, img_t)
                    n += 1

            print(
                f'Test - batch [{b + 1}/{num_batches_test}] - accuracy={np.mean(values[:b + 1] / batch_size):.3f}'
            )
        print(f'Test - total accuracy={np.mean(values / batch_size):.3f}')