Example No. 1
def eddl_load_data_cifar(path=None,
                         download=False,
                         load_subset=False,
                         subset_size=4096):
    """
    :param path: Path to CIFAR10 dataset
    :param download: Default-False - download the dataset to current directory.
    :param load_subset: Default-False - use subset of the dataset for testing purpose.
    :param subset_size: Default-4096, the test size is subset_size/4.
    :return: two tuples of (x_train, y_train), (x_test, y_test) .
    """
    (x_train, y_train), (x_test, y_test) = resnet.load_cifar(path, download)

    if load_subset:
        x_subset_train = Tensor([subset_size, 3, 32, 32])
        y_subset_train = Tensor([subset_size, 10])

        eddl.next_batch([x_train], [x_subset_train])
        eddl.next_batch([y_train], [y_subset_train])

        x_subset_test = Tensor([math.ceil(subset_size / 4), 3, 32, 32])
        y_subset_test = Tensor([math.ceil(subset_size / 4), 10])

        eddl.next_batch([x_test], [x_subset_test])
        eddl.next_batch([y_test], [y_subset_test])
        return (x_subset_train, y_subset_train), (x_subset_test, y_subset_test)

    return (x_train, y_train), (x_test, y_test)
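A minimal usage sketch for the loader above, assuming the surrounding module's imports (resnet, Tensor, eddl, math) are available; the path value is hypothetical.

# Hedged usage sketch: load a small CIFAR-10 subset for quick experiments.
(x_train, y_train), (x_test, y_test) = eddl_load_data_cifar(
    path="./cifar10",   # hypothetical local path
    download=True,
    load_subset=True,
    subset_size=4096)
print(x_train.shape)  # [4096, 3, 32, 32]; the test subset holds 4096 / 4 = 1024 samples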
Example No. 2
def main(args):
    img = ecvl.ImRead(args.in_img)
    augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugRotate([-5, 5]),
        ecvl.AugMirror(.5),
        ecvl.AugFlip(.5),
        ecvl.AugGammaContrast([3, 5]),
        ecvl.AugAdditiveLaplaceNoise([0, 0.2 * 255]),
        ecvl.AugCoarseDropout([0, 0.55], [0.02, 0.1], 0.5),
        ecvl.AugAdditivePoissonNoise([0, 40]),
        ecvl.AugResizeDim([500, 500]),
    ])
    ecvl.AugmentationParam.SetSeed(0)
    augs.Apply(img)
    print("Executing ImageToTensor")
    t = ecvl.ImageToTensor(img)
    t.div_(128)
    t.mult_(128)
    print("Executing TensorToImage")
    img = ecvl.TensorToImage(t)
    print("Executing TensorToView")
    ecvl.TensorToView(t)

    _ = ecvl.AugmentationFactory.create(AUG_TXT)

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugRotate([-5, 5]),
        ecvl.AugAdditiveLaplaceNoise([0, 0.2 * 255]),
        ecvl.AugCoarseDropout([0, 0.55], [0.02, 0.1], 0),
        ecvl.AugAdditivePoissonNoise([0, 40]),
        ecvl.AugResizeDim([30, 30]),
    ])
    test_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim([30, 30]),
    ])
    ds_augs = ecvl.DatasetAugmentations([training_augs, None, test_augs])

    batch_size = 64
    print("Creating a DLDataset")
    d = ecvl.DLDataset(args.in_ds, batch_size, ds_augs, ecvl.ColorType.GRAY)
    print("Create x and y")
    x = Tensor(
        [batch_size, d.n_channels_, d.resize_dims_[0], d.resize_dims_[1]])
    y = Tensor([batch_size, len(d.classes_)])

    # Load a batch of d.batch_size_ images into x and corresponding labels
    # into y. Images are resized to the dimensions specified in the
    # augmentations chain
    print("Executing LoadBatch on training set")
    d.LoadBatch(x, y)

    # Change colortype and channels
    img = ecvl.TensorToImage(x)
    img.colortype_ = ecvl.ColorType.GRAY
    img.channels_ = "xyc"

    # Switch to Test split and load a batch of images
    print("Executing LoadBatch on test set")
    d.SetSplit(ecvl.SplitType.test)
    d.LoadBatch(x, y)
Example No. 3
def test_py_loss():
    shape = [8, 10]
    a = np.random.random(shape).astype(np.float32)
    b = np.random.random(shape).astype(np.float32)
    t, y = Tensor.fromarray(a), Tensor.fromarray(b)
    z = Tensor(shape)
    exp_z = Tensor(shape)
    py_mse_loss = MSELoss()
    mse_loss = eddl.getLoss("mse")
    mse_loss.delta(t, y, exp_z)
    py_mse_loss.delta(t, y, z)
    c = np.array(z, copy=False)
    exp_c = np.array(exp_z, copy=False)
    assert np.array_equal(c, exp_c)
    v = py_mse_loss.value(t, y)
    exp_v = mse_loss.value(t, y)
    assert v == pytest.approx(exp_v)
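For reference, the quantities this test compares, written as plain NumPy formulas. This is a sketch of the usual mean-squared-error definitions; the actual MSELoss class is defined elsewhere in this project and its exact scaling may differ.

import numpy as np

def mse_value(t, y):
    # total squared error between target t and prediction y
    return float(np.sum((t - y) ** 2))

def mse_delta(t, y):
    # gradient of the squared error with respect to the prediction y
    return 2.0 * (y - t)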
Example No. 4
def main(args):
    eddl.download_mnist()

    in_ = eddl.Input([784])
    target = eddl.Reshape(in_, [1, 28, 28])
    layer = in_
    layer = eddl.Reshape(layer, [1, 28, 28])
    layer = eddl.ReLu(eddl.Conv(layer, 8, [3, 3]))
    layer = eddl.ReLu(eddl.Conv(layer, 16, [3, 3]))
    layer = eddl.ReLu(eddl.Conv(layer, 8, [3, 3]))
    out = eddl.Sigmoid(eddl.Conv(layer, 1, [3, 3]))
    net = eddl.Model([in_], [])

    eddl.build(
        net,
        eddl.adam(0.001),
        [],
        [],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)

    x_train = Tensor.load("mnist_trX.bin")
    if args.small:
        x_train = x_train.select([":6000"])
    x_train.div_(255.0)

    mse = eddl.newloss(mse_loss, [out, target], "mse_loss")
    dicei = eddl.newloss(dice_loss_img, [out, target], "dice_loss_img")
    dicep = eddl.newloss(dice_loss_pixel, [out, target], "dice_loss_pixel")

    batch = Tensor([args.batch_size, 784])
    num_batches = x_train.shape[0] // args.batch_size
    for i in range(args.epochs):
        print("Epoch %d/%d (%d batches)" % (i + 1, args.epochs, num_batches))
        diceploss = 0.0
        diceiloss = 0.0
        mseloss = 0
        for j in range(num_batches):
            print("Batch %d " % j, end="", flush=True)
            eddl.next_batch([x_train], [batch])
            eddl.zeroGrads(net)
            eddl.forward(net, [batch])
            diceploss += eddl.compute_loss(dicep) / args.batch_size
            print("diceploss = %.6f " % (diceploss / (j + 1)), end="")
            diceiloss += eddl.compute_loss(dicei) / args.batch_size
            print("diceiloss = %.6f " % (diceiloss / (j + 1)), end="")
            mseloss += eddl.compute_loss(mse) / args.batch_size
            print("mseloss = %.6f\r" % (mseloss / (j + 1)), end="")
            eddl.optimize(dicep)
            eddl.update(net)
        print()
    print("All done")
Example No. 5
def fun(rows):
    assert (len(rows) == 1)
    item = rows[0]
    feat, lab = self._get_img(item)
    with self.lock:
        self.feats.append(feat)
        self.labels.append(lab)
        self.perm.append(idx)
        self.cow += 1
        self.onair -= 1
        if (self.cow == self.tot):  # last patch
            # recover original order of images
            sh = []
            for (i, x) in enumerate(self.perm):
                sh.append([x, i])
            sh = np.array(sorted(sh))[:, 1]
            # reorder data and conclude
            feats = np.array(self.feats)[sh]
            labels = np.array(self.labels)[sh]
            self.bb = (Tensor(feats.transpose(0, 3, 1, 2)), Tensor(labels))
            self.finished_event.set()
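A tiny NumPy illustration of the reordering trick above: pair each original index with its arrival position, sort by original index, and keep the arrival positions; indexing with that vector restores the original order.

import numpy as np

perm = [2, 0, 3, 1]                        # original indices, in arrival order
arrived = np.array(["c", "a", "d", "b"])   # items as they arrived
sh = np.array(sorted([x, i] for i, x in enumerate(perm)))[:, 1]
print(arrived[sh])                         # ['a' 'b' 'c' 'd'] -- original order restored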
Example No. 6
def main(args):
    if not ecvl.ECVL_EDDL:
        print("No EDDL support - quitting")
        sys.exit(0)
    img = ecvl.ImRead(args.in_img)
    augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugCenterCrop(),  # Make image square
        ecvl.AugRotate([-5, 5]),
        ecvl.AugMirror(.5),
        ecvl.AugFlip(.5),
        ecvl.AugGammaContrast([3, 5]),
        ecvl.AugAdditiveLaplaceNoise([0, 0.2 * 255]),
        ecvl.AugCoarseDropout([0, 0.55], [0.02, 0.1], 0.5),
        ecvl.AugAdditivePoissonNoise([0, 40]),
        ecvl.AugResizeDim([500, 500]),
        ecvl.AugCenterCrop([224, 224]),
        ecvl.AugToFloat32(255),
        ecvl.AugNormalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    ecvl.AugmentationParam.SetSeed(0)
    print("Applying augmentations")
    augs.Apply(img)
    print("Executing ImageToTensor")
    t = ecvl.ImageToTensor(img)
    t.div_(128)
    t.mult_(128)
    print("Executing TensorToImage")
    img = ecvl.TensorToImage(t)
    print("Executing TensorToView")
    ecvl.TensorToView(t)

    print("Applying augmentations (from text)")
    newdeal_augs = ecvl.AugmentationFactory.create(AUG_TXT)
    newdeal_augs.Apply(img)

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugRotate([-5, 5]),
        ecvl.AugAdditiveLaplaceNoise([0, 0.2 * 255]),
        ecvl.AugCoarseDropout([0, 0.55], [0.02, 0.1], 0),
        ecvl.AugAdditivePoissonNoise([0, 40]),
        ecvl.AugResizeDim([30, 30]),
        ecvl.AugToFloat32(255),
        ecvl.AugNormalize(0.449, 0.226),  # mean of imagenet stats
    ])
    test_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim([30, 30]),
        ecvl.AugToFloat32(255),
        ecvl.AugNormalize(0.449, 0.226),  # mean of imagenet stats
    ])
    # number of augmentation containers must match number of dataset splits
    ds_augs = ecvl.DatasetAugmentations([training_augs, test_augs])

    batch_size = 64
    print("Creating a DLDataset")
    d = ecvl.DLDataset(args.in_ds, batch_size, ds_augs, ecvl.ColorType.GRAY)
    print("Create x and y")
    x = Tensor(
        [batch_size, d.n_channels_, d.resize_dims_[0], d.resize_dims_[1]])
    y = Tensor([batch_size, len(d.classes_)])

    # Load a batch of d.batch_size_ images into x and corresponding labels
    # into y. Images are resized to the dimensions specified in the
    # augmentations chain
    print("Executing LoadBatch on training set")
    d.LoadBatch(x, y)

    # Change colortype and channels
    img = ecvl.TensorToImage(x)
    img.colortype_ = ecvl.ColorType.GRAY
    img.channels_ = "xyc"

    # Switch to Test split and load a batch of images
    print("Executing LoadBatch on test set")
    d.SetSplit(ecvl.SplitType.test)
    d.LoadBatch(x, y)

    # Save some input images
    ecvl.ImWrite("mnist_batch.png", ecvl.MakeGrid(x, 8, False))
    ecvl.ImWrite("mnist_batch_normalized.png", ecvl.MakeGrid(x, 8, True))
Example No. 7
def main(args):
    num_classes = 1
    size = [512, 512]  # size of images
    thresh = 0.5
    best_dice = -1

    if args.out_dir:
        os.makedirs(args.out_dir, exist_ok=True)

    in_ = eddl.Input([1, size[0], size[1]])
    out = SegNetBN(in_, num_classes)
    out_sigm = eddl.Sigmoid(out)
    net = eddl.Model([in_], [out_sigm])
    eddl.build(net, eddl.adam(0.0001), ["cross_entropy"],
               ["mean_squared_error"],
               eddl.CS_GPU([1], mem='low_mem') if args.gpu else eddl.CS_CPU())
    eddl.summary(net)
    eddl.setlogfile(net, "pneumothorax_segmentation_training")

    if args.ckpts and os.path.exists(args.ckpts):
        print("Loading checkpoints '{}'".format(args.ckpts))
        eddl.load(net, args.ckpts, 'bin')

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
        ecvl.AugMirror(0.5),
        ecvl.AugRotate([-10, 10]),
        ecvl.AugBrightness([0, 30]),
        ecvl.AugGammaContrast([0, 3]),
    ])
    validation_augs = ecvl.SequentialAugmentationContainer(
        [ecvl.AugResizeDim(size)])
    dataset_augs = ecvl.DatasetAugmentations(
        [training_augs, validation_augs, None])

    print("Reading dataset")
    d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs,
                       ecvl.ColorType.GRAY)
    # Prepare tensors which store batch
    x = Tensor([args.batch_size, d.n_channels_, size[0], size[1]])
    y = Tensor([args.batch_size, d.n_channels_gt_, size[0], size[1]])

    # Retrieve indices of images with a black ground truth,
    # which are not included in any split
    train_split = d.GetSplit(ecvl.SplitType.training)
    val_split = d.GetSplit(ecvl.SplitType.validation)
    test_split = d.GetSplit(ecvl.SplitType.test)
    all_split = set(train_split + val_split + test_split)

    images_list = set(range(len(d.samples_)))

    # Obtain images with black ground truth
    black_images = images_list - all_split

    # Extend the training set by 25% with samples that have a black ground truth.
    num_samples_train = math.floor(len(train_split) * 1.25)
    num_batches_train = num_samples_train // args.batch_size

    # Extend the validation set by 25% with samples that have a black ground truth.
    num_samples_validation = math.floor(len(val_split) * 1.25)
    num_batches_validation = num_samples_validation // args.batch_size

    black_images = list(black_images)
    black_training = black_images[0:-(num_samples_validation - len(val_split))]
    black_validation = black_images[-(num_samples_validation -
                                      len(val_split)):]
    indices = list(range(args.batch_size))

    evaluator = utils.Evaluator()
    print("Starting training")
    for e in range(args.epochs):
        print("Epoch {:d}/{:d} - Training".format(e + 1, args.epochs),
              flush=True)
        d.SetSplit(ecvl.SplitType.training)
        eddl.reset_loss(net)
        s = d.GetSplit()
        random.shuffle(s)
        d.split_.training_ = s
        random.shuffle(black_training)

        d.ResetAllBatches()
        # Indices to track mask and black vector in PneumothoraxLoadBatch
        m_i = 0
        b_i = 0
        for i, b in enumerate(range(num_batches_train)):
            d, images, labels, _, m_i, b_i = PneumothoraxLoadBatch(
                d, black_training, m_i, b_i)
            x, y = fill_tensors(images, labels, x, y)
            x.div_(255.0)
            y.div_(255.0)
            eddl.train_batch(net, [x], [y], indices)
            if i % args.log_interval == 0:
                print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format(
                    e + 1, args.epochs, b + 1, num_batches_train),
                      end="",
                      flush=True)
                eddl.print_loss(net, b)
                print()

        d.SetSplit(ecvl.SplitType.validation)
        evaluator.ResetEval()
        print("Epoch %d/%d - Evaluation" % (e + 1, args.epochs), flush=True)
        m_i = 0
        b_i = 0
        for b in range(num_batches_validation):
            n = 0
            print("Epoch {:d}/{:d} (batch {:d}/{:d}) ".format(
                e + 1, args.epochs, b + 1, num_batches_validation),
                  end="",
                  flush=True)
            d, images, labels, names, m_i, b_i = PneumothoraxLoadBatch(
                d, black_validation, m_i, b_i)
            x, y = fill_tensors(images, labels, x, y)
            x.div_(255.0)
            y.div_(255.0)
            eddl.forward(net, [x])
            output = eddl.getOutput(out_sigm)

            # Compute Dice metric and optionally save the output images
            for k in range(args.batch_size):
                pred = output.select([str(k)])
                gt = y.select([str(k)])
                pred_np = np.array(pred, copy=False)
                gt_np = np.array(gt, copy=False)
                # DiceCoefficient modifies image as a side effect
                dice = evaluator.DiceCoefficient(pred_np, gt_np, thresh=thresh)
                print("- Dice: {:.6f} ".format(dice), end="", flush=True)

                if args.out_dir:
                    # Save original image fused together with prediction and
                    # ground truth
                    pred_np *= 255
                    pred_ecvl = ecvl.TensorToImage(pred)
                    pred_ecvl.colortype_ = ecvl.ColorType.GRAY
                    pred_ecvl.channels_ = "xyc"
                    ecvl.ResizeDim(pred_ecvl, pred_ecvl, (1024, 1024),
                                   ecvl.InterpolationType.nearest)

                    filename_gt = names[n + 1]
                    gt_ecvl = ecvl.ImRead(filename_gt,
                                          ecvl.ImReadMode.GRAYSCALE)

                    filename = names[n]

                    # Image as BGR
                    img_ecvl = ecvl.ImRead(filename)
                    ecvl.Stack([img_ecvl, img_ecvl, img_ecvl], img_ecvl)
                    img_ecvl.channels_ = "xyc"
                    img_ecvl.colortype_ = ecvl.ColorType.BGR
                    image_np = np.array(img_ecvl, copy=False)
                    pred_np = np.array(pred_ecvl, copy=False)
                    gt_np = np.array(gt_ecvl, copy=False)

                    pred_np = pred_np.squeeze()
                    gt_np = gt_np.squeeze()
                    # Prediction overlaid on the R channel (where mask == 255)
                    image_np[:, :, -1] = np.where(pred_np == 255, pred_np,
                                                  image_np[:, :, -1])
                    # Ground truth overlaid on the G channel (where mask == 255)
                    image_np[:, :, 1] = np.where(gt_np == 255, gt_np,
                                                 image_np[:, :, 1])

                    n += 2
                    head, tail = os.path.splitext(os.path.basename(filename))
                    bname = "{}.png".format(head)
                    filepath = os.path.join(args.out_dir, bname)
                    ecvl.ImWrite(filepath, img_ecvl)

            print()

        mean_dice = evaluator.MeanMetric()
        if mean_dice > best_dice:
            print("Saving weights")
            eddl.save(
                net, "pneumothorax_segnetBN_adam_lr_0.0001_"
                "loss_ce_size_512_{}.bin".format(e + 1), "bin")
            best_dice = mean_dice
        print("Mean Dice Coefficient: {:.6g}".format(mean_dice))
Example No. 8
def main(args):
    num_classes = 1
    size = [512, 512]  # size of images
    thresh = 0.5

    if args.out_dir:
        os.makedirs(args.out_dir, exist_ok=True)

    in_ = eddl.Input([1, size[0], size[1]])
    out = SegNetBN(in_, num_classes)
    out_sigm = eddl.Sigmoid(out)
    net = eddl.Model([in_], [out_sigm])
    eddl.build(
        net,
        eddl.adam(0.0001),
        ["cross_entropy"],
        ["mean_squared_error"],
        eddl.CS_GPU([1]) if args.gpu else eddl.CS_CPU()
    )
    eddl.summary(net)
    eddl.setlogfile(net, "pneumothorax_segmentation_inference")

    if not os.path.exists(args.ckpts):
        raise RuntimeError('Checkpoint "{}" not found'.format(args.ckpts))
    eddl.load(net, args.ckpts, "bin")

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
    ])
    test_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
    ])
    dataset_augs = ecvl.DatasetAugmentations([training_augs, None, test_augs])

    print("Reading dataset")
    d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs,
                       ecvl.ColorType.GRAY)
    x = Tensor([args.batch_size, d.n_channels_, size[0], size[1]])
    print("Testing")
    d.SetSplit(ecvl.SplitType.test)
    num_samples_test = len(d.GetSplit())
    num_batches_test = num_samples_test // args.batch_size

    evaluator = utils.Evaluator()
    evaluator.ResetEval()
    for b in range(num_batches_test):
        n = 0
        print("Batch {:d}/{:d} ".format(
            b + 1, num_batches_test), end="", flush=True)
        d.LoadBatch(x)
        x.div_(255.0)
        eddl.forward(net, [x])
        if args.out_dir:
            output = eddl.getOutput(out_sigm)
            for k in range(args.batch_size):
                img = output.select([str(k)])
                img_I = ecvl.TensorToImage(img)
                img_I.colortype_ = ecvl.ColorType.GRAY
                img_I.channels_ = "xyc"
                ecvl.Threshold(img_I, img_I, thresh, 255)

                filename = d.samples_[d.GetSplit()[n]].location_[0]
                head, tail = os.path.splitext(os.path.basename(filename))
                bname = "{}.png".format(head)
                output_fn = os.path.join(args.out_dir, bname)
                ecvl.ImWrite(output_fn, img_I)

                n += 1
        print()
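The ecvl.Threshold call above is assumed here to behave like a standard binary threshold (pixels above the threshold become the given value, the rest 0); a rough, standalone NumPy sketch of that step, not the ECVL implementation itself:

import numpy as np

pred = np.random.random((512, 512)).astype(np.float32)   # stand-in for one predicted mask
binary = np.where(pred > 0.5, 255, 0).astype(np.uint8)   # hedged sketch of the thresholding step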
Example No. 9
def main(args):
    num_classes = 8
    size = [224, 224]  # size of images

    in_ = eddl.Input([3, size[0], size[1]])
    out = VGG16(in_, num_classes)
    net = eddl.Model([in_], [out])
    eddl.build(net, eddl.sgd(0.001, 0.9), ["soft_cross_entropy"],
               ["categorical_accuracy"],
               eddl.CS_GPU([1]) if args.gpu else eddl.CS_CPU())
    eddl.summary(net)
    eddl.setlogfile(net, "skin_lesion_classification")

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
        ecvl.AugMirror(.5),
        ecvl.AugFlip(.5),
        ecvl.AugRotate([-180, 180]),
        ecvl.AugAdditivePoissonNoise([0, 10]),
        ecvl.AugGammaContrast([0.5, 1.5]),
        ecvl.AugGaussianBlur([0, 0.8]),
        ecvl.AugCoarseDropout([0, 0.3], [0.02, 0.05], 0.5)
    ])
    validation_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
    ])
    dataset_augs = ecvl.DatasetAugmentations(
        [training_augs, validation_augs, None])

    print("Reading dataset")
    d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs)
    x = Tensor([args.batch_size, d.n_channels_, size[0], size[1]])
    y = Tensor([args.batch_size, len(d.classes_)])
    num_samples_train = len(d.GetSplit())
    num_batches_train = num_samples_train // args.batch_size
    d.SetSplit(ecvl.SplitType.validation)
    num_samples_val = len(d.GetSplit())
    num_batches_val = num_samples_val // args.batch_size
    indices = list(range(args.batch_size))
    metric = eddl.getMetric("categorical_accuracy")

    print("Starting training")
    for e in range(args.epochs):
        print("Epoch {:d}/{:d} - Training".format(e + 1, args.epochs),
              flush=True)
        if args.out_dir:
            current_path = os.path.join(args.out_dir, "Epoch_%d" % e)
            for c in d.classes_:
                c_dir = os.path.join(current_path, c)
                os.makedirs(c_dir, exist_ok=True)
        d.SetSplit(ecvl.SplitType.training)
        eddl.reset_loss(net)
        total_metric = []
        s = d.GetSplit()
        random.shuffle(s)
        d.split_.training_ = s
        d.ResetAllBatches()
        for b in range(num_batches_train):
            print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format(
                e + 1, args.epochs, b + 1, num_batches_train),
                  end="",
                  flush=True)
            d.LoadBatch(x, y)
            x.div_(255.0)
            tx, ty = [x], [y]
            eddl.train_batch(net, tx, ty, indices)
            eddl.print_loss(net, b)
            print()

        print("Saving weights")
        eddl.save(net, "isic_classification_checkpoint_epoch_%s.bin" % e,
                  "bin")

        print("Epoch %d/%d - Evaluation" % (e + 1, args.epochs), flush=True)
        d.SetSplit(ecvl.SplitType.validation)
        for b in range(num_batches_val):
            n = 0
            print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format(
                e + 1, args.epochs, b + 1, num_batches_val),
                  end="",
                  flush=True)
            d.LoadBatch(x, y)
            x.div_(255.0)
            eddl.forward(net, [x])
            output = eddl.getOutput(out)
            sum_ = 0.0
            for k in range(args.batch_size):
                result = output.select([str(k)])
                target = y.select([str(k)])
                ca = metric.value(target, result)
                total_metric.append(ca)
                sum_ += ca
                if args.out_dir:
                    result_a = np.array(result, copy=False)
                    target_a = np.array(target, copy=False)
                    classe = np.argmax(result_a).item()
                    gt_class = np.argmax(target_a).item()
                    single_image = x.select([str(k)])
                    img_t = ecvl.TensorToView(single_image)
                    img_t.colortype_ = ecvl.ColorType.BGR
                    single_image.mult_(255.)
                    filename = d.samples_[d.GetSplit()[n]].location_[0]
                    head, tail = os.path.splitext(os.path.basename(filename))
                    bname = "%s_gt_class_%s.png" % (head, gt_class)
                    cur_path = os.path.join(current_path, d.classes_[classe],
                                            bname)
                    ecvl.ImWrite(cur_path, img_t)
                n += 1
            print("categorical_accuracy:", sum_ / args.batch_size)
        total_avg = sum(total_metric) / len(total_metric)
        print("Total categorical accuracy:", total_avg)
Example No. 10
def main(args):
    num_classes = 10
    size = [28, 28]  # size of images
    ctype = ecvl.ColorType.GRAY

    in_ = eddl.Input([1, size[0], size[1]])
    out = LeNet(in_, num_classes)
    net = eddl.Model([in_], [out])
    eddl.build(net, eddl.sgd(0.001, 0.9), ["soft_cross_entropy"],
               ["categorical_accuracy"],
               eddl.CS_GPU([1]) if args.gpu else eddl.CS_CPU())
    eddl.summary(net)
    eddl.setlogfile(net, "mnist")

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugRotate([-5, 5]),
        ecvl.AugAdditivePoissonNoise([0, 10]),
        ecvl.AugGaussianBlur([0, 0.8]),
        ecvl.AugCoarseDropout([0, 0.3], [0.02, 0.05], 0),
    ])
    dataset_augs = ecvl.DatasetAugmentations([training_augs, None, None])

    print("Reading dataset")
    d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs, ctype)
    x_train = Tensor([args.batch_size, d.n_channels_, size[0], size[1]])
    y_train = Tensor([args.batch_size, len(d.classes_)])
    num_samples = len(d.GetSplit())
    num_batches = num_samples // args.batch_size
    indices = list(range(args.batch_size))

    print("Training")
    for i in range(args.epochs):
        eddl.reset_loss(net)
        s = d.GetSplit()
        random.shuffle(s)
        d.split_.training_ = s
        d.ResetCurrentBatch()
        for j in range(num_batches):
            print("Epoch %d/%d (batch %d/%d) - " %
                  (i + 1, args.epochs, j + 1, num_batches),
                  end="",
                  flush=True)
            d.LoadBatch(x_train, y_train)
            x_train.div_(255.0)
            tx, ty = [x_train], [y_train]
            eddl.train_batch(net, tx, ty, indices)
            eddl.print_loss(net, j)
            print()

    print("Saving weights")
    eddl.save(net, "mnist_checkpoint.bin", "bin")

    print("Evaluation")
    d.SetSplit(ecvl.SplitType.test)
    num_samples = len(d.GetSplit())
    num_batches = num_samples // args.batch_size
    for i in range(num_batches):
        print("batch %d / %d - " % (i, num_batches), end="", flush=True)
        d.LoadBatch(x_train, y_train)
        x_train.div_(255.0)
        eddl.evaluate(net, [x_train], [y_train])
Example No. 11
def main(args):
    batch_size = args.batch_size
    image_size = args.size, args.size
    thresh = 0.5

    if args.weights:
        os.makedirs(args.weights, exist_ok=True)

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(image_size,
                          ecvl.InterpolationType.cubic,
                          gt_interp=ecvl.InterpolationType.nearest),
        ecvl.AugMirror(.5),
        ecvl.AugFlip(.5),
        ecvl.AugRotate([-180, 180]),
        ecvl.AugAdditivePoissonNoise([0, 10]),
        ecvl.AugGammaContrast([0.5, 1.5]),
        ecvl.AugGaussianBlur([0, 0.8]),
        ecvl.AugCoarseDropout([0, 0.03], [0.02, 0.05], 0.25),
        ecvl.AugToFloat32(255, divisor_gt=255),
        ecvl.AugNormalize([0.6681, 0.5301, 0.5247],
                          [0.1337, 0.1480, 0.1595]),  # isic stats
    ])
    validation_test_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(image_size,
                          ecvl.InterpolationType.cubic,
                          gt_interp=ecvl.InterpolationType.nearest),
        ecvl.AugToFloat32(255, divisor_gt=255),
        ecvl.AugNormalize([0.6681, 0.5301, 0.5247],
                          [0.1337, 0.1480, 0.1595]),  # isic stats
    ])
    dataset_augs = ecvl.DatasetAugmentations(
        [training_augs, validation_test_augs, validation_test_augs])

    print('Reading dataset')
    d = ecvl.DLDataset(args.in_ds,
                       args.batch_size,
                       dataset_augs,
                       ctype=ecvl.ColorType.RGB)
    num_classes = len(d.classes_) or d.n_channels_gt_
    size = d.n_channels_, args.size, args.size

    if args.ckpts:
        net = eddl.import_net_from_onnx_file(args.ckpts, size)
    else:
        in_ = eddl.Input(size)
        out = Unet(in_, num_classes)
        out_sigm = eddl.Sigmoid(out)
        net = eddl.Model([in_], [out_sigm])

        # model_path = utils.DownloadModel(segmentation_zoo[args.model]['url'], f'{args.model}.onnx', 'model_onnx')
        # net = eddl.import_net_from_onnx_file(model_path, size)
        # eddl.removeLayer(net, segmentation_zoo[args.model]['to_remove'])
        # top = eddl.getLayer(net, segmentation_zoo[args.model]['top'])
        #
        # out = eddl.Sigmoid(eddl.Conv(top, num_classes, [3, 3], name='last_layer'))
        # data_input = eddl.getLayer(net, segmentation_zoo[args.model]['input'])  # input of the onnx
        # net = eddl.Model([data_input], [out])

    loss_name = 'binary_cross_entropy'
    metric_name = 'mean_squared_error'
    eddl.build(
        net, eddl.adam(args.learning_rate), [loss_name], [metric_name],
        eddl.CS_GPU(args.gpu, mem="low_mem") if args.gpu else eddl.CS_CPU(),
        True)
    out = eddl.getOut(net)[0]

    # if not args.ckpts:
    #     eddl.initializeLayer(net, "last_layer")

    eddl.summary(net)
    eddl.setlogfile(net, 'skin_lesion_segmentation')

    x = Tensor([args.batch_size, *size])
    y = Tensor([args.batch_size, d.n_channels_gt_, size[1], size[2]])

    miou = 0.
    if args.train:
        num_samples_train = len(d.GetSplit())
        num_batches_train = num_samples_train // args.batch_size
        num_samples_val = len(d.GetSplit(ecvl.SplitType.validation))
        num_batches_val = num_samples_val // args.batch_size
        evaluator = utils.Evaluator()

        print('Starting training')
        for e in range(args.epochs):
            d.SetSplit(ecvl.SplitType.training)
            eddl.reset_loss(net)
            s = d.GetSplit()
            random.shuffle(s)
            d.split_.training_ = s
            d.ResetAllBatches()
            for b in range(num_batches_train):
                d.LoadBatch(x, y)
                # x_ = x.select(["0"])
                # x_.normalize_(0, 1)
                # x_.mult_(255.)
                # x_.save(f'images/train_{e}_{b}.png')
                #
                # y_ = y.select(["0"])
                # # y_.mult_(255.)
                # y_.save(f'images/train_gt_{e}_{b}.png')

                eddl.train_batch(net, [x], [y])
                losses = eddl.get_losses(net)
                metrics = eddl.get_metrics(net)

                print(
                    f'Train - epoch [{e + 1}/{args.epochs}] - batch [{b + 1}/{num_batches_train}]'
                    f' - {loss_name}={losses[0]:.3f} - {metric_name}={metrics[0]:.3f}',
                    flush=True)

            d.SetSplit(ecvl.SplitType.validation)
            evaluator.ResetEval()
            eddl.reset_loss(net)

            for b in range(num_batches_val):
                n = 0
                print(
                    f'Validation - epoch [{e + 1}/{args.epochs}] - batch [{b + 1}/{num_batches_val}]'
                )
                d.LoadBatch(x, y)
                eddl.forward(net, [x])
                output = eddl.getOutput(out)
                for bs in range(args.batch_size):
                    img = output.select([str(bs)])
                    gt = y.select([str(bs)])
                    img_np = np.array(img, copy=False)
                    gt_np = np.array(gt, copy=False)
                    iou = evaluator.BinaryIoU(img_np, gt_np, thresh=thresh)
                    print(f' - IoU: {iou:.3f}', end="", flush=True)
                    if args.out_dir:
                        # C++ BinaryIoU modifies image as a side effect
                        img_np[img_np >= thresh] = 1
                        img_np[img_np < thresh] = 0
                        img_t = ecvl.TensorToView(img)
                        img_t.colortype_ = ecvl.ColorType.GRAY
                        img_t.channels_ = "xyc"
                        img.mult_(255.)
                        # orig_img
                        orig_img = x.select([str(bs)])
                        orig_img.mult_(255.)
                        orig_img_t = ecvl.TensorToImage(orig_img)
                        orig_img_t.colortype_ = ecvl.ColorType.BGR
                        orig_img_t.channels_ = "xyc"

                        tmp, labels = ecvl.Image.empty(), ecvl.Image.empty()
                        ecvl.CopyImage(img_t, tmp, ecvl.DataType.uint8)
                        ecvl.ConnectedComponentsLabeling(tmp, labels)
                        ecvl.CopyImage(labels, tmp, ecvl.DataType.uint8)
                        contours = ecvl.FindContours(tmp)
                        ecvl.CopyImage(orig_img_t, tmp, ecvl.DataType.uint8)
                        tmp_np = np.array(tmp, copy=False)
                        for cseq in contours:
                            for c in cseq:
                                tmp_np[c[0], c[1], 0] = 0
                                tmp_np[c[0], c[1], 1] = 0
                                tmp_np[c[0], c[1], 2] = 255
                        filename = d.samples_[d.GetSplit()[n]].location_[0]
                        head, tail = os.path.splitext(
                            os.path.basename(filename))
                        bname = "%s.png" % head
                        output_fn = os.path.join(args.out_dir, bname)
                        ecvl.ImWrite(output_fn, tmp)
                        if e == 0:
                            gt_t = ecvl.TensorToView(gt)
                            gt_t.colortype_ = ecvl.ColorType.GRAY
                            gt_t.channels_ = "xyc"
                            gt.mult_(255.)
                            gt_filename = d.samples_[d.GetSplit()
                                                     [n]].label_path_
                            gt_fn = os.path.join(args.out_dir,
                                                 os.path.basename(gt_filename))
                            ecvl.ImWrite(gt_fn, gt_t)
                    n += 1
                print()

            last_miou = evaluator.MIoU()
            print(
                f'Validation - epoch [{e + 1}/{args.epochs}] - Total MIoU: {last_miou:.3f}'
            )

            if last_miou > miou:
                miou = last_miou
                eddl.save_net_to_onnx_file(
                    net,
                    os.path.join(args.weights,
                                 f'isic_segm_{args.model}_epoch_{e + 1}.onnx'))
                print('Weights saved')
    elif args.test:
        evaluator = utils.Evaluator()
        evaluator.ResetEval()

        d.SetSplit(ecvl.SplitType.test)
        num_samples_test = len(d.GetSplit())
        num_batches_test = num_samples_test // batch_size
        for b in range(num_batches_test):
            n = 0
            print(f'Test - batch [{b + 1}/{num_batches_test}]')
            d.LoadBatch(x, y)
            eddl.forward(net, [x])
            output = eddl.getOutput(out)
            for bs in range(args.batch_size):
                img = output.select([str(bs)])
                gt = y.select([str(bs)])
                img_np, gt_np = np.array(img, copy=False), np.array(gt,
                                                                    copy=False)
                iou = evaluator.BinaryIoU(img_np, gt_np, thresh=thresh)
                print(f' - IoU: {iou:.3f}', end="", flush=True)
                if args.out_dir:
                    # C++ BinaryIoU modifies image as a side effect
                    img_np[img_np >= thresh] = 1
                    img_np[img_np < thresh] = 0
                    img_t = ecvl.TensorToView(img)
                    img_t.colortype_ = ecvl.ColorType.GRAY
                    img_t.channels_ = "xyc"
                    img.mult_(255.)
                    # orig_img
                    orig_img = x.select([str(bs)])
                    orig_img.mult_(255.)
                    orig_img_t = ecvl.TensorToImage(orig_img)
                    orig_img_t.colortype_ = ecvl.ColorType.BGR
                    orig_img_t.channels_ = "xyc"

                    tmp, labels = ecvl.Image.empty(), ecvl.Image.empty()
                    ecvl.CopyImage(img_t, tmp, ecvl.DataType.uint8)
                    ecvl.ConnectedComponentsLabeling(tmp, labels)
                    ecvl.CopyImage(labels, tmp, ecvl.DataType.uint8)
                    contours = ecvl.FindContours(tmp)
                    ecvl.CopyImage(orig_img_t, tmp, ecvl.DataType.uint8)
                    tmp_np = np.array(tmp, copy=False)
                    for cseq in contours:
                        for c in cseq:
                            tmp_np[c[0], c[1], 0] = 0
                            tmp_np[c[0], c[1], 1] = 0
                            tmp_np[c[0], c[1], 2] = 255
                    filename = d.samples_[d.GetSplit()[n]].location_[0]
                    head, tail = os.path.splitext(os.path.basename(filename))
                    bname = "%s.png" % head
                    output_fn = os.path.join(args.out_dir, bname)
                    ecvl.ImWrite(output_fn, tmp)

                    gt_t = ecvl.TensorToView(gt)
                    gt_t.colortype_ = ecvl.ColorType.GRAY
                    gt_t.channels_ = "xyc"
                    gt.mult_(255.)
                    gt_filename = d.samples_[d.GetSplit()[n]].label_path_
                    gt_fn = os.path.join(args.out_dir,
                                         os.path.basename(gt_filename))
                    ecvl.ImWrite(gt_fn, gt_t)
                n += 1
        miou = evaluator.MIoU()
        print(f'Test - Total MIoU: {miou:.3f}')
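For reference, a hedged NumPy sketch of the binary IoU that evaluator.BinaryIoU is expected to compute; the real utils.Evaluator implementation, which also modifies its input as noted in the comments above, lives elsewhere in this project.

import numpy as np

def binary_iou(pred, gt, thresh=0.5):
    # IoU = |A & B| / |A | B| on the binarized masks
    a = pred >= thresh
    b = gt >= thresh
    union = np.logical_or(a, b).sum()
    return float(np.logical_and(a, b).sum()) / union if union > 0 else 1.0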
Example No. 12
def main(args):
    eddl.download_drive()

    in_1 = eddl.Input([3, 584, 584])
    in_2 = eddl.Input([1, 584, 584])
    layer = eddl.Concat([in_1, in_2])

    layer = eddl.RandomCropScale(layer, [0.9, 1.0])
    layer = eddl.CenteredCrop(layer, [512, 512])
    img = eddl.Select(layer, ["0:3"])
    mask = eddl.Select(layer, ["3"])

    # DA net
    danet = eddl.Model([in_1, in_2], [])
    eddl.build(danet)
    if args.gpu:
        eddl.toGPU(danet, mem="low_mem")
    eddl.summary(danet)

    # SegNet
    in_ = eddl.Input([3, 512, 512])
    out = eddl.Sigmoid(UNetWithPadding(in_))
    segnet = eddl.Model([in_], [out])
    eddl.build(
        segnet,
        eddl.adam(0.00001),  # Optimizer
        ["mse"],  # Losses
        ["mse"],  # Metrics
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(segnet)

    print("Reading training data")
    # x_train_f = Tensor.fromarray(np.load("drive_trX.npy").astype(np.float32))
    x_train_f = Tensor.load("drive_trX.bin")
    x_train = x_train_f.permute([0, 3, 1, 2])
    x_train.info()
    x_train.div_(255.0)

    print("Reading test data")
    # y_train = Tensor.fromarray(np.load("drive_trY.npy").astype(np.float32))
    y_train = Tensor.load("drive_trY.bin")
    y_train.info()
    y_train.reshape_([20, 1, 584, 584])
    y_train.div_(255.0)

    xbatch = Tensor([args.batch_size, 3, 584, 584])
    ybatch = Tensor([args.batch_size, 1, 584, 584])

    print("Starting training")
    for i in range(args.epochs):
        print("\nEpoch %d/%d" % (i + 1, args.epochs))
        eddl.reset_loss(segnet)
        for j in range(args.num_batches):
            eddl.next_batch([x_train, y_train], [xbatch, ybatch])
            # DA net
            eddl.forward(danet, [xbatch, ybatch])
            xbatch_da = eddl.getOutput(img)
            ybatch_da = eddl.getOutput(mask)
            # SegNet
            eddl.train_batch(segnet, [xbatch_da], [ybatch_da])
            eddl.print_loss(segnet, j)
            if i == args.epochs - 1:
                yout = eddl.getOutput(out).select(["0"])
                yout.save("./out_%d.jpg" % j)
            print()
    print("All done")
Example No. 13
def main(args):
    eddl.download_flickr()

    epochs = 2 if args.small else 50

    olength = 20
    outvs = 2000
    embdim = 32

    # True: remove last layers and set new top = flatten
    # new input_size: [3, 256, 256] (from [224, 224, 3])
    net = eddl.download_resnet18(True, [3, 256, 256])
    lreshape = eddl.getLayer(net, "top")

    # create a new model from input output
    image_in = eddl.getLayer(net, "input")

    # Decoder
    ldecin = eddl.Input([outvs])
    ldec = eddl.ReduceArgMax(ldecin, [0])
    ldec = eddl.RandomUniform(eddl.Embedding(ldec, outvs, 1, embdim, True),
                              -0.05, 0.05)

    ldec = eddl.Concat([ldec, lreshape])
    layer = eddl.LSTM(ldec, 512, True)
    out = eddl.Softmax(eddl.Dense(layer, outvs))
    eddl.setDecoder(ldecin)
    net = eddl.Model([image_in], [out])

    # Build model
    eddl.build(
        net, eddl.adam(0.01), ["softmax_cross_entropy"], ["accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem))
    eddl.summary(net)

    # Load dataset
    x_train = Tensor.load("flickr_trX.bin", "bin")
    y_train = Tensor.load("flickr_trY.bin", "bin")
    if args.small:
        x_train = x_train.select([f"0:{2 * args.batch_size}", ":", ":", ":"])
        y_train = y_train.select([f"0:{2 * args.batch_size}", ":"])
    xtrain = Tensor.permute(x_train, [0, 3, 1, 2])
    y_train = Tensor.onehot(y_train, outvs)
    # batch x timesteps x input_dim
    y_train.reshape_([y_train.shape[0], olength, outvs])

    eddl.fit(net, [xtrain], [y_train], args.batch_size, epochs)
    eddl.save(net, "img2text.bin", "bin")

    print("\n === INFERENCE ===\n")

    # Get the "top" (reshape) features of all the images, using only the CNN
    timage = Tensor([x_train.shape[0], 512])  # image features
    cnn = eddl.Model([image_in], [lreshape])
    eddl.build(
        cnn,
        eddl.adam(0.001),  # not relevant
        ["mse"],  # not relevant
        ["mse"],  # not relevant
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem))
    eddl.summary(cnn)

    # forward images
    xbatch = Tensor([args.batch_size, 3, 256, 256])
    # numbatches = x_train.shape[0] / args.batch_size
    j = 0
    eddl.next_batch([x_train], [xbatch])
    eddl.forward(cnn, [xbatch])
    ybatch = eddl.getOutput(lreshape)
    sample = str(j * args.batch_size) + ":" + str((j + 1) * args.batch_size)
    timage.set_select([sample, ":"], ybatch)

    # Create Decoder non recurrent for n-best
    ldecin = eddl.Input([outvs])
    image = eddl.Input([512])
    lstate = eddl.States([2, 512])
    ldec = eddl.ReduceArgMax(ldecin, [0])
    ldec = eddl.RandomUniform(eddl.Embedding(ldec, outvs, 1, embdim), -0.05,
                              0.05)
    ldec = eddl.Concat([ldec, image])
    lstm = eddl.LSTM([ldec, lstate], 512, True)
    lstm.isrecurrent = False  # Important
    out = eddl.Softmax(eddl.Dense(lstm, outvs))
    decoder = eddl.Model([ldecin, image, lstate], [out])
    eddl.build(
        decoder,
        eddl.adam(0.001),  # not relevant
        ["softmax_cross_entropy"],  # not relevant
        ["accuracy"],  # not relevant
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem))
    eddl.summary(decoder)

    # Copy params from trained net
    eddl.copyParam(eddl.getLayer(net, "LSTM1"),
                   eddl.getLayer(decoder, "LSTM2"))
    eddl.copyParam(eddl.getLayer(net, "dense1"),
                   eddl.getLayer(decoder, "dense2"))
    eddl.copyParam(eddl.getLayer(net, "embedding1"),
                   eddl.getLayer(decoder, "embedding2"))

    # N-best for sample s
    s = 1 if args.small else 100  # sample 100
    # three input tensors with batch_size = 1 (one sentence)
    treshape = timage.select([str(s), ":"])
    text = y_train.select([str(s), ":", ":"])  # 1 x olength x outvs
    for j in range(olength):
        print(f"Word: {j}")
        word = None
        if j == 0:
            word = Tensor.zeros([1, outvs])
        else:
            word = text.select(["0", str(j - 1), ":"])
            word.reshape_([1, outvs])  # batch = 1
        treshape.reshape_([1, 512])  # batch = 1
        state = Tensor.zeros([1, 2, 512])  # batch = 1
        input_ = [word, treshape, state]
        eddl.forward(decoder, input_)
        # outword = eddl.getOutput(out)
        vstates = eddl.getStates(lstm)
        for i in range(len(vstates)):
            vstates[i].reshape_([1, 1, 512])
            state.set_select([":", str(i), ":"], vstates[i])

    print("All done")
Example No. 14
def main(args):
    eddl.download_eutrans()

    epochs = 1 if args.small else 5

    ilength = 30
    olength = 30
    invs = 687
    outvs = 514
    embedding = 64

    # Encoder
    in_ = eddl.Input([1])  # 1 word
    layer = in_
    lE = eddl.RandomUniform(
        eddl.Embedding(layer, invs, 1, embedding, True), -0.05, 0.05
    )
    enc = eddl.LSTM(lE, 128, True)
    cps = eddl.GetStates(enc)

    # Decoder
    ldin = eddl.Input([outvs])
    ld = eddl.ReduceArgMax(ldin, [0])
    ld = eddl.RandomUniform(
        eddl.Embedding(ld, outvs, 1, embedding), -0.05, 0.05
    )
    layer = eddl.LSTM([ld, cps], 128)
    out = eddl.Softmax(eddl.Dense(layer, outvs))
    eddl.setDecoder(ldin)

    net = eddl.Model([in_], [out])

    # Build model
    eddl.build(
        net,
        eddl.adam(0.01),
        ["softmax_cross_entropy"],
        ["accuracy"],
        eddl.CS_GPU(mem=args.mem) if args.gpu else eddl.CS_CPU(mem=args.mem)
    )
    eddl.summary(net)

    # Load dataset
    x_train = Tensor.load("eutrans_trX.bin")
    y_train = Tensor.load("eutrans_trY.bin")
    y_train = Tensor.onehot(y_train, outvs)
    # batch x timesteps x input_dim
    x_train.reshape_([x_train.shape[0], ilength, 1])
    # batch x timesteps x output_dim
    y_train.reshape_([y_train.shape[0], olength, outvs])

    x_test = Tensor.load("eutrans_tsX.bin")
    y_test = Tensor.load("eutrans_tsY.bin")
    y_test = Tensor.onehot(y_test, outvs)
    # batch x timesteps x input_dim
    x_test.reshape_([x_test.shape[0], ilength, 1])
    # batch x timesteps x output_dim
    y_test.reshape_([y_test.shape[0], olength, outvs])

    if args.small:
        sel = [f":{3 * args.batch_size}", ":", ":"]
        x_train = x_train.select(sel)
        y_train = y_train.select(sel)
        x_test = x_test.select(sel)
        y_test = y_test.select(sel)

    # Train model
    ybatch = Tensor([args.batch_size, olength, outvs])
    eddl.next_batch([y_train], [ybatch])
    for i in range(epochs):
        eddl.fit(net, [x_train], [y_train], args.batch_size, 1)

    print("All done")
Example No. 15
def eddl_validate_DLDataset(model, out, d):
    batch_time = AverageMeter('BatchTime', ':6.3f')
    total_time = AverageMeter('TotalTime', ':6.3f')

    # Use the resize dims defined by the user, or the default ResNet input size [224, 224]
    if hasattr(d, 'resize_dims_'):
        size = d.resize_dims_
    else:
        size = [224, 224]

    x = Tensor([d.batch_size_, d.n_channels_, size[0], size[1]])
    y = Tensor([d.batch_size_, len(d.classes_)])

    d.SetSplit(ecvl.SplitType.validation)
    num_samples_val = len(d.GetSplit())
    num_batches_val = num_samples_val // d.batch_size_

    indices = list(range(d.batch_size_))
    metric = eddl.getMetric("categorical_accuracy")

    print("Start Evaluation: ", flush=True)
    total_metric = []
    print("Evaluation (validation set)", flush=True)
    d.ResetAllBatches()
    d.SetSplit(ecvl.SplitType.validation)
    end_total = time.time()
    batch_time.reset()
    end = time.time()
    for b in range(num_batches_val):
        n = 0
        print("(batch {:d}/{:d}) - ".format(b + 1, num_batches_val),
              end="",
              flush=True)
        d.LoadBatch(x, y)
        x.div_(255.0)
        eddl.forward(model, [x])
        output = eddl.getOutput(out)
        sum_ = 0.0
        for k in range(d.batch_size_):
            result = output.select([str(k)])
            target = y.select([str(k)])
            ca = metric.value(target, result)
            total_metric.append(ca)
            sum_ += ca
            """
            if args.out_dir:
                result_a = np.array(result, copy=False)
                target_a = np.array(target, copy=False)
                classe = np.argmax(result_a).item()
                gt_class = np.argmax(target_a).item()
                single_image = eddlT.select(x, k)
                img_t = ecvl.TensorToView(single_image)
                img_t.colortype_ = ecvl.ColorType.BGR
                single_image.mult_(255.)
                filename = d.samples_[d.GetSplit()[n]].location_[0]
                head, tail = os.path.splitext(os.path.basename(filename))
                bname = "%s_gt_class_%s.png" % (head, gt_class)
                cur_path = os.path.join(
                    current_path, d.classes_[classe], bname
                )
                ecvl.ImWrite(cur_path, img_t)
            """
            n += 1
        batch_time.update(time.time() - end)
        end = time.time()
        print("categorical_accuracy:".format(sum_ / d.batch_size_), flush=True)
        print(batch_time)
    if (num_batches_val > 0):
        total_avg = sum(total_metric) / len(total_metric)
        print("Total categorical accuracy:{}".format(total_avg), flush=True)
    else:
        print(
            "Warning! \n "
            "Please check your validation set size, it might be smaller than the batch size,\n "
            "Validation test didn't execute as batch number is 0")

    #total_avg = sum(total_metric) / len(total_metric)
    #print("Total categorical accuracy:{}".format(total_avg), flush=True)

    total_time.update(time.time() - end_total)
    print(total_time, flush=True)
    return model
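AverageMeter is defined elsewhere in this project. The following is a minimal sketch of what the timing code above assumes (a name and format string, reset(), update(), and a readable __str__), in the spirit of the classic PyTorch example helper; it is an assumption, not this project's actual class.

class AverageMeter:
    """Tracks the current value and running average of a quantity (sketch)."""

    def __init__(self, name, fmt=":f"):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val, self.avg, self.sum, self.count = 0.0, 0.0, 0.0, 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        fmtstr = "{name} {val" + self.fmt + "} (avg {avg" + self.fmt + "})"
        return fmtstr.format(**self.__dict__)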
Example No. 16
def eddl_train_DLDataset(model,
                         out,
                         d,
                         learning_rate=1e-2,
                         momentum=0.9,
                         epochs=10,
                         dynamic_lr=False):

    batch_time = AverageMeter('BatchTime', ':6.3f')
    total_time = AverageMeter('TotalTime', ':6.3f')

    # Use the resize dims defined by the user, or the default ResNet input size [224, 224]
    if hasattr(d, 'resize_dims_'):
        size = d.resize_dims_
    else:
        size = [224, 224]

    x = Tensor([d.batch_size_, d.n_channels_, size[0], size[1]])
    y = Tensor([d.batch_size_, len(d.classes_)])
    d.SetSplit(ecvl.SplitType.training)
    num_samples_train = len(d.GetSplit())
    num_batches_train = num_samples_train // d.batch_size_

    d.SetSplit(ecvl.SplitType.validation)
    num_samples_val = len(d.GetSplit())
    num_batches_val = num_samples_val // d.batch_size_

    indices = list(range(d.batch_size_))
    metric = eddl.getMetric("categorical_accuracy")

    print("Starting training", flush=True)
    end_total = time.time()
    for e in range(epochs):
        if dynamic_lr and (e % 30 == 0) and e > 0:
            # every 30 epochs, scale the learning rate down by a factor of 10
            print(
                "Scaling the learning rate down by a factor of 10 (every 30 epochs)"
            )
            learning_rate = learning_rate * 0.1
            eddl.setlr(model, [learning_rate, momentum])

        print("Epoch {:d}/{:d} - Training".format(e + 1, epochs), flush=True)

        d.SetSplit(ecvl.SplitType.training)
        eddl.reset_loss(model)
        total_metric = []
        s = d.GetSplit()
        random.shuffle(s)
        d.split_.training_ = s
        d.ResetAllBatches()
        batch_time.reset()
        end = time.time()
        for b in range(num_batches_train):
            print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format(
                e + 1, epochs, b + 1, num_batches_train),
                  end="",
                  flush=True)
            d.LoadBatch(x, y)
            x.div_(255.0)
            tx, ty = [x], [y]
            eddl.train_batch(model, tx, ty, indices)
            eddl.print_loss(model, b)
            batch_time.update(time.time() - end)
            end = time.time()
            print(batch_time, flush=True)
            print()

        #print("Saving weights")
        #eddl.save(
        #    net, "isic_classification_checkpoint_epoch_%s.bin" % e, "bin"
        #)

        print("Epoch %d/%d - Evaluation (validation set)" % (e + 1, epochs),
              flush=True)
        d.SetSplit(ecvl.SplitType.validation)
        batch_time.reset()
        end = time.time()
        for b in range(num_batches_val):
            n = 0
            print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format(
                e + 1, epochs, b + 1, num_batches_val),
                  end="",
                  flush=True)
            d.LoadBatch(x, y)
            x.div_(255.0)
            eddl.forward(model, [x])
            output = eddl.getOutput(out)
            sum_ = 0.0
            for k in range(d.batch_size_):
                result = output.select([str(k)])
                target = y.select([str(k)])
                ca = metric.value(target, result)
                total_metric.append(ca)
                sum_ += ca
                """
                if args.out_dir:
                    result_a = np.array(result, copy=False)
                    target_a = np.array(target, copy=False)
                    classe = np.argmax(result_a).item()
                    gt_class = np.argmax(target_a).item()
                    single_image = eddlT.select(x, k)
                    img_t = ecvl.TensorToView(single_image)
                    img_t.colortype_ = ecvl.ColorType.BGR
                    single_image.mult_(255.)
                    filename = d.samples_[d.GetSplit()[n]].location_[0]
                    head, tail = os.path.splitext(os.path.basename(filename))
                    bname = "%s_gt_class_%s.png" % (head, gt_class)
                    cur_path = os.path.join(
                        current_path, d.classes_[classe], bname
                    )
                    ecvl.ImWrite(cur_path, img_t)
                """
                n += 1
            batch_time.update(time.time() - end)
            end = time.time()
            print("batch categorical accuracy:{}".format(sum_ / d.batch_size_),
                  flush=True)
            print(batch_time, flush=True)
        if (num_batches_val > 0):
            total_avg = sum(total_metric) / len(total_metric)
            print("Total categorical accuracy:{}".format(total_avg),
                  flush=True)
        else:
            print(
                "Warning! \n "
                "Please check your validation set size, it might be smaller than the batch size,\n "
                "Validation test didn't execute as batch number is 0")

    total_time.update(time.time() - end_total)
    print(total_time, flush=True)
    return model
Example No. 17
def main(args):
    num_classes = 8
    size = [224, 224]  # size of images

    in_ = eddl.Input([3, size[0], size[1]])
    out = VGG16(in_, num_classes)
    net = eddl.Model([in_], [out])
    eddl.build(net, eddl.sgd(0.001, 0.9), ["soft_cross_entropy"],
               ["categorical_accuracy"],
               eddl.CS_GPU([1]) if args.gpu else eddl.CS_CPU())
    eddl.summary(net)
    eddl.setlogfile(net, "skin_lesion_classification_inference")

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
    ])
    test_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
    ])
    dataset_augs = ecvl.DatasetAugmentations([training_augs, None, test_augs])

    print("Reading dataset")
    d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs)

    if args.out_dir:
        for c in d.classes_:
            os.makedirs(os.path.join(args.out_dir, c), exist_ok=True)

    x = Tensor([args.batch_size, d.n_channels_, size[0], size[1]])
    y = Tensor([args.batch_size, len(d.classes_)])

    d.SetSplit(ecvl.SplitType.test)
    num_samples = len(d.GetSplit())
    num_batches = num_samples // args.batch_size
    metric = eddl.getMetric("categorical_accuracy")
    total_metric = []

    if not os.path.exists(args.ckpts):
        raise RuntimeError('Checkpoint "{}" not found'.format(args.ckpts))
    eddl.load(net, args.ckpts, "bin")

    print("Testing")
    for b in range(num_batches):
        n = 0
        print("Batch {:d}/{:d}".format(b + 1, num_batches))
        d.LoadBatch(x, y)
        x.div_(255.0)
        eddl.forward(net, [x])
        output = eddl.getOutput(out)
        sum_ = 0.0
        for j in range(args.batch_size):
            result = output.select([str(j)])
            target = y.select([str(j)])
            ca = metric.value(target, result)
            total_metric.append(ca)
            sum_ += ca
            if args.out_dir:
                result_a = np.array(result, copy=False)
                target_a = np.array(target, copy=False)
                classe = np.argmax(result_a).item()
                gt_class = np.argmax(target_a).item()
                single_image = x.select([str(j)])
                img_t = ecvl.TensorToView(single_image)
                img_t.colortype_ = ecvl.ColorType.BGR
                single_image.mult_(255.)
                filename = d.samples_[d.GetSplit()[n]].location_[0]
                head, tail = os.path.splitext(os.path.basename(filename))
                bname = "%s_gt_class_%s.png" % (head, gt_class)
                cur_path = os.path.join(args.out_dir, d.classes_[classe],
                                        bname)
                ecvl.ImWrite(cur_path, img_t)
            n += 1
        print("categorical_accuracy:", sum_ / args.batch_size)
    total_avg = sum(total_metric) / len(total_metric)
    print("Total categorical accuracy:", total_avg)
Example No. 18
0
def main(args):
    num_classes = 1
    size = [192, 192]  # size of images
    thresh = 0.5

    if args.out_dir:
        os.makedirs(args.out_dir, exist_ok=True)

    in_ = eddl.Input([3, size[0], size[1]])
    out = SegNet(in_, num_classes)
    out_sigm = eddl.Sigmoid(out)
    net = eddl.Model([in_], [out_sigm])
    eddl.build(net, eddl.adam(0.0001), ["cross_entropy"],
               ["mean_squared_error"],
               eddl.CS_GPU([1]) if args.gpu else eddl.CS_CPU())
    eddl.summary(net)
    eddl.setlogfile(net, "skin_lesion_segmentation_inference")

    if not os.path.exists(args.ckpts):
        raise RuntimeError('Checkpoint "{}" not found'.format(args.ckpts))
    eddl.load(net, args.ckpts, "bin")

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
    ])
    test_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
    ])
    dataset_augs = ecvl.DatasetAugmentations([training_augs, None, test_augs])

    print("Reading dataset")
    d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs)
    x = Tensor([args.batch_size, d.n_channels_, size[0], size[1]])
    y = Tensor([args.batch_size, d.n_channels_gt_, size[0], size[1]])
    print("Testing")
    d.SetSplit(ecvl.SplitType.test)
    num_samples_test = len(d.GetSplit())
    num_batches_test = num_samples_test // args.batch_size

    evaluator = utils.Evaluator()
    evaluator.ResetEval()
    n = 0  # running index into the test split, used to recover sample filenames
    for b in range(num_batches_test):
        print("Batch {:d}/{:d} ".format(b + 1, num_batches_test),
              end="",
              flush=True)
        d.LoadBatch(x, y)
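        # Normalize both the input images and the ground-truth masks to [0, 1]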
        x.div_(255.0)
        y.div_(255.0)
        eddl.forward(net, [x])
        output = eddl.getOutput(out_sigm)
        for k in range(args.batch_size):
            img = output.select([str(k)])
            gt = y.select([str(k)])
            img_np, gt_np = np.array(img, copy=False), np.array(gt, copy=False)
            iou = evaluator.BinaryIoU(img_np, gt_np, thresh=thresh)
            print("- IoU: %.6g " % iou, end="", flush=True)
            if args.out_dir:
                # C++ BinaryIoU modifies image as a side effect
                img_np[img_np >= thresh] = 1
                img_np[img_np < thresh] = 0
                img_t = ecvl.TensorToView(img)
                img_t.colortype_ = ecvl.ColorType.GRAY
                img_t.channels_ = "xyc"
                img.mult_(255.)
                # orig_img
                orig_img = x.select([str(k)])
                orig_img.mult_(255.)
                orig_img_t = ecvl.TensorToImage(orig_img)
                orig_img_t.colortype_ = ecvl.ColorType.BGR
                orig_img_t.channels_ = "xyc"

                tmp, labels = ecvl.Image.empty(), ecvl.Image.empty()
                ecvl.CopyImage(img_t, tmp, ecvl.DataType.uint8)
                ecvl.ConnectedComponentsLabeling(tmp, labels)
                ecvl.CopyImage(labels, tmp, ecvl.DataType.uint8)
                contours = ecvl.FindContours(tmp)
                ecvl.CopyImage(orig_img_t, tmp, ecvl.DataType.uint8)
                tmp_np = np.array(tmp, copy=False)
                for cseq in contours:
                    for c in cseq:
                        tmp_np[c[0], c[1], 0] = 0
                        tmp_np[c[0], c[1], 1] = 0
                        tmp_np[c[0], c[1], 2] = 255

                filename = d.samples_[d.GetSplit()[n]].location_[0]
                head, tail = os.path.splitext(os.path.basename(filename))
                bname = "%s.png" % head
                output_fn = os.path.join(args.out_dir, bname)
                ecvl.ImWrite(output_fn, tmp)

                gt_t = ecvl.TensorToView(gt)
                gt_t.colortype_ = ecvl.ColorType.GRAY
                gt_t.channels_ = "xyc"
                gt.mult_(255.)
                gt_filename = d.samples_[d.GetSplit()[n]].label_path_
                gt_fn = os.path.join(args.out_dir,
                                     os.path.basename(gt_filename))
                ecvl.ImWrite(gt_fn, gt_t)
            n += 1
        print()
    print("MIoU: %.6g" % evaluator.MeanMetric())
Example No. 19
0
def main(args):
    num_classes = 1
    size = [192, 192]  # size of images
    thresh = 0.5

    if args.out_dir:
        os.makedirs(args.out_dir, exist_ok=True)

    in_ = eddl.Input([3, size[0], size[1]])
    out = SegNet(in_, num_classes)
    out_sigm = eddl.Sigmoid(out)
    net = eddl.Model([in_], [out_sigm])
    eddl.build(net, eddl.adam(0.0001), ["cross_entropy"],
               ["mean_squared_error"],
               eddl.CS_GPU([1]) if args.gpu else eddl.CS_CPU())
    eddl.summary(net)
    eddl.setlogfile(net, "skin_lesion_segmentation")

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(size),
        ecvl.AugMirror(0.5),
        ecvl.AugFlip(0.5),
        ecvl.AugRotate([-180, 180]),
        ecvl.AugAdditivePoissonNoise([0, 10]),
        ecvl.AugGammaContrast([0.5, 1.5]),
        ecvl.AugGaussianBlur([0, 0.8]),
        ecvl.AugCoarseDropout([0, 0.3], [0.02, 0.05], 0.5)
    ])
    validation_augs = ecvl.SequentialAugmentationContainer(
        [ecvl.AugResizeDim(size)])
    dataset_augs = ecvl.DatasetAugmentations(
        [training_augs, validation_augs, None])

    print("Reading dataset")
    d = ecvl.DLDataset(args.in_ds, args.batch_size, dataset_augs)
    x = Tensor([args.batch_size, d.n_channels_, size[0], size[1]])
    y = Tensor([args.batch_size, d.n_channels_gt_, size[0], size[1]])
    num_samples_train = len(d.GetSplit())
    num_batches_train = num_samples_train // args.batch_size
    d.SetSplit(ecvl.SplitType.validation)
    num_samples_validation = len(d.GetSplit())
    num_batches_validation = num_samples_validation // args.batch_size
    indices = list(range(args.batch_size))

    evaluator = utils.Evaluator()
    print("Starting training")
    for e in range(args.epochs):
        print("Epoch {:d}/{:d} - Training".format(e + 1, args.epochs),
              flush=True)
        d.SetSplit(ecvl.SplitType.training)
        eddl.reset_loss(net)
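        # Shuffle the training split in place so each epoch sees the samples in a new order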
        s = d.GetSplit()
        random.shuffle(s)
        d.split_.training_ = s
        d.ResetAllBatches()
        for b in range(num_batches_train):
            print("Epoch {:d}/{:d} (batch {:d}/{:d}) - ".format(
                e + 1, args.epochs, b + 1, num_batches_train),
                  end="",
                  flush=True)
            d.LoadBatch(x, y)
            x.div_(255.0)
            y.div_(255.0)
            tx, ty = [x], [y]
            eddl.train_batch(net, tx, ty, indices)
            eddl.print_loss(net, b)
            print()

        print("Saving weights")
        eddl.save(net, "isic_segmentation_checkpoint_epoch_%s.bin" % e, "bin")

        d.SetSplit(ecvl.SplitType.validation)
        evaluator.ResetEval()
        print("Epoch %d/%d - Evaluation" % (e + 1, args.epochs), flush=True)
        n = 0  # running index into the validation split, used to recover sample filenames
        for b in range(num_batches_validation):
            print("Epoch {:d}/{:d} (batch {:d}/{:d}) ".format(
                e + 1, args.epochs, b + 1, num_batches_validation),
                  end="",
                  flush=True)
            d.LoadBatch(x, y)
            x.div_(255.0)
            y.div_(255.0)
            eddl.forward(net, [x])
            output = eddl.getOutput(out_sigm)
            for k in range(args.batch_size):
                img = output.select([str(k)])
                gt = y.select([str(k)])
                img_np = np.array(img, copy=False)
                gt_np = np.array(gt, copy=False)
                iou = evaluator.BinaryIoU(img_np, gt_np, thresh=thresh)
                print("- IoU: %.6g " % iou, end="", flush=True)
                if args.out_dir:
                    # C++ BinaryIoU modifies image as a side effect
                    img_np[img_np >= thresh] = 1
                    img_np[img_np < thresh] = 0
                    img_t = ecvl.TensorToView(img)
                    img_t.colortype_ = ecvl.ColorType.GRAY
                    img_t.channels_ = "xyc"
                    img.mult_(255.)
                    # orig_img
                    orig_img = x.select([str(k)])
                    orig_img.mult_(255.)
                    orig_img_t = ecvl.TensorToImage(orig_img)
                    orig_img_t.colortype_ = ecvl.ColorType.BGR
                    orig_img_t.channels_ = "xyc"

                    tmp, labels = ecvl.Image.empty(), ecvl.Image.empty()
                    ecvl.CopyImage(img_t, tmp, ecvl.DataType.uint8)
                    ecvl.ConnectedComponentsLabeling(tmp, labels)
                    ecvl.CopyImage(labels, tmp, ecvl.DataType.uint8)
                    contours = ecvl.FindContours(tmp)
                    ecvl.CopyImage(orig_img_t, tmp, ecvl.DataType.uint8)
                    tmp_np = np.array(tmp, copy=False)
                    for cseq in contours:
                        for c in cseq:
                            tmp_np[c[0], c[1], 0] = 0
                            tmp_np[c[0], c[1], 1] = 0
                            tmp_np[c[0], c[1], 2] = 255
                    filename = d.samples_[d.GetSplit()[n]].location_[0]
                    head, tail = os.path.splitext(os.path.basename(filename))
                    bname = "%s.png" % head
                    output_fn = os.path.join(args.out_dir, bname)
                    ecvl.ImWrite(output_fn, tmp)
                    if e == 0:
                        gt_t = ecvl.TensorToView(gt)
                        gt_t.colortype_ = ecvl.ColorType.GRAY
                        gt_t.channels_ = "xyc"
                        gt.mult_(255.)
                        gt_filename = d.samples_[d.GetSplit()[n]].label_path_
                        gt_fn = os.path.join(args.out_dir,
                                             os.path.basename(gt_filename))
                        ecvl.ImWrite(gt_fn, gt_t)
                n += 1
            print()
        print("MIoU: %.6g" % evaluator.MeanMetric())
def main(args):
    batch_size = args.batch_size
    image_size = args.size, args.size

    if args.weights:
        os.makedirs(args.weights, exist_ok=True)

    training_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(image_size, ecvl.InterpolationType.cubic),
        ecvl.AugMirror(.5),
        ecvl.AugFlip(.5),
        ecvl.AugRotate([-180, 180]),
        ecvl.AugAdditivePoissonNoise([0, 10]),
        ecvl.AugGammaContrast([0.5, 1.5]),
        ecvl.AugGaussianBlur([0, 0.8]),
        ecvl.AugCoarseDropout([0, 0.03], [0.02, 0.05], 0.25),
        ecvl.AugToFloat32(255),
    ])
    validation_test_augs = ecvl.SequentialAugmentationContainer([
        ecvl.AugResizeDim(image_size),
        ecvl.AugToFloat32(255),
    ])
    dataset_augs = ecvl.DatasetAugmentations(
        [training_augs, validation_test_augs, validation_test_augs])

    print('Reading dataset')
    d = ecvl.DLDataset(args.in_ds,
                       args.batch_size,
                       dataset_augs,
                       ctype=ecvl.ColorType.RGB)
    num_classes = len(d.classes_)
    size = d.n_channels_, args.size, args.size

    if args.ckpts:
        net = eddl.import_net_from_onnx_file(args.ckpts, size)
    else:
        model_path = utils.DownloadModel(classification_zoo[args.model]['url'],
                                         f'{args.model}.onnx', 'model_onnx')
        net = eddl.import_net_from_onnx_file(model_path, size)
        eddl.removeLayer(net, classification_zoo[args.model]
                         ['to_remove'])  # remove last Linear of resnet
        top = eddl.getLayer(
            net,
            classification_zoo[args.model]['top'])  # get flatten of resnet

        out = eddl.Softmax(eddl.Dense(top, num_classes, True,
                                      'classifier'))  # true is for the bias
        data_input = eddl.getLayer(
            net, classification_zoo[args.model]['input'])  # input of the onnx
        net = eddl.Model([data_input], [out])

    eddl.build(
        net, eddl.adam(args.learning_rate), ['softmax_cross_entropy'],
        ['accuracy'],
        eddl.CS_GPU(args.gpu, mem="low_mem") if args.gpu else eddl.CS_CPU(),
        False)
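    # Note: the trailing False in build() is intended to skip weight re-initialization,
    # so the parameters imported from ONNX are preserved (assumption based on the call pattern).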
    out = eddl.getOut(net)[0]

    if not args.ckpts:
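        # Give the newly added "classifier" Dense layer fresh random weights;
        # the pretrained layers keep the values imported from ONNX.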
        eddl.initializeLayer(net, "classifier")

    eddl.summary(net)
    eddl.setlogfile(net, 'skin_lesion_classification')

    x = Tensor([batch_size, *size])
    y = Tensor([batch_size, num_classes])

    metric_fn = eddl.getMetric('accuracy')
    best_accuracy = 0.
    if args.train:
        num_samples_train = len(d.GetSplit())
        num_batches_train = num_samples_train // args.batch_size
        num_samples_val = len(d.GetSplit(ecvl.SplitType.validation))
        num_batches_val = num_samples_val // args.batch_size

        print('Starting training')
        for e in range(args.epochs):
            if args.out_dir:
                current_path = os.path.join(args.out_dir, f'Epoch_{e}')
                for c in d.classes_:
                    c_dir = os.path.join(current_path, c)
                    os.makedirs(c_dir, exist_ok=True)
            d.SetSplit(ecvl.SplitType.training)
            eddl.reset_loss(net)
            s = d.GetSplit()
            random.shuffle(s)
            d.split_.training_ = s
            d.ResetAllBatches()
            for b in range(num_batches_train):
                d.LoadBatch(x, y)
                eddl.train_batch(net, [x], [y])
                losses = eddl.get_losses(net)
                metrics = eddl.get_metrics(net)

                print(
                    f'Train - epoch [{e + 1}/{args.epochs}] - batch [{b + 1}/{num_batches_train}]'
                    f' - loss={losses[0]:.3f} - accuracy={metrics[0]:.3f}',
                    flush=True)

            d.SetSplit(ecvl.SplitType.validation)
            values = np.zeros(num_batches_val)
            eddl.reset_loss(net)

            n = 0  # running index into the validation split, used to recover sample filenames
            for b in range(num_batches_val):
                d.LoadBatch(x, y)
                eddl.forward(net, [x])
                output = eddl.getOutput(out)
                value = metric_fn.value(y, output)
                values[b] = value
                if args.out_dir:
                    for k in range(args.batch_size):
                        result = output.select([str(k)])
                        target = y.select([str(k)])
                        result_a = np.array(result, copy=False)
                        target_a = np.array(target, copy=False)
                        classe = np.argmax(result_a).item()
                        gt_class = np.argmax(target_a).item()
                        single_image = x.select([str(k)])
                        img_t = ecvl.TensorToView(single_image)
                        img_t.colortype_ = ecvl.ColorType.BGR
                        single_image.mult_(255.)
                        filename = d.samples_[d.GetSplit()[n]].location_[0]
                        head, tail = os.path.splitext(
                            os.path.basename(filename))
                        bname = '{}_gt_class_{}.png'.format(head, gt_class)
                        cur_path = os.path.join(current_path,
                                                d.classes_[classe], bname)
                        ecvl.ImWrite(cur_path, img_t)
                        n += 1  # advance per sample, inside the loop over the batch

                print(
                    f'Validation - epoch [{e + 1}/{args.epochs}] - batch [{b + 1}/{num_batches_val}] -'
                    f' accuracy={np.mean(values[:b + 1] / batch_size):.3f}')

            last_accuracy = np.mean(values / batch_size)
            print(
                f'Validation - epoch [{e + 1}/{args.epochs}] - total accuracy={last_accuracy:.3f}'
            )
            if last_accuracy > best_accuracy:
                best_accuracy = last_accuracy
                print('Saving weights')
                eddl.save_net_to_onnx_file(
                    net,
                    f'isic_classification_{args.model}_epoch_{e + 1}.onnx')

    elif args.test:
        d.SetSplit(ecvl.SplitType.test)
        num_samples_test = len(d.GetSplit())
        num_batches_test = num_samples_test // batch_size
        values = np.zeros(num_batches_test)
        eddl.reset_loss(net)

        n = 0  # running index into the test split, used to recover sample filenames
        for b in range(num_batches_test):
            d.LoadBatch(x, y)
            eddl.forward(net, [x])
            output = eddl.getOutput(out)
            value = metric_fn.value(y, output)
            values[b] = value
            if args.out_dir:
                for k in range(args.batch_size):
                    result = output.select([str(k)])
                    target = y.select([str(k)])
                    result_a = np.array(result, copy=False)
                    target_a = np.array(target, copy=False)
                    classe = np.argmax(result_a).item()
                    gt_class = np.argmax(target_a).item()
                    single_image = x.select([str(k)])
                    img_t = ecvl.TensorToView(single_image)
                    img_t.colortype_ = ecvl.ColorType.BGR
                    single_image.mult_(255.)
                    filename = d.samples_[d.GetSplit()[n]].location_[0]
                    head, tail = os.path.splitext(os.path.basename(filename))
                    bname = "%s_gt_class_%s.png" % (head, gt_class)
                    cur_path = os.path.join(args.out_dir, d.classes_[classe],
                                            bname)
                    ecvl.ImWrite(cur_path, img_t)
                    n += 1

            print(
                f'Test - batch [{b + 1}/{num_batches_test}] - accuracy={np.mean(values[:b + 1] / batch_size):.3f}'
            )
        print(f'Test - total accuracy={np.mean(values / batch_size):.3f}')
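
classification_zoo is a module-level dictionary defined outside this snippet; the code only reads the url, to_remove, top and input entries for the selected model, and utils.DownloadModel fetches the ONNX file when it is not already cached. A hypothetical entry, with a placeholder URL and placeholder layer names (the real names depend on the ONNX graph), could look like:

classification_zoo = {
    "resnet50": {
        # Where to fetch the pretrained ONNX model from (placeholder URL).
        "url": "https://example.com/models/resnet50.onnx",
        # Name of the final Dense/Linear layer to strip before attaching a new head.
        "to_remove": "resnet_dense_output",
        # Name of the flatten layer the new classifier Dense is attached to.
        "top": "resnet_flatten",
        # Name of the graph input layer.
        "input": "data",
    },
}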